-rw-r--r--  BCC_MAKEFILE  80
-rw-r--r--  EMX_MAKEFILE  141
-rw-r--r--  MacOS.c  99
-rw-r--r--  MacProjects.sit.hqx  886
-rw-r--r--  Mac_files/MacOS_Test_config.h  88
-rw-r--r--  Mac_files/MacOS_config.h  86
-rw-r--r--  Mac_files/dataend.c  9
-rw-r--r--  Mac_files/datastart.c  9
-rw-r--r--  Makefile  246
-rw-r--r--  NT_MAKEFILE  42
-rw-r--r--  OS2_MAKEFILE  34
-rw-r--r--  PCR-Makefile  86
-rw-r--r--  README  482
-rw-r--r--  README.Mac  50
-rw-r--r--  README.QUICK  2
-rw-r--r--  README.amiga  50
-rw-r--r--  README.debugging  55
-rw-r--r--  README.win32  44
-rw-r--r--  SCoptions.amiga  11
-rw-r--r--  SMakefile.amiga  42
-rw-r--r--  add_gc_prefix.c  14
-rw-r--r--  allchblk.c  57
-rw-r--r--  alloc.c  221
-rw-r--r--  blacklst.c  68
-rwxr-xr-x  callprocs  1
-rw-r--r--  checksums.c  60
-rw-r--r--  config.h  190
-rwxr-xr-x  cord/SCOPTIONS.amiga  14
-rw-r--r--  cord/SMakefile.amiga  20
-rw-r--r--  cord/cord.h  60
-rw-r--r--  cord/cordbscs.c  49
-rw-r--r--  cord/cordprnt.c  13
-rw-r--r--  cord/cordtest.c  32
-rw-r--r--  cord/cordxtra.c  68
-rw-r--r--  cord/de.c  83
-rw-r--r--  cord/de_win.c  6
-rw-r--r--  cord/gc.h  583
-rw-r--r--  cord/private/cord_pos.h (renamed from cord/cord_pos.h)  0
-rw-r--r--  dbg_mlc.c  141
-rw-r--r--  dyn_load.c  81
-rw-r--r--  finalize.c  200
-rw-r--r--  gc.h  372
-rw-r--r--  gc_c++.h  161
-rw-r--r--  gc_cpp.cc (renamed from gc_c++.cc)  23
-rw-r--r--  gc_cpp.h  285
-rw-r--r--  gc_hdrs.h  12
-rw-r--r--  gc_mark.h  37
-rw-r--r--  gc_priv.h  304
-rw-r--r--  gcc_support.c  516
-rw-r--r--  headers.c  16
-rw-r--r--  if_mach.c  2
-rw-r--r--  include/cord.h  323
-rw-r--r--  include/ec.h  70
-rw-r--r--  include/gc.h  450
-rw-r--r--  include/gc_cpp.h  285
-rw-r--r--  include/gc_inl.h (renamed from gc_inl.h)  20
-rw-r--r--  include/gc_inline.h (renamed from gc_inline.h)  0
-rw-r--r--  include/gc_typed.h  26
-rw-r--r--  include/private/config.h  687
-rw-r--r--  include/private/cord_pos.h  118
-rw-r--r--  include/private/gc_hdrs.h  133
-rw-r--r--  include/private/gc_priv.h  1342
-rw-r--r--  include/weakpointer.h  221
-rw-r--r--  mach_dep.c  156
-rw-r--r--  makefile.depend  0
-rw-r--r--  malloc.c  185
-rw-r--r--  mark.c  113
-rw-r--r--  mark_rts.c  124
-rw-r--r--  misc.c  280
-rw-r--r--  obj_map.c  11
-rw-r--r--  os_dep.c  621
-rw-r--r--  pc_excludes  5
-rw-r--r--  pcr_interface.c  63
-rw-r--r--  ptr_chck.c  315
-rw-r--r--  reclaim.c  78
-rw-r--r--  setjmp_t.c  19
-rw-r--r--  solaris_threads.c  380
-rw-r--r--  stubborn.c  70
-rw-r--r--  test.c  306
-rw-r--r--  test_cpp.cc  235
-rw-r--r--  typd_mlc.c  34
-rw-r--r--  weakpointer.h  221
82 files changed, 11525 insertions, 1597 deletions
diff --git a/BCC_MAKEFILE b/BCC_MAKEFILE
new file mode 100644
index 00000000..b430cc8d
--- /dev/null
+++ b/BCC_MAKEFILE
@@ -0,0 +1,80 @@
+# Makefile for Borland C++ 4.5 on NT
+#
+bc= c:\bc45
+bcbin= $(bc)\bin
+bclib= $(bc)\lib
+bcinclude= $(bc)\include
+
+cc= $(bcbin)\bcc32
+rc= $(bcbin)\brc32
+lib= $(bcbin)\tlib
+link= $(bcbin)\tlink32
+cflags= -R -v -vi -H -H=gc.csm -I$(bcinclude);cord -L$(bclib) \
+ -w-pro -w-aus -w-par -w-ccc -w-rch -a4 -D__STDC__=0
+#defines= -DSILENT
+defines= -DSMALL_CONFIG -DSILENT -DALL_INTERIOR_POINTERS
+
+.c.obj:
+ $(cc) @&&|
+ $(cdebug) $(cflags) $(cvars) $(defines) -o$* -c $*.c
+|
+
+.cpp.obj:
+ $(cc) @&&|
+ $(cdebug) $(cflags) $(cvars) $(defines) -o$* -c $*.cpp
+|
+
+.rc.res:
+ $(rc) -i$(bcinclude) -r -fo$* $*.rc
+
+XXXOBJS= XXXalloc.obj XXXreclaim.obj XXXallchblk.obj XXXmisc.obj \
+ XXXmach_dep.obj XXXos_dep.obj XXXmark_rts.obj XXXheaders.obj XXXmark.obj \
+ XXXobj_map.obj XXXblacklst.obj XXXfinalize.obj XXXnew_hblk.obj \
+ XXXdbg_mlc.obj XXXmalloc.obj XXXstubborn.obj XXXdyn_load.obj \
+ XXXtypd_mlc.obj XXXptr_chck.obj XXXgc_cpp.obj
+
+OBJS= $(XXXOBJS:XXX=)
+
+all: gctest.exe cord\de.exe test_cpp.exe
+
+$(OBJS) test.obj: gc_priv.h gc_hdrs.h gc.h config.h MAKEFILE
+
+gc.lib: $(OBJS)
+ -del gc.lib
+ tlib $* @&&|
+ $(XXXOBJS:XXX=+)
+|
+
+gctest.exe: test.obj gc.lib
+ $(cc) @&&|
+ $(cflags) -W -e$* test.obj gc.lib
+|
+
+cord\de.obj cord\de_win.obj: cord\cord.h cord\private\cord_pos.h cord\de_win.h \
+ cord\de_cmds.h
+
+cord\de.exe: cord\cordbscs.obj cord\cordxtra.obj cord\de.obj cord\de_win.obj \
+ cord\de_win.res gc.lib
+ $(cc) @&&|
+ $(cflags) -W -e$* cord\cordbscs.obj cord\cordxtra.obj \
+ cord\de.obj cord\de_win.obj gc.lib
+|
+ $(rc) cord\de_win.res cord\de.exe
+
+gc_cpp.obj: gc_cpp.h gc.h
+
+gc_cpp.cpp: gc_cpp.cc
+ copy gc_cpp.cc gc_cpp.cpp
+
+test_cpp.cpp: test_cpp.cc
+ copy test_cpp.cc test_cpp.cpp
+
+test_cpp.exe: test_cpp.obj gc_cpp.h gc.h gc.lib
+ $(cc) @&&|
+ $(cflags) -W -e$* test_cpp.obj gc.lib
+|
+
+scratch:
+ -del *.obj *.res *.exe *.csm cord\*.obj cord\*.res cord\*.exe cord\*.csm
+
+
diff --git a/EMX_MAKEFILE b/EMX_MAKEFILE
new file mode 100644
index 00000000..7eed701e
--- /dev/null
+++ b/EMX_MAKEFILE
@@ -0,0 +1,141 @@
+#
+# OS/2 specific Makefile for the EMX environment
+#
+# You need GNU Make 3.71, gcc 2.5.7, emx 0.8h and GNU fileutils 3.9
+# or similar tools. C++ interface and de.exe weren't tested.
+#
+# Rename this file "Makefile".
+#
+
+# Primary targets:
+# gc.a - builds basic library
+# c++ - adds C++ interface to library and include directory
+# cords - adds cords (heavyweight strings) to library and include directory
+# test - prints porting information, then builds basic version of gc.a, and runs
+# some tests of collector and cords. Does not add cords or c++ interface to gc.a
+# cord/de.exe - builds dumb editor based on cords.
+CC= gcc
+CXX=g++
+# Needed only for "make c++", which adds the c++ interface
+
+CFLAGS= -O -DALL_INTERIOR_POINTERS -DSILENT
+# Setjmp_test may yield overly optimistic results when compiled
+# without optimization.
+# -DSILENT disables statistics printing, and improves performance.
+# -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
+# altered stubborn objects, at substantial performance cost.
+# -DFIND_LEAK causes the collector to assume that all inaccessible
+# objects should have been explicitly deallocated, and reports exceptions
+# -DSOLARIS_THREADS enables support for Solaris (thr_) threads.
+# (Clients should also define SOLARIS_THREADS and then include
+# gc.h before performing thr_ or GC_ operations.)
+# -DALL_INTERIOR_POINTERS allows all pointers to the interior
+# of objects to be recognized. (See gc_private.h for consequences.)
+# -DSMALL_CONFIG tries to tune the collector for small heap sizes,
+# usually causing it to use less space in such situations.
+# Incremental collection no longer works in this case.
+# -DDONT_ADD_BYTE_AT_END is meaningful only with
+# -DALL_INTERIOR_POINTERS. Normally -DALL_INTERIOR_POINTERS
+# causes all objects to be padded so that pointers just past the end of
+# an object can be recognized. This can be expensive. (The padding
+# is normally more than one byte due to alignment constraints.)
+# -DDONT_ADD_BYTE_AT_END disables the padding.
+
+AR= ar
+RANLIB= ar s
+
+# Redefining srcdir allows object code for the nonPCR version of the collector
+# to be generated in different directories
+srcdir = .
+VPATH = $(srcdir)
+
+OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dyn_load.o dbg_mlc.o malloc.o stubborn.o checksums.o typd_mlc.o ptr_chck.o
+
+CORD_OBJS= cord/cordbscs.o cord/cordxtra.o cord/cordprnt.o
+
+CORD_INCLUDE_FILES= $(srcdir)/gc.h $(srcdir)/cord/cord.h $(srcdir)/cord/ec.h \
+ $(srcdir)/cord/cord_pos.h
+
+# Libraries needed for curses applications. Only needed for de.
+CURSES= -lcurses -ltermlib
+
+# The following is irrelevant on most systems. But a few
+# versions of make otherwise fork the shell specified in
+# the SHELL environment variable.
+SHELL= bash
+
+SPECIALCFLAGS =
+# Alternative flags to the C compiler for mach_dep.c.
+# Mach_dep.c often doesn't like optimization, and it's
+# not time-critical anyway.
+
+all: gc.a gctest.exe
+
+$(OBJS) test.o: $(srcdir)/gc_priv.h $(srcdir)/gc_hdrs.h $(srcdir)/gc.h \
+ $(srcdir)/config.h $(srcdir)/gc_typed.h
+# The dependency on Makefile is needed. Changing
+# options such as -DSILENT affects the size of GC_arrays,
+# invalidating all .o files that rely on gc_priv.h
+
+mark.o typd_mlc.o finalize.o: $(srcdir)/gc_mark.h
+
+gc.a: $(OBJS)
+ $(AR) ru gc.a $(OBJS)
+ $(RANLIB) gc.a
+
+cords: $(CORD_OBJS) cord/cordtest.exe
+ $(AR) ru gc.a $(CORD_OBJS)
+ $(RANLIB) gc.a
+ cp $(srcdir)/cord/cord.h include/cord.h
+ cp $(srcdir)/cord/ec.h include/ec.h
+ cp $(srcdir)/cord/cord_pos.h include/cord_pos.h
+
+gc_c++.o: $(srcdir)/gc_c++.cc $(srcdir)/gc_c++.h
+ $(CXX) -c -O $(srcdir)/gc_c++.cc
+
+c++: gc_c++.o $(srcdir)/gc_c++.h
+ $(AR) ru gc.a gc_c++.o
+ $(RANLIB) gc.a
+ cp $(srcdir)/gc_c++.h include/gc_c++.h
+
+mach_dep.o: $(srcdir)/mach_dep.c
+ $(CC) -o mach_dep.o -c $(SPECIALCFLAGS) $(srcdir)/mach_dep.c
+
+mark_rts.o: $(srcdir)/mark_rts.c
+ $(CC) -o mark_rts.o -c $(CFLAGS) $(srcdir)/mark_rts.c
+
+cord/cordbscs.o: $(srcdir)/cord/cordbscs.c $(CORD_INCLUDE_FILES)
+ $(CC) $(CFLAGS) -c $(srcdir)/cord/cordbscs.c -o cord/cordbscs.o
+
+cord/cordxtra.o: $(srcdir)/cord/cordxtra.c $(CORD_INCLUDE_FILES)
+ $(CC) $(CFLAGS) -c $(srcdir)/cord/cordxtra.c -o cord/cordxtra.o
+
+cord/cordprnt.o: $(srcdir)/cord/cordprnt.c $(CORD_INCLUDE_FILES)
+ $(CC) $(CFLAGS) -c $(srcdir)/cord/cordprnt.c -o cord/cordprnt.o
+
+cord/cordtest.exe: $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a
+ $(CC) $(CFLAGS) -o cord/cordtest.exe $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a
+
+cord/de.exe: $(srcdir)/cord/de.c $(srcdir)/cord/cordbscs.o $(srcdir)/cord/cordxtra.o gc.a
+ $(CC) $(CFLAGS) -o cord/de.exe $(srcdir)/cord/de.c $(srcdir)/cord/cordbscs.o $(srcdir)/cord/cordxtra.o gc.a $(CURSES)
+
+clean:
+ rm -f gc.a test.o gctest.exe output-local output-diff $(OBJS) \
+ setjmp_test mon.out gmon.out a.out core \
+ $(CORD_OBJS) cord/cordtest.exe cord/de.exe
+ -rm -f *~
+
+gctest.exe: test.o gc.a
+ $(CC) $(CFLAGS) -o gctest.exe test.o gc.a
+
+# If an optimized setjmp_test generates a segmentation fault,
+# odds are your compiler is broken. Gctest may still work.
+# Try compiling setjmp_t.c unoptimized.
+setjmp_test.exe: $(srcdir)/setjmp_t.c $(srcdir)/gc.h
+ $(CC) $(CFLAGS) -o setjmp_test.exe $(srcdir)/setjmp_t.c
+
+test: setjmp_test.exe gctest.exe
+ ./setjmp_test
+ ./gctest
+ make cord/cordtest.exe
+ cord/cordtest
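The target and flag comments above describe how gc.a is meant to be consumed once built. As a minimal, hedged sketch of a client (not part of this commit; the file name usegc.c is made up), the program below allocates from the collected heap with GC_malloc, forces a collection with GC_gcollect, and prints the heap size with GC_get_heap_size, all of which are declared in the gc.h shipped in this tree:

    /* usegc.c -- illustrative client of gc.a; not part of this commit. */
    #include <stdio.h>
    #include "gc.h"

    int main(void)
    {
        int i;
        for (i = 0; i < 100000; i++) {
            /* GC_malloc returns zero-filled, collectable memory; no explicit free. */
            char *p = (char *)GC_malloc(64);
            p[0] = (char)i;            /* touch it so the store isn't optimized away */
        }
        GC_gcollect();                 /* force a full collection */
        printf("heap size = %lu bytes\n", (unsigned long)GC_get_heap_size());
        return 0;
    }

Under EMX it would be compiled against the library much like the gctest.exe rule above, e.g. gcc -O -I. usegc.c gc.a -o usegc.exe.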
diff --git a/MacOS.c b/MacOS.c
new file mode 100644
index 00000000..280bf88f
--- /dev/null
+++ b/MacOS.c
@@ -0,0 +1,99 @@
+/*
+ MacOS.c
+
+ Some routines for the Macintosh OS port of the Hans-J. Boehm, Alan J. Demers
+ garbage collector.
+
+ <Revision History>
+
+ 11/22/94 pcb StripAddress the temporary memory handle for 24-bit mode.
+ 11/30/94 pcb Tracking all memory usage so we can deallocate it all at once.
+
+ by Patrick C. Beard.
+ */
+/* Boehm, November 17, 1995 11:50 am PST */
+
+#include <Resources.h>
+#include <Memory.h>
+#include <LowMem.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+// use 'CODE' resource 0 to get exact location of the beginning of global space.
+
+typedef struct {
+ unsigned long aboveA5;
+ unsigned long belowA5;
+ unsigned long JTSize;
+ unsigned long JTOffset;
+} *CodeZeroPtr, **CodeZeroHandle;
+
+void* GC_MacGetDataStart()
+{
+ CodeZeroHandle code0 = (CodeZeroHandle)GetResource('CODE', 0);
+ if (code0) {
+ long belowA5Size = (**code0).belowA5;
+ ReleaseResource((Handle)code0);
+ return (LMGetCurrentA5() - belowA5Size);
+ }
+ fprintf(stderr, "Couldn't load the jump table.");
+ exit(-1);
+ return 0;
+}
+
+/* track the use of temporary memory so it can be freed all at once. */
+
+typedef struct TemporaryMemoryBlock TemporaryMemoryBlock, **TemporaryMemoryHandle;
+
+struct TemporaryMemoryBlock {
+ TemporaryMemoryHandle nextBlock;
+ char data[];
+};
+
+static TemporaryMemoryHandle theTemporaryMemory = NULL;
+static Boolean firstTime = true;
+
+void GC_MacFreeTemporaryMemory(void);
+
+Ptr GC_MacTemporaryNewPtr(Size size, Boolean clearMemory)
+{
+ static Boolean firstTime = true;
+ OSErr result;
+ TemporaryMemoryHandle tempMemBlock;
+ Ptr tempPtr = nil;
+
+ tempMemBlock = (TemporaryMemoryHandle)TempNewHandle(size + sizeof(TemporaryMemoryBlock), &result);
+ if (tempMemBlock && result == noErr) {
+ HLockHi((Handle)tempMemBlock);
+ tempPtr = (**tempMemBlock).data;
+ if (clearMemory) memset(tempPtr, 0, size);
+ tempPtr = StripAddress(tempPtr);
+
+ // keep track of the allocated blocks.
+ (**tempMemBlock).nextBlock = theTemporaryMemory;
+ theTemporaryMemory = tempMemBlock;
+ }
+
+ // install an exit routine to clean up the memory used at the end.
+ if (firstTime) {
+ atexit(&GC_MacFreeTemporaryMemory);
+ firstTime = false;
+ }
+
+ return tempPtr;
+}
+
+void GC_MacFreeTemporaryMemory()
+{
+ long totalMemoryUsed = 0;
+ TemporaryMemoryHandle tempMemBlock = theTemporaryMemory;
+ while (tempMemBlock != NULL) {
+ TemporaryMemoryHandle nextBlock = (**tempMemBlock).nextBlock;
+ totalMemoryUsed += GetHandleSize((Handle)tempMemBlock);
+ DisposeHandle((Handle)tempMemBlock);
+ tempMemBlock = nextBlock;
+ }
+ theTemporaryMemory = NULL;
+ fprintf(stdout, "[total memory used: %ld bytes.]\n", totalMemoryUsed);
+}
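GC_MacTemporaryNewPtr above is the primitive the Mac port uses to grab heap blocks when USE_TEMPORARY_MEMORY is defined, and every handle it locks down is chained through theTemporaryMemory so GC_MacFreeTemporaryMemory can dispose of the lot at exit. The sketch below shows one plausible way a port-level system-memory hook could sit on top of it; the GET_MEM name and the NewPtrClear fallback are assumptions for illustration, not code from this commit:

    /* Illustration only: a port-level allocation hook wrapping MacOS.c.
     * Everything except GC_MacTemporaryNewPtr itself is hypothetical. */
    #include <Types.h>
    #include <Memory.h>

    extern Ptr GC_MacTemporaryNewPtr(Size size, Boolean clearMemory);

    #ifdef USE_TEMPORARY_MEMORY
      /* Zeroed blocks carved out of Macintosh temporary memory; MacOS.c keeps
       * them locked high and releases them all when the program exits. */
    # define GET_MEM(bytes) ((void *)GC_MacTemporaryNewPtr((Size)(bytes), true))
    #else
      /* Fall back to the application heap. */
    # define GET_MEM(bytes) ((void *)NewPtrClear((Size)(bytes)))
    #endif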
diff --git a/MacProjects.sit.hqx b/MacProjects.sit.hqx
new file mode 100644
index 00000000..99dff88b
--- /dev/null
+++ b/MacProjects.sit.hqx
@@ -0,0 +1,886 @@
+(This file must be converted with BinHex 4.0)
+
 [BinHex-encoded archive payload omitted; the diffstat above lists MacProjects.sit.hqx at 886 lines.]
diff --git a/Mac_files/MacOS_Test_config.h b/Mac_files/MacOS_Test_config.h
new file mode 100644
index 00000000..94db03f0
--- /dev/null
+++ b/Mac_files/MacOS_Test_config.h
@@ -0,0 +1,88 @@
+/*
+ MacOS_Test_config.h
+
+ Configuration flags for Macintosh development systems.
+
+ Test version.
+
+ <Revision History>
+
+ 11/16/95 pcb Updated compilation flags to reflect latest 4.6 Makefile.
+
+ by Patrick C. Beard.
+ */
+/* Boehm, November 17, 1995 12:05 pm PST */
+
+#ifdef __MWERKS__
+#if defined(__powerc)
+#include <MacHeadersPPC>
+#else
+#include <MacHeaders68K>
+#endif
+#endif
+
+// these are defined again in gc_priv.h.
+#undef TRUE
+#undef FALSE
+
+#define ALL_INTERIOR_POINTERS // follows interior pointers.
+//#define SILENT // want collection messages.
+//#define DONT_ADD_BYTE_AT_END // no padding.
+//#define SMALL_CONFIG // whether to use a smaller heap.
+#define NO_SIGNALS // signals aren't real on the Macintosh.
+#define USE_TEMPORARY_MEMORY // use Macintosh temporary memory.
+
+// CFLAGS= -O -DNO_SIGNALS -DALL_INTERIOR_POINTERS -DSILENT
+//
+//LIBGC_CFLAGS= -O -DNO_SIGNALS -DSILENT \
+// -DREDIRECT_MALLOC=GC_malloc_uncollectable \
+// -DDONT_ADD_BYTE_AT_END -DALL_INTERIOR_POINTERS
+// Flags for building libgc.a -- the last two are required.
+//
+// Setjmp_test may yield overly optimistic results when compiled
+// without optimization.
+// -DSILENT disables statistics printing, and improves performance.
+// -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
+// altered stubborn objects, at substantial performance cost.
+// Use only for incremental collector debugging.
+// -DFIND_LEAK causes the collector to assume that all inaccessible
+// objects should have been explicitly deallocated, and reports exceptions.
+// Finalization and the test program are not usable in this mode.
+// -DSOLARIS_THREADS enables support for Solaris (thr_) threads.
+// (Clients should also define SOLARIS_THREADS and then include
+// gc.h before performing thr_ or GC_ operations.)
+// This is broken on nonSPARC machines.
+// -DALL_INTERIOR_POINTERS allows all pointers to the interior
+// of objects to be recognized. (See gc_priv.h for consequences.)
+// -DSMALL_CONFIG tries to tune the collector for small heap sizes,
+// usually causing it to use less space in such situations.
+// Incremental collection no longer works in this case.
+// -DLARGE_CONFIG tunes the collector for unusually large heaps.
+// Necessary for heaps larger than about 500 MB on most machines.
+// Recommended for heaps larger than about 64 MB.
+// -DDONT_ADD_BYTE_AT_END is meaningful only with
+// -DALL_INTERIOR_POINTERS. Normally -DALL_INTERIOR_POINTERS
+// causes all objects to be padded so that pointers just past the end of
+// an object can be recognized. This can be expensive. (The padding
+// is normally more than one byte due to alignment constraints.)
+// -DDONT_ADD_BYTE_AT_END disables the padding.
+// -DNO_SIGNALS does not disable signals during critical parts of
+// the GC process. This is no less correct than many malloc
+// implementations, and it sometimes has a significant performance
+// impact. However, it is dangerous for many not-quite-ANSI C
+// programs that call things like printf in asynchronous signal handlers.
+// -DOPERATOR_NEW_ARRAY declares that the C++ compiler supports the
+// new syntax "operator new[]" for allocating and deleting arrays.
+// See gc_cpp.h for details. No effect on the C part of the collector.
+// This is defined implicitly in a few environments.
+// -DREDIRECT_MALLOC=X causes malloc, realloc, and free to be defined
+// as aliases for X, GC_realloc, and GC_free, respectively.
+// Calloc is redefined in terms of the new malloc. X should
+// be either GC_malloc or GC_malloc_uncollectable.
+// The former is occasionally useful for working around leaks in code
+// you don't want to (or can't) look at. It may not work for
+// existing code, but it often does. Neither works on all platforms,
+// since some ports use malloc or calloc to obtain system memory.
+// (Probably works for UNIX, and win32.)
+// -DNO_DEBUG removes GC_dump and the debugging routines it calls.
+// Reduces code size slightly at the expense of debuggability.
\ No newline at end of file
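The -DREDIRECT_MALLOC notes above hinge on the difference between collectable and uncollectable memory. As a short hedged sketch of that distinction through the public interface (the file name and sizes are invented for illustration): GC_malloc'ed objects are reclaimed once unreachable, while GC_malloc_uncollectable objects are scanned for pointers but only released by GC_free, which is exactly what redirected malloc gets when X is GC_malloc_uncollectable.

    /* uncollectable.c -- illustration only; not part of this commit. */
    #include "gc.h"

    int main(void)
    {
        /* Collectable: reclaimed automatically once nothing points at it. */
        char *temp = (char *)GC_malloc(128);

        /* Uncollectable: still scanned for pointers (it acts like a root),
         * but only GC_free releases it.  This is what plain malloc becomes
         * under -DREDIRECT_MALLOC=GC_malloc_uncollectable. */
        char *stable = (char *)GC_malloc_uncollectable(128);

        temp[0] = stable[0] = 0;
        GC_free(stable);
        return 0;
    }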
diff --git a/Mac_files/MacOS_config.h b/Mac_files/MacOS_config.h
new file mode 100644
index 00000000..838be591
--- /dev/null
+++ b/Mac_files/MacOS_config.h
@@ -0,0 +1,86 @@
+/*
+ MacOS_config.h
+
+ Configuration flags for Macintosh development systems.
+
+ <Revision History>
+
+ 11/16/95 pcb Updated compilation flags to reflect latest 4.6 Makefile.
+
+ by Patrick C. Beard.
+ */
+/* Boehm, November 17, 1995 12:10 pm PST */
+
+#ifdef __MWERKS__
+#if defined(__powerc)
+#include <MacHeadersPPC>
+#else
+#include <MacHeaders68K>
+#endif
+#endif
+
+// these are defined again in gc_priv.h.
+#undef TRUE
+#undef FALSE
+
+#define ALL_INTERIOR_POINTERS // follows interior pointers.
+#define SILENT // no collection messages.
+//#define DONT_ADD_BYTE_AT_END // no padding.
+//#define SMALL_CONFIG // whether to use a smaller heap.
+#define NO_SIGNALS // signals aren't real on the Macintosh.
+#define USE_TEMPORARY_MEMORY // use Macintosh temporary memory.
+
+// CFLAGS= -O -DNO_SIGNALS -DSILENT -DALL_INTERIOR_POINTERS
+//
+//LIBGC_CFLAGS= -O -DNO_SIGNALS -DSILENT \
+// -DREDIRECT_MALLOC=GC_malloc_uncollectable \
+// -DDONT_ADD_BYTE_AT_END -DALL_INTERIOR_POINTERS
+// Flags for building libgc.a -- the last two are required.
+//
+// Setjmp_test may yield overly optimistic results when compiled
+// without optimization.
+// -DSILENT disables statistics printing, and improves performance.
+// -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
+// altered stubborn objects, at substantial performance cost.
+// Use only for incremental collector debugging.
+// -DFIND_LEAK causes the collector to assume that all inaccessible
+// objects should have been explicitly deallocated, and reports exceptions.
+// Finalization and the test program are not usable in this mode.
+// -DSOLARIS_THREADS enables support for Solaris (thr_) threads.
+// (Clients should also define SOLARIS_THREADS and then include
+// gc.h before performing thr_ or GC_ operations.)
+// This is broken on nonSPARC machines.
+// -DALL_INTERIOR_POINTERS allows all pointers to the interior
+// of objects to be recognized. (See gc_priv.h for consequences.)
+// -DSMALL_CONFIG tries to tune the collector for small heap sizes,
+// usually causing it to use less space in such situations.
+// Incremental collection no longer works in this case.
+// -DLARGE_CONFIG tunes the collector for unusually large heaps.
+// Necessary for heaps larger than about 500 MB on most machines.
+// Recommended for heaps larger than about 64 MB.
+// -DDONT_ADD_BYTE_AT_END is meaningful only with
+// -DALL_INTERIOR_POINTERS. Normally -DALL_INTERIOR_POINTERS
+// causes all objects to be padded so that pointers just past the end of
+// an object can be recognized. This can be expensive. (The padding
+// is normally more than one byte due to alignment constraints.)
+// -DDONT_ADD_BYTE_AT_END disables the padding.
+// -DNO_SIGNALS does not disable signals during critical parts of
+// the GC process. This is no less correct than many malloc
+// implementations, and it sometimes has a significant performance
+// impact. However, it is dangerous for many not-quite-ANSI C
+// programs that call things like printf in asynchronous signal handlers.
+// -DOPERATOR_NEW_ARRAY declares that the C++ compiler supports the
+// new syntax "operator new[]" for allocating and deleting arrays.
+// See gc_cpp.h for details. No effect on the C part of the collector.
+// This is defined implicitly in a few environments.
+// -DREDIRECT_MALLOC=X causes malloc, realloc, and free to be defined
+// as aliases for X, GC_realloc, and GC_free, respectively.
+// Calloc is redefined in terms of the new malloc. X should
+// be either GC_malloc or GC_malloc_uncollectable.
+// The former is occasionally useful for working around leaks in code
+// you don't want to (or can't) look at. It may not work for
+// existing code, but it often does. Neither works on all platforms,
+// since some ports use malloc or calloc to obtain system memory.
+// (Probably works for UNIX, and win32.)
+// -DNO_DEBUG removes GC_dump and the debugging routines it calls.
+// Reduces code size slightly at the expense of debuggability.
\ No newline at end of file
diff --git a/Mac_files/dataend.c b/Mac_files/dataend.c
new file mode 100644
index 00000000..a3e3fe84
--- /dev/null
+++ b/Mac_files/dataend.c
@@ -0,0 +1,9 @@
+/*
+ dataend.c
+
+ A hack to get the extent of global data for the Macintosh.
+
+ by Patrick C. Beard.
+ */
+
+long __dataend;
diff --git a/Mac_files/datastart.c b/Mac_files/datastart.c
new file mode 100644
index 00000000..a9e0dd59
--- /dev/null
+++ b/Mac_files/datastart.c
@@ -0,0 +1,9 @@
+/*
+ datastart.c
+
+ A hack to get the extent of global data for the Macintosh.
+
+ by Patrick C. Beard.
+ */
+
+long __datastart;
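datastart.c and dataend.c rely on link order: with datastart.c linked first and dataend.c last, __datastart and __dataend bracket the application's globals, giving the collector a data-segment root range. Below is a hedged sketch of the idea using the public interface; GC_add_roots is declared in gc.h, and treating these two symbols as exact bounds is the assumption the hack depends on:

    /* Illustration only: registering the bracketed globals as a GC root range.
     * Assumes datastart.c was linked first and dataend.c last. */
    #include "gc.h"

    extern long __datastart;   /* Mac_files/datastart.c */
    extern long __dataend;     /* Mac_files/dataend.c   */

    static void register_mac_data_segment(void)
    {
        GC_add_roots((char *)&__datastart, (char *)&__dataend);
    }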
diff --git a/Makefile b/Makefile
index feb83e60..5ba9978f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,64 +1,113 @@
# Primary targets:
# gc.a - builds basic library
-# c++ - adds C++ interface to library and include directory
-# cords - adds cords (heavyweight strings) to library and include directory
-# test - prints porting information, then builds basic version of gc.a, and runs
-# some tests of collector and cords. Does not add cords or c++ interface to gc.a
+# libgc.a - builds library for use with g++ "-fgc-keyword" extension
+# c++ - adds C++ interface to library
+# cords - adds cords (heavyweight strings) to library
+# test - prints porting information, then builds basic version of gc.a,
+# and runs some tests of collector and cords. Does not add cords or
+# c++ interface to gc.a
# cord/de - builds dumb editor based on cords.
CC= cc
-CXX=g++
-# Needed only for "make c++", which adds the c++ interface
+CXX=gcc
+AS=as
+# The above doesn't work with gas, which doesn't run cpp.
+# Define AS as `gcc -c -x assembler-with-cpp' instead.
+
+CFLAGS= -O -DNO_SIGNALS -DSILENT -DALL_INTERIOR_POINTERS
+
+LIBGC_CFLAGS= -O -DNO_SIGNALS -DSILENT \
+ -DREDIRECT_MALLOC=GC_malloc_uncollectable \
+ -DDONT_ADD_BYTE_AT_END -DALL_INTERIOR_POINTERS
+# Flags for building libgc.a -- the last two are required.
-CFLAGS= -O -DALL_INTERIOR_POINTERS -DSILENT
# Setjmp_test may yield overly optimistic results when compiled
# without optimization.
# -DSILENT disables statistics printing, and improves performance.
# -DCHECKSUMS reports on erroneously clear dirty bits, and unexpectedly
# altered stubborn objects, at substantial performance cost.
+# Use only for incremental collector debugging.
# -DFIND_LEAK causes the collector to assume that all inaccessible
-# objects should have been explicitly deallocated, and reports exceptions
+# objects should have been explicitly deallocated, and reports exceptions.
+# Finalization and the test program are not usable in this mode.
# -DSOLARIS_THREADS enables support for Solaris (thr_) threads.
# (Clients should also define SOLARIS_THREADS and then include
-# gc.h before performing thr_ or GC_ operations.)
+# gc.h before performing thr_ or dl* or GC_ operations.)
+# This is broken on nonSPARC machines.
# -DALL_INTERIOR_POINTERS allows all pointers to the interior
-# of objects to be recognized. (See gc_private.h for consequences.)
+# of objects to be recognized. (See gc_priv.h for consequences.)
# -DSMALL_CONFIG tries to tune the collector for small heap sizes,
# usually causing it to use less space in such situations.
# Incremental collection no longer works in this case.
+# -DLARGE_CONFIG tunes the collector for unusually large heaps.
+# Necessary for heaps larger than about 500 MB on most machines.
+# Recommended for heaps larger than about 64 MB.
# -DDONT_ADD_BYTE_AT_END is meaningful only with
# -DALL_INTERIOR_POINTERS. Normally -DALL_INTERIOR_POINTERS
# causes all objects to be padded so that pointers just past the end of
# an object can be recognized. This can be expensive. (The padding
# is normally more than one byte due to alignment constraints.)
# -DDONT_ADD_BYTE_AT_END disables the padding.
-
+# -DNO_SIGNALS does not disable signals during critical parts of
+# the GC process. This is no less correct than many malloc
+# implementations, and it sometimes has a significant performance
+# impact. However, it is dangerous for many not-quite-ANSI C
+# programs that call things like printf in asynchronous signal handlers.
+# -DOPERATOR_NEW_ARRAY declares that the C++ compiler supports the
+# new syntax "operator new[]" for allocating and deleting arrays.
+# See gc_cpp.h for details. No effect on the C part of the collector.
+# This is defined implicitly in a few environments.
+# -DREDIRECT_MALLOC=X causes malloc, realloc, and free to be defined
+# as aliases for X, GC_realloc, and GC_free, respectively.
+# Calloc is redefined in terms of the new malloc. X should
+# be either GC_malloc or GC_malloc_uncollectable.
+# The former is occasionally useful for working around leaks in code
+# you don't want to (or can't) look at. It may not work for
+# existing code, but it often does. Neither works on all platforms,
+# since some ports use malloc or calloc to obtain system memory.
+# (Probably works for UNIX, and win32.)
+# -DNO_DEBUG removes GC_dump and the debugging routines it calls.
+# Reduces code size slightly at the expense of debuggability.
+
+CXXFLAGS= $(CFLAGS)
AR= ar
RANLIB= ranlib
# Redefining srcdir allows object code for the nonPCR version of the collector
-# to be generated in different directories
+# to be generated in different directories. In this case, the destination directory
+# should contain a copy of the original include directory.
srcdir = .
VPATH = $(srcdir)
-OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dyn_load.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o typd_mlc.o
+OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o typd_mlc.o ptr_chck.o
-CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c typd_mlc.c
+CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c typd_mlc.c ptr_chck.c
-CORD_SRCS= cord/cordbscs.c cord/cordxtra.c cord/cordprnt.c cord/de.c cord/cordtest.c cord/cord.h cord/ec.h cord/cord_pos.h cord/de_win.c cord/de_win.h cord/de_cmds.h cord/de_win.ICO cord/de_win.RC
+CORD_SRCS= cord/cordbscs.c cord/cordxtra.c cord/cordprnt.c cord/de.c cord/cordtest.c cord/cord.h cord/ec.h cord/private/cord_pos.h cord/de_win.c cord/de_win.h cord/de_cmds.h cord/de_win.ICO cord/de_win.RC cord/SCOPTIONS.amiga cord/SMakefile.amiga
CORD_OBJS= cord/cordbscs.o cord/cordxtra.o cord/cordprnt.o
-SRCS= $(CSRCS) mips_mach_dep.s rs6000_mach_dep.s alpha_mach_dep.s sparc_mach_dep.s gc.h gc_typed.h gc_hdrs.h gc_priv.h gc_private.h config.h gc_mark.h gc_inl.h gc_inline.h gc.man if_mach.c if_not_there.c gc_c++.cc gc_c++.h $(CORD_SRCS)
-
-OTHER_FILES= Makefile PCR-Makefile OS2_MAKEFILE NT_MAKEFILE \
- README test.c setjmp_t.c SMakefile.amiga SCoptions.amiga \
- README.amiga README.win32 cord/README include/gc.h \
- include/gc_typed.h README.QUICK callprocs pc_excludes \
- barrett_diagram README.OS2
+SRCS= $(CSRCS) mips_mach_dep.s rs6000_mach_dep.s alpha_mach_dep.s \
+ sparc_mach_dep.s gc.h gc_typed.h gc_hdrs.h gc_priv.h gc_private.h \
+ config.h gc_mark.h include/gc_inl.h include/gc_inline.h gc.man \
+ if_mach.c if_not_there.c gc_cpp.cc gc_cpp.h weakpointer.h \
+ gcc_support.c $(CORD_SRCS)
+
+OTHER_FILES= Makefile PCR-Makefile OS2_MAKEFILE NT_MAKEFILE BCC_MAKEFILE \
+ README test.c test_cpp.cc setjmp_t.c SMakefile.amiga \
+ SCoptions.amiga README.amiga README.win32 cord/README \
+ cord/gc.h include/gc.h include/gc_typed.h include/cord.h \
+ include/ec.h include/private/cord_pos.h include/private/config.h \
+ include/private/gc_hdrs.h include/private/gc_priv.h include/gc_cpp.h \
+ include/weakpointer.h README.QUICK callprocs pc_excludes \
+ barrett_diagram README.OS2 README.Mac MacProjects.sit.hqx \
+ MacOS.c EMX_MAKEFILE makefile.depend README.debugging \
+ include/gc_cpp.h Mac_files/datastart.c Mac_files/dataend.c \
+ Mac_files/MacOS_config.h Mac_files/MacOS_Test_config.h \
+ add_gc_prefix.c
CORD_INCLUDE_FILES= $(srcdir)/gc.h $(srcdir)/cord/cord.h $(srcdir)/cord/ec.h \
- $(srcdir)/cord/cord_pos.h
+ $(srcdir)/cord/private/cord_pos.h
# Libraries needed for curses applications. Only needed for de.
CURSES= -lcurses -ltermlib
@@ -74,16 +123,13 @@ SPECIALCFLAGS =
# not time-critical anyway.
# Set SPECIALCFLAGS to -q nodirect_code on Encore.
-ALPHACFLAGS = -non_shared
-# Extra flags for linking compilation on DEC Alpha
-
all: gc.a gctest
pcr: PCR-Makefile gc_private.h gc_hdrs.h gc.h config.h mach_dep.o $(SRCS)
make -f PCR-Makefile depend
make -f PCR-Makefile
-$(OBJS) test.o: $(srcdir)/gc_priv.h $(srcdir)/gc_hdrs.h $(srcdir)/gc.h \
+$(OBJS) test.o dyn_load.o dyn_load_sunos53.o: $(srcdir)/gc_priv.h $(srcdir)/gc_hdrs.h $(srcdir)/gc.h \
$(srcdir)/config.h $(srcdir)/gc_typed.h Makefile
# The dependency on Makefile is needed. Changing
# options such as -DSILENT affects the size of GC_arrays,
@@ -91,37 +137,72 @@ $(OBJS) test.o: $(srcdir)/gc_priv.h $(srcdir)/gc_hdrs.h $(srcdir)/gc.h \
mark.o typd_mlc.o finalize.o: $(srcdir)/gc_mark.h
-gc.a: $(OBJS)
- $(AR) ru gc.a $(OBJS)
- $(RANLIB) gc.a || cat /dev/null
+gc.a: $(OBJS) dyn_load.o
+ rm -f on_sparc_sunos5
+ ./if_mach SPARC SUNOS5 touch on_sparc_sunos5
+ ./if_mach SPARC SUNOS5 $(AR) rus gc.a $(OBJS) dyn_load.o
+ ./if_not_there on_sparc_sunos5 $(AR) ru gc.a $(OBJS) dyn_load.o
+ ./if_not_there on_sparc_sunos5 $(RANLIB) gc.a || cat /dev/null
# ignore ranlib failure; that usually means it doesn't exist, and isn't needed
+libgc.a:
+ make CFLAGS="$(LIBGC_CFLAGS)" clean gc.a gcc_support.o
+ mv gc.a libgc.a
+ rm -f on_sparc_sunos5
+ ./if_mach SPARC SUNOS5 touch on_sparc_sunos5
+ ./if_mach SPARC SUNOS5 $(AR) rus libgc.a gcc_support.o
+ ./if_not_there on_sparc_sunos5 $(AR) ru libgc.a gcc_support.o
+ ./if_not_there on_sparc_sunos5 $(RANLIB) libgc.a || cat /dev/null
+
cords: $(CORD_OBJS) cord/cordtest
- $(AR) ru gc.a $(CORD_OBJS)
- $(RANLIB) gc.a || cat /dev/null
- cp $(srcdir)/cord/cord.h include/cord.h
- cp $(srcdir)/cord/ec.h include/ec.h
- cp $(srcdir)/cord/cord_pos.h include/cord_pos.h
-
-gc_c++.o: $(srcdir)/gc_c++.cc $(srcdir)/gc_c++.h
- $(CXX) -c -O $(srcdir)/gc_c++.cc
+ rm -f on_sparc_sunos5
+ ./if_mach SPARC SUNOS5 touch on_sparc_sunos5
+ ./if_mach SPARC SUNOS5 $(AR) rus gc.a $(CORD_OBJS)
+ ./if_not_there on_sparc_sunos5 $(AR) ru gc.a $(CORD_OBJS)
+ ./if_not_there on_sparc_sunos5 $(RANLIB) gc.a || cat /dev/null
+
+gc_cpp.o: $(srcdir)/gc_cpp.cc $(srcdir)/gc_cpp.h $(srcdir)/gc.h Makefile
+ $(CXX) -c $(CXXFLAGS) $(srcdir)/gc_cpp.cc
-c++: gc_c++.o $(srcdir)/gc_c++.h
- $(AR) ru gc.a gc_c++.o
- $(RANLIB) gc.a || cat /dev/null
- cp $(srcdir)/gc_c++.h include/gc_c++.h
+test_cpp: $(srcdir)/test_cpp.cc $(srcdir)/gc_cpp.h gc_cpp.o $(srcdir)/gc.h gc.a
+ rm -f test_cpp
+ ./if_mach SPARC SUNOS5 $(CXX) $(CXXFLAGS) -o test_cpp $(srcdir)/test_cpp.cc gc_cpp.o gc.a -lthread -ldl
+ ./if_not_there test_cpp $(CXX) $(CXXFLAGS) -o test_cpp $(srcdir)/test_cpp.cc gc_cpp.o gc.a
+
+c++: gc_cpp.o $(srcdir)/gc_cpp.h test_cpp
+ rm -f on_sparc_sunos5
+ ./if_mach SPARC SUNOS5 touch on_sparc_sunos5
+ ./if_mach SPARC SUNOS5 $(AR) rus gc.a gc_cpp.o
+ ./if_not_there on_sparc_sunos5 $(AR) ru gc.a gc_cpp.o
+ ./if_not_there on_sparc_sunos5 $(RANLIB) gc.a || cat /dev/null
+ ./test_cpp 1
+
+dyn_load_sunos53.o: dyn_load.c
+ $(CC) $(CFLAGS) -DSUNOS53_SHARED_LIB -c $(srcdir)/dyn_load.c -o $@
+
+# SunOS5 shared library version of the collector
+libgc.so: $(OBJS) dyn_load_sunos53.o
+ $(CC) -G -o libgc.so $(OBJS) dyn_load_sunos53.o -ldl
+
+# Alpha/OSF shared library version of the collector
+libalphagc.so: $(OBJS)
+ ld -shared -o libalphagc.so $(OBJS) dyn_load.o -lc
+
+# IRIX shared library version of the collector
+libirixgc.so: $(OBJS) dyn_load.o
+ ld -shared -o libirixgc.so $(OBJS) dyn_load.o -lc
mach_dep.o: $(srcdir)/mach_dep.c $(srcdir)/mips_mach_dep.s $(srcdir)/rs6000_mach_dep.s if_mach if_not_there
rm -f mach_dep.o
- ./if_mach MIPS "" as -o mach_dep.o $(srcdir)/mips_mach_dep.s
- ./if_mach RS6000 "" as -o mach_dep.o $(srcdir)/rs6000_mach_dep.s
- ./if_mach ALPHA "" as -o mach_dep.o $(srcdir)/alpha_mach_dep.s
- ./if_mach SPARC SUNOS5 as -o mach_dep.o $(srcdir)/sparc_mach_dep.s
+ ./if_mach MIPS "" $(AS) -o mach_dep.o $(srcdir)/mips_mach_dep.s
+ ./if_mach RS6000 "" $(AS) -o mach_dep.o $(srcdir)/rs6000_mach_dep.s
+ ./if_mach ALPHA "" $(AS) -o mach_dep.o $(srcdir)/alpha_mach_dep.s
+ ./if_mach SPARC SUNOS5 $(AS) -o mach_dep.o $(srcdir)/sparc_mach_dep.s
./if_not_there mach_dep.o $(CC) -c $(SPECIALCFLAGS) $(srcdir)/mach_dep.c
mark_rts.o: $(srcdir)/mark_rts.c if_mach if_not_there
rm -f mark_rts.o
- ./if_mach ALPHA "" $(CC) -c $(CFLAGS) -Wo,-notail $(srcdir)/mark_rts.c
+ -./if_mach ALPHA "" $(CC) -c $(CFLAGS) -Wo,-notail $(srcdir)/mark_rts.c
./if_not_there mark_rts.o $(CC) -c $(CFLAGS) $(srcdir)/mark_rts.c
# work-around for DEC optimizer tail recursion elimination bug
@@ -140,14 +221,17 @@ cord/cordprnt.o: $(srcdir)/cord/cordprnt.c $(CORD_INCLUDE_FILES)
cord/cordtest: $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a
rm -f cord/cordtest
- ./if_mach SPARC SUNOS5 $(CC) $(CFLAGS) -o cord/cordtest $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a -lthread
- ./if_not_there cord/cord_test $(CC) $(CFLAGS) -o cord/cordtest $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a
+ ./if_mach SPARC SUNOS5 $(CC) $(CFLAGS) -o cord/cordtest $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a -lthread -ldl
+ ./if_mach SPARC DRSNX $(CC) $(CFLAGS) -o cord/cordtest $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a -lucb
+ ./if_not_there cord/cordtest $(CC) $(CFLAGS) -o cord/cordtest $(srcdir)/cord/cordtest.c $(CORD_OBJS) gc.a
-cord/de: $(srcdir)/cord/de.c $(srcdir)/cord/cordbscs.o $(srcdir)/cord/cordxtra.o gc.a
+cord/de: $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a
rm -f cord/de
- ./if_mach SPARC SUNOS5 $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c $(srcdir)/cord/cordbscs.o $(srcdir)/cord/cordxtra.o gc.a $(CURSES) -lthread
- ./if_mach RS6000 "" $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c $(srcdir)/cord/cordbscs.o $(srcdir)/cord/cordxtra.o gc.a -lcurses
- ./if_not_there cord/de $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c $(srcdir)/cord/cordbscs.o $(srcdir)/cord/cordxtra.o gc.a $(CURSES)
+ ./if_mach SPARC SUNOS5 $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(CURSES) -lthread -ldl
+ ./if_mach SPARC DRSNX $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(CURSES) -lucb
+ ./if_mach RS6000 "" $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses
+ ./if_mach I386 LINUX $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a -lcurses
+ ./if_not_there cord/de $(CC) $(CFLAGS) -o cord/de $(srcdir)/cord/de.c cord/cordbscs.o cord/cordxtra.o gc.a $(CURSES)
if_mach: $(srcdir)/if_mach.c $(srcdir)/config.h
$(CC) $(CFLAGS) -o if_mach $(srcdir)/if_mach.c
@@ -156,50 +240,82 @@ if_not_there: $(srcdir)/if_not_there.c
$(CC) $(CFLAGS) -o if_not_there $(srcdir)/if_not_there.c
clean:
- rm -f gc.a test.o gctest output-local output-diff $(OBJS) \
+ rm -f gc.a *.o gctest gctest_dyn_link test_cpp \
setjmp_test mon.out gmon.out a.out core if_not_there if_mach \
$(CORD_OBJS) cord/cordtest cord/de
-rm -f *~
gctest: test.o gc.a if_mach if_not_there
rm -f gctest
- ./if_mach ALPHA "" $(CC) $(CFLAGS) -o gctest $(ALPHACFLAGS) test.o gc.a
- ./if_mach SPARC SUNOS5 $(CC) $(CFLAGS) -o gctest $(CFLAGS) test.o gc.a -lthread
+ ./if_mach SPARC SUNOS5 $(CC) $(CFLAGS) -o gctest test.o gc.a -lthread -ldl
+ ./if_mach SPARC DRSNX $(CC) $(CFLAGS) -o gctest test.o gc.a -lucb
./if_not_there gctest $(CC) $(CFLAGS) -o gctest test.o gc.a
# If an optimized setjmp_test generates a segmentation fault,
# odds are your compiler is broken. Gctest may still work.
# Try compiling setjmp_t.c unoptimized.
setjmp_test: $(srcdir)/setjmp_t.c $(srcdir)/gc.h if_mach if_not_there
- rm -f setjmp_test
- ./if_mach ALPHA "" $(CC) $(CFLAGS) -o setjmp_test $(ALPHACFLAGS) $(srcdir)/setjmp_t.c
- ./if_not_there setjmp_test $(CC) $(CFLAGS) -o setjmp_test $(srcdir)/setjmp_t.c
+ $(CC) $(CFLAGS) -o setjmp_test $(srcdir)/setjmp_t.c
-test: setjmp_test gctest
+test: KandRtest cord/cordtest
+ cord/cordtest
+
+# Those tests that work even with a K&R C compiler:
+KandRtest: setjmp_test gctest
./setjmp_test
./gctest
- make cord/cordtest
- cord/cordtest
-gc.tar: $(SRCS) $(OTHER_FILES)
- tar cvf gc.tar $(SRCS) $(OTHER_FILES)
+add_gc_prefix: add_gc_prefix.c
+ $(CC) -o add_gc_prefix add_gc_prefix.c
+
+gc.tar: $(SRCS) $(OTHER_FILES) add_gc_prefix
+ tar cvfh gc.tar `add_gc_prefix $(SRCS) $(OTHER_FILES)`
pc_gc.tar: $(SRCS) $(OTHER_FILES)
tar cvfX pc_gc.tar pc_excludes $(SRCS) $(OTHER_FILES)
floppy: pc_gc.tar
-mmd a:/cord
+ -mmd a:/cord/private
-mmd a:/include
+ -mmd a:/include/private
mkdir /tmp/pc_gc
cat pc_gc.tar | (cd /tmp/pc_gc; tar xvf -)
-mcopy -tmn /tmp/pc_gc/* a:
-mcopy -tmn /tmp/pc_gc/cord/* a:/cord
-mcopy -mn /tmp/pc_gc/cord/de_win.ICO a:/cord
- -mcopy -tmn /tmp/pc_gc/include/* a:/cord
+ -mcopy -tmn /tmp/pc_gc/cord/private/* a:/cord/private
+ -mcopy -tmn /tmp/pc_gc/include/* a:/include
+ -mcopy -tmn /tmp/pc_gc/include/private/* a:/include/private
rm -r /tmp/pc_gc
gc.tar.Z: gc.tar
compress gc.tar
+gc.tar.gz: gc.tar
+ gzip gc.tar
+
lint: $(CSRCS) test.c
lint -DLINT $(CSRCS) test.c | egrep -v "possible pointer alignment problem|abort|exit|sbrk|mprotect|syscall"
+
+# BTL: added to test shared library version of collector.
+# Currently works only under SunOS5. Requires GC_INIT call from statically
+# loaded client code.
+ABSDIR = `pwd`
+gctest_dyn_link: test.o libgc.so
+ $(CC) -L$(ABSDIR) -R$(ABSDIR) -o gctest_dyn_link test.o -lgc -ldl -lthread
+
+gctest_irix_dyn_link: test.o libirixgc.so
+ $(CC) -L$(ABSDIR) -o gctest_irix_dyn_link test.o -lirixgc
+
+reserved_namespace: $(SRCS)
+ for file in $(SRCS) test.c test_cpp.cc; do \
+ sed s/GC_/_GC_/g < $$file > tmp; \
+ cp tmp $$file; \
+ done
+
+user_namespace: $(SRCS)
+ for file in $(SRCS) test.c test_cpp.cc; do \
+ sed s/_GC_/GC_/g < $$file > tmp; \
+ cp tmp $$file; \
+ done
diff --git a/NT_MAKEFILE b/NT_MAKEFILE
index 2817aa5b..3aa57d9b 100644
--- a/NT_MAKEFILE
+++ b/NT_MAKEFILE
@@ -2,36 +2,58 @@
# DLLs are included in the root set under NT, but not under win32S.
# Use "nmake nodebug=1 all" for optimized versions of library, gctest and editor.
+CPU= i386
!include <ntwin32.mak>
-# We also haven't figured out how to do partial links or build static libraries. Hence a
-# client currently needs to link against all of the following:
+OBJS= alloc.obj reclaim.obj allchblk.obj misc.obj mach_dep.obj os_dep.obj mark_rts.obj headers.obj mark.obj obj_map.obj blacklst.obj finalize.obj new_hblk.obj dbg_mlc.obj malloc.obj stubborn.obj dyn_load.obj typd_mlc.obj ptr_chck.obj gc_cpp.obj
-OBJS= alloc.obj reclaim.obj allchblk.obj misc.obj mach_dep.obj os_dep.obj mark_rts.obj headers.obj mark.obj obj_map.obj blacklst.obj finalize.obj new_hblk.obj dbg_mlc.obj malloc.obj stubborn.obj dyn_load.obj typd_mlc.obj
-
-all: gctest.exe cord\de.exe
+all: gctest.exe cord\de.exe test_cpp.exe
.c.obj:
- $(cc) $(cdebug) $(cflags) $(cvars) -DSMALL_CONFIG -DSILENT -DALL_INTERIOR_POINTERS $*.c /Fo$*.obj
+ $(cc) $(cdebug) $(cflags) $(cvars) -DSMALL_CONFIG -DSILENT -DALL_INTERIOR_POINTERS -D__STDC__ $*.c /Fo$*.obj
+
+.cpp.obj:
+ $(cc) $(cdebug) $(cflags) $(cvars) -DSMALL_CONFIG -DSILENT -DALL_INTERIOR_POINTERS $*.CPP /Fo$*.obj
$(OBJS) test.obj: gc_priv.h gc_hdrs.h gc.h
gc.lib: $(OBJS)
- lib32 /MACHINE:i386 /out:gc.lib $(OBJS)
+ lib /MACHINE:i386 /out:gc.lib $(OBJS)
+# The original NT SDK used lib32 instead of lib
gctest.exe: test.obj gc.lib
# The following works for win32 debugging. For win32s debugging use debugtype:coff
# and add mapsympe line.
- $(link) -debug:full -debugtype:cv $(guiflags) -stack:131072 -out:$*.exe test.obj $(conlibs) gc.lib
+# This produces a "GUI" application that opens no windows and writes to the log file
+# "gc.log". This is done to make the result runnable under win32s.
+ $(link) -debug:full -debugtype:cv $(guiflags) -stack:131072 -out:$*.exe test.obj $(guilibs) gc.lib
# mapsympe -n -o gctest.sym gctest.exe
cord\de_win.rbj: cord\de_win.res
cvtres -$(CPU) cord\de_win.res -o cord\de_win.rbj
-cord\de.obj cord\de_win.obj: cord\cord.h cord\cord_pos.h cord\de_win.h cord\de_cmds.h
+cord\de.obj cord\de_win.obj: cord\cord.h cord\private\cord_pos.h cord\de_win.h cord\de_cmds.h
cord\de_win.res: cord\de_win.rc cord\de_win.h cord\de_cmds.h
$(rc) $(rcvars) -r -fo cord\de_win.res $(cvars) cord\de_win.rc
+# Cord/de is a real win32 gui application.
cord\de.exe: cord\cordbscs.obj cord\cordxtra.obj cord\de.obj cord\de_win.obj cord\de_win.rbj gc.lib
- $(link) -debug:full -debugtype:cv $(guiflags) -stack:16384 -out:cord\de.exe cord\cordbscs.obj cord\cordxtra.obj cord\de.obj cord\de_win.obj cord\de_win.rbj gc.lib $(guilibs) \ No newline at end of file
+ $(link) -debug:full -debugtype:cv $(guiflags) -stack:16384 -out:cord\de.exe cord\cordbscs.obj cord\cordxtra.obj cord\de.obj cord\de_win.obj cord\de_win.rbj gc.lib $(guilibs)
+
+gc_cpp.obj: gc_cpp.h gc.h
+
+gc_cpp.cpp: gc_cpp.cc
+ copy gc_cpp.cc gc_cpp.cpp
+
+test_cpp.cpp: test_cpp.cc
+ copy test_cpp.cc test_cpp.cpp
+
+# This generates the C++ test executable. The executable expects
+# a single numeric argument, which is the number of iterations.
+# The output appears in the file "gc.log".
+test_cpp.exe: test_cpp.obj gc_cpp.h gc.h gc.lib
+ $(link) -debug:full -debugtype:cv $(guiflags) -stack:16384 -out:test_cpp.exe test_cpp.obj gc.lib $(guilibs)
+
+
+
diff --git a/OS2_MAKEFILE b/OS2_MAKEFILE
index 6e0a0ac7..46ee5e48 100644
--- a/OS2_MAKEFILE
+++ b/OS2_MAKEFILE
@@ -3,10 +3,9 @@
# Adding thread support may be nontrivial, since we haven't yet figured out how to
# look at another thread's registers.
-# We also haven't figured out how to do partial links or build static libraries. Hence a
-# client currently needs to link against all of the following:
+# Significantly revised for GC version 4.4 by Mark Boulter (Jan 1994).
-OBJS= alloc.obj reclaim.obj allchblk.obj misc.obj mach_dep.obj os_dep.obj mark_rts.obj headers.obj mark.obj obj_map.obj blacklst.obj finalize.obj new_hblk.obj dbg_mlc.obj malloc.obj stubborn.obj typd_mlc.obj
+OBJS= alloc.obj reclaim.obj allchblk.obj misc.obj mach_dep.obj os_dep.obj mark_rts.obj headers.obj mark.obj obj_map.obj blacklst.obj finalize.obj new_hblk.obj dbg_mlc.obj malloc.obj stubborn.obj typd_mlc.obj ptr_chck.obj
CORDOBJS= cord\cordbscs.obj cord\cordxtra.obj cord\cordprnt.obj
@@ -20,20 +19,27 @@ all: $(OBJS) gctest.exe cord\cordtest.exe
$(OBJS) test.obj: gc_priv.h gc_hdrs.h gc.h
+## ERASE THE LIB FIRST - if it is already there then this command will fail
+## (make sure it's there or erase will fail!)
+gc.lib: $(OBJS)
+ echo . > gc.lib
+ erase gc.lib
+ LIB gc.lib $(OBJS), gc.lst
+
mach_dep.obj: mach_dep.c
- $(CC) $(CFLAGS) /C mach_dep.c
+ $(CC) $(CFLAGS) /C mach_dep.c
-gctest.exe: test.obj $(OBJS)
- $(CC) $(CFLAGS) /B"/STACK:524288" /Fegctest test.obj $(OBJS)
+gctest.exe: test.obj gc.lib
+ $(CC) $(CFLAGS) /B"/STACK:524288" /Fegctest test.obj gc.lib
-cord\cordbscs.obj: cord\cordbscs.c cord\cord.h cord\cord_pos.h
- $(CC) $(CFLAGS) /C /Focord\cordbscs cord\cordbscs.c
+cord\cordbscs.obj: cord\cordbscs.c cord\cord.h cord\private\cord_pos.h
+ $(CC) $(CFLAGS) /C /Focord\cordbscs cord\cordbscs.c
-cord\cordxtra.obj: cord\cordxtra.c cord\cord.h cord\cord_pos.h cord\ec.h
- $(CC) $(CFLAGS) /C /Focord\cordxtra cord\cordxtra.c
+cord\cordxtra.obj: cord\cordxtra.c cord\cord.h cord\private\cord_pos.h cord\ec.h
+ $(CC) $(CFLAGS) /C /Focord\cordxtra cord\cordxtra.c
-cord\cordprnt.obj: cord\cordprnt.c cord\cord.h cord\cord_pos.h cord\ec.h
- $(CC) $(CFLAGS) /C /Focord\cordprnt cord\cordprnt.c
+cord\cordprnt.obj: cord\cordprnt.c cord\cord.h cord\private\cord_pos.h cord\ec.h
+ $(CC) $(CFLAGS) /C /Focord\cordprnt cord\cordprnt.c
-cord\cordtest.exe: cord\cordtest.c cord\cord.h cord\cord_pos.h cord\ec.h $(CORDOBJS)
- $(CC) $(CFLAGS) /B"/STACK:65536" /Fecord\cordtest cord\cordtest.c $(OBJS) $(CORDOBJS) \ No newline at end of file
+cord\cordtest.exe: cord\cordtest.c cord\cord.h cord\private\cord_pos.h cord\ec.h $(CORDOBJS) gc.lib
+ $(CC) $(CFLAGS) /B"/STACK:65536" /Fecord\cordtest cord\cordtest.c gc.lib $(CORDOBJS) \ No newline at end of file
diff --git a/PCR-Makefile b/PCR-Makefile
index 637ceb7e..a5d04dd4 100644
--- a/PCR-Makefile
+++ b/PCR-Makefile
@@ -1,46 +1,68 @@
-OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o pcr_interface.o blacklst.o finalize.o new_hblk.o real_malloc.o dynamic_load.o dbg_mlc.o malloc.o stubborn.o
+#
+# Default target
+#
-CSRCS= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dynamic_load.c debug_mlc.c malloc.c stubborn.c
+default: gc.o
-SHELL= /bin/sh
+include ../config/common.mk
-# Fix to point to local pcr installation directory.
-PCRDIR= /project/ppcr/dev
-CC= gcc
-CFLAGS= -g -DPCR -I$(PCRDIR) -I$(PCRDIR)/ansi -I$(PCRDIR)/posix
+#
+# compilation flags, etc.
+#
-# We assume that mach_dep.o has already been built by top level makefile. It doesn't
-# care about pcr vs UNIX, and we don't want to repeat that cruft.
-default: gc.o
+CPPFLAGS = $(INCLUDE) $(CONFIG_CPPFLAGS) \
+ -DPCR_NO_RENAME -DPCR_NO_HOSTDEP_ERR
+#CFLAGS = -DPCR -DSILENT $(CONFIG_CFLAGS)
+CFLAGS = -DPCR $(CONFIG_CFLAGS)
+SPECIALCFLAGS = # For code involving asm's
+
+ASPPFLAGS = $(INCLUDE) $(CONFIG_ASPPFLAGS) \
+ -DPCR_NO_RENAME -DPCR_NO_HOSTDEP_ERR -DASM
-all: gc.o test.o gcpcr
+ASFLAGS = $(CONFIG_ASFLAGS)
-gcpcr: gc.o test.o $(PCRDIR)/base/pcr.o $(PCRDIR)/base/PCR_BaseMain.o
- $(CC) -o gcpcr $(PCRDIR)/base/pcr.o $(PCRDIR)/base/PCR_BaseMain.o gc.o test.o -ldl
+LDRFLAGS = $(CONFIG_LDRFLAGS)
-gc.o: $(OBJS)
- -ld -r -o gc.o $(OBJS)
+LDFLAGS = $(CONFIG_LDFLAGS)
#
-# Dependency construction
#
-# NOTE: the makefile must include "# DO NOT DELETE THIS LINE" after the
-# last target. "make depend" will replace everything following that line
-# by a newly-constructed list of dependencies.
#
-depend: $(CSRCS)
- rm -f makedep eddep ; \
- $(CC) -M $(CFLAGS) $(CSRCS) \
- | sed -e '/:$$/d' > makedep ; \
- echo '/^# DO NOT DELETE THIS LINE/+1,$$d' >eddep ; \
- echo '$$r makedep' >>eddep ; \
- echo 'w' >>eddep ; \
- cp PCR-Makefile PCR-Makefile.bak ; \
- ex - PCR-Makefile < eddep ; \
- rm -f eddep makedep
- touch depend
-
-# DO NOT DELETE THIS LINE
+#
+# BEGIN PACKAGE-SPECIFIC PART
+#
+#
+#
+#
+
+# Fix to point to local pcr installation directory.
+PCRDIR= ..
+
+COBJ= alloc.o reclaim.o allchblk.o misc.o os_dep.o mark_rts.o headers.o mark.o obj_map.o pcr_interface.o blacklst.o finalize.o new_hblk.o real_malloc.o dyn_load.o dbg_mlc.o malloc.o stubborn.o checksums.o solaris_threads.o typd_mlc.o ptr_chck.o
+
+CSRC= reclaim.c allchblk.c misc.c alloc.c mach_dep.c os_dep.c mark_rts.c headers.c mark.c obj_map.c pcr_interface.c blacklst.c finalize.c new_hblk.c real_malloc.c dyn_load.c dbg_mlc.c malloc.c stubborn.c checksums.c solaris_threads.c typd_mlc.c ptr_chck.c
+
+SHELL= /bin/sh
+
+default: gc.o
+
+gc.o: $(COBJ) mach_dep.o
+ $(LDR) $(CONFIG_LDRFLAGS) -o gc.o $(COBJ) mach_dep.o
+
+
+mach_dep.o: mach_dep.c mips_mach_dep.s rs6000_mach_dep.s if_mach if_not_there
+ rm -f mach_dep.o
+ ./if_mach MIPS "" as -o mach_dep.o mips_mach_dep.s
+ ./if_mach RS6000 "" as -o mach_dep.o rs6000_mach_dep.s
+ ./if_mach ALPHA "" as -o mach_dep.o alpha_mach_dep.s
+ ./if_mach SPARC SUNOS5 as -o mach_dep.o sparc_mach_dep.s
+ ./if_not_there mach_dep.o $(CC) -c $(SPECIALCFLAGS) mach_dep.c
+
+if_mach: if_mach.c config.h
+ $(CC) $(CFLAGS) -o if_mach if_mach.c
+
+if_not_there: if_not_there.c
+ $(CC) $(CFLAGS) -o if_not_there if_not_there.c
diff --git a/README b/README
index 8cb1c44f..663e899b 100644
--- a/README
+++ b/README
@@ -1,5 +1,5 @@
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
-Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -10,7 +10,7 @@ Permission to modify the code and to distribute modified code is granted,
provided the above notices are retained, and a notice that the code was
modified is included with the above copyright notice.
-This is version 4.1 of a conservative garbage collector for C and C++.
+This is version 4.8 of a conservative garbage collector for C and C++.
HISTORY -
@@ -28,8 +28,11 @@ Robert Brazile (brazile@diamond.bbn.com) originally supplied the ULTRIX code.
Al Dosser (dosser@src.dec.com) and Regis Cridlig (Regis.Cridlig@cl.cam.ac.uk)
subsequently provided updates and information on variation between ULTRIX
systems. Parag Patel (parag@netcom.com) supplied the A/UX code.
-Jesper Peterson(jep@mtiame.mtia.oz.au) supplied the Amiga port.
-Thomas Funke (thf@zelator.in-berlin.de(?)) supplied the NeXT port.
+Jesper Peterson (jep@mtiame.mtia.oz.au) and
+Michel Schinz supplied the Amiga port.
+Thomas Funke (thf@zelator.in-berlin.de(?)) and
+Brian D. Carlstrom (bdc@clark.lcs.mit.edu) supplied the NeXT ports.
+Douglas Steel (doug@wg.icl.co.uk) provided ICL DRS6000 code.
Bill Janssen (janssen@parc.xerox.com) supplied the SunOS dynamic loader
specific code. Manuel Serrano (serrano@cornas.inria.fr) supplied linux and
Sony News specific code. Al Dosser provided Alpha/OSF/1 code. He and
@@ -40,6 +43,7 @@ Brent Benson (brent@jade.ssd.csd.harris.com) ported the collector to
a Motorola 88K processor running CX/UX (Harris NightHawk).
Ari Huttunen (Ari.Huttunen@hut.fi) generalized the OS/2 port to
nonIBM development environments (a nontrivial task).
+Patrick Beard (beard@cs.ucdavis.edu) provided the initial MacOS port.
David Chase, then at Olivetti Research, suggested several improvements.
Scott Schwartz (schwartz@groucho.cse.psu.edu) supplied some of the
code to save and print call stacks for leak detection on a SPARC.
@@ -64,6 +68,13 @@ Boehm, H., "Space Efficient Conservative Garbage Collection", Proceedings
of the ACM SIGPLAN '91 Conference on Programming Language Design and
Implementation, SIGPLAN Notices 28, 6 (June 1993), pp. 197-206.
+ Possible interactions between the collector and optimizing compilers are
+discussed in
+
+Boehm, H., and D. Chase, "A Proposal for GC-safe C Compilation",
+The Journal of C Language Translation 4, 2 (December 1992).
+(Also available from parcftp.xerox.com:pub/gc, among other places.)
+
Unlike the collector described in the second reference, this collector
operates either with the mutator stopped during the entire collection
(default) or incrementally during allocations. (The latter is supported
@@ -82,7 +93,7 @@ is a fairly sophisticated string package "cord" that makes use of the collector.
GENERAL DESCRIPTION
- This is a garbage colecting storage allocator that is intended to be
+ This is a garbage collecting storage allocator that is intended to be
used as a plug-in replacement for C's malloc.
Since the collector does not require pointers to be tagged, it does not
@@ -109,7 +120,7 @@ cause any pointer into an object (or one past the end) to retain the
object. A routine GC_register_displacement is provided to allow for
more controlled interior pointer use in the heap. Defining
ALL_INTERIOR_POINTERS is somewhat dangerous, in that it can result
-in unnecessary memroy retention. However this is much less of a
+in unnecessary memory retention. However this is much less of a
problem than with older collector versions. The routine
GC_register_displacement is described in gc.h.
@@ -133,17 +144,16 @@ read-only data. However if the shared library mechanism can introduce
discontiguous data areas that may contain pointers, then the collector does
need to be informed.
- Signal processing for most signals is normally deferred during collection,
+ Signal processing for most signals may be deferred during collection,
and during uninterruptible parts of the allocation process. Unlike
-standard ANSI C mallocs, it is intended to be safe to invoke malloc
+standard ANSI C mallocs, it can be safe to invoke malloc
from a signal handler while another malloc is in progress, provided
the original malloc is not restarted. (Empirically, many UNIX
-applications already asssume this.) Even this modest level of signal-
-safety may be too expensive on some systems. If so, ENABLE_SIGNALS
-and DISABLE_SIGNALS may be redefined to the empty statement in gc_private.h.
+applications already assume this.) To obtain this level of signal
+safety, remove the definition of -DNO_SIGNALS in Makefile.
The allocator/collector can also be configured for thread-safe operation.
-(Full signal safety can also be acheived, but only at the cost of two system
+(Full signal safety can also be achieved, but only at the cost of two system
calls per malloc, which is usually unacceptable.)
INSTALLATION AND PORTABILITY
@@ -189,9 +199,11 @@ trademarks of their respective holders):
Sun 3
Sun 4 under SunOS 4.X or Solaris2.X (with or without threads)
Vax under 4.3BSD, Ultrix
- Intel 386 or 486 under many operating systems, but not MSDOS.
+ Intel 386 or 486 under most operating systems, but not MSDOS.
(Win32S is somewhat supported, so it is possible to
- build applications for Windows 3.1)
+ build applications for Windows 3.1. There exists a port
+ to DOS with at least one 32 bit extender.
+ However, I don't have source for this.)
Sequent Symmetry (single threaded)
Encore Multimax (single threaded)
MIPS M/120 (and presumably M/2000) (RISC/os 4.0 with BSD libraries)
@@ -201,13 +213,14 @@ trademarks of their respective holders):
HP9000/700
DECstations under Ultrix
DEC Alpha running OSF/1
- SGI workstations under IRIX
+ SGI workstations under IRIX 4 & 5
Sony News
- Apple MacIntosh under A/UX
+ Apple Macintosh under A/UX or MacOS
Commodore Amiga (see README.amiga)
NeXT machines
- In a few cases (Amiga, OS/2, Win32) a separate makefile is supplied.
+ In a few cases (Amiga, OS/2, Win32, MacOS) a separate makefile
+or equivalent is supplied.
Dynamic libraries are completely supported only under SunOS
(and even that support is not functional on the last Sun 3 release),
@@ -225,19 +238,17 @@ On other machines we recommend that you do one of the following:
In all cases we assume that pointer alignment is consistent with that
enforced by the standard C compilers. If you use a nonstandard compiler
-you may have to adjust the alignment parameters defined in gc_private.h.
+you may have to adjust the alignment parameters defined in gc_priv.h.
A port to a machine that is not byte addressed, or does not use 32 bit
-addresses will require a major effort. (Parts of the code try to anticipate
-64 bit addresses. Others will need to be rewritten, since different data
-structures are needed.) A port to MSDOS is hopeless, unless you are willing
-to assume an 80386 or better, and that only flat 32 bit pointers will ever be
-used.
+or 64 bit addresses will require a major effort. A port to MSDOS is hard,
+unless you are willing to assume an 80386 or better, and that only flat
+32 bit pointers will ever need to be seen by the collector.
For machines not already mentioned, or for nonstandard compilers, the
following are likely to require change:
-1. The parameters at the top of gc_private.h.
+1. The parameters in config.h.
The parameters that will usually require adjustment are
STACKBOTTOM, ALIGNMENT and DATASTART. Setjmp_test
prints its guesses of the first two.
@@ -254,44 +265,56 @@ following are likely to require change:
On some machines, it is difficult to obtain such a value that is
valid across a variety of MMUs, OS releases, etc. A number of
alternatives exist for using the collector in spite of this. See the
- discussion in config.h.h immediately preceding the various
+ discussion in config.h immediately preceding the various
definitions of STACKBOTTOM.
2. mach_dep.c.
The most important routine here is one to mark from registers.
The distributed file includes a generic hack (based on setjmp) that
happens to work on many machines, and may work on yours. Try
- compiling and running setjmp_test.c to see whether it has a chance of
+ compiling and running setjmp_t.c to see whether it has a chance of
working. (This is not correct C, so don't blame your compiler if it
doesn't work. Based on limited experience, register window machines
are likely to cause trouble. If your version of setjmp claims that
all accessible variables, including registers, have the value they
had at the time of the longjmp, it also will not work. Vanilla 4.2 BSD
- makes such a claim. SunOS does not.)
+ on Vaxen makes such a claim. SunOS does not.)
If your compiler does not allow in-line assembly code, or if you prefer
not to use such a facility, mach_dep.c may be replaced by a .s file
(as we did for the MIPS machine and the PC/RT).
-
-3. mark_roots.c.
- These are the top level mark routines that determine which sections
- of memory the collector should mark from. This is normally not
- architecture specific (aside from the macros defined in gc_private.h and
- referenced here), but it can be programming language and compiler
- specific. The supplied routine should work for most C compilers
- running under UNIX. Calls to GC_add_roots may sometimes be used
- for similar effect.
-
-4. The sigsetmask call does not appear to exist under early system V UNIX.
- It is used by the collector to block and unblock signals at times at
- which an asynchronous allocation inside a signal handler could not
- be tolerated. Under system V, it is possible to remove these calls,
- provided no storage allocation is done by signal handlers. The
- alternative is to issue a sequence of system V system calls, one per
- signal that is actually used. This may be a bit slow.
-
- For a different versions of Berkeley UN*X or different machines using the
+ At this point enough architectures are supported by mach_dep.c
+ that you will rarely need to do more than adjust for assembler
+ syntax.
+
+3. os_dep.c (and gc_priv.h).
+ Several kinds of operating system dependent routines reside here.
+ Many are optional. Several are invoked only through corresponding
+ macros in gc_priv.h, which may also be redefined as appropriate.
+ The routine GC_register_data_segments is crucial. It registers static
+ data areas that must be traversed by the collector. (User calls to
+ GC_add_roots may sometimes be used for similar effect; see the
+ sketch following this list.)
+ Routines to obtain memory from the OS also reside here.
+ Alternatively this can be done entirely by the macro GET_MEM
+ defined in gc_priv.h. Routines to disable and reenable signals
+ also reside here if they are needed by the macros DISABLE_SIGNALS
+ and ENABLE_SIGNALS defined in gc_priv.h.
+ In a multithreaded environment, the macros LOCK and UNLOCK
+ in gc_priv.h will need to be suitably redefined.
+ The incremental collector requires page dirty information, which
+ is acquired through routines defined in os_dep.c. Unless directed
+ otherwise by config.h, these are implemented as stubs that simply
+ treat all pages as dirty. (This of course makes the incremental
+ collector much less useful.)
+
+4. dyn_load.c
+ This provides a routine that allows the collector to scan data
+ segments associated with dynamic libraries. Often it is not
+ necessary to provide this routine unless user-written dynamic
+ libraries are used.
+
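As a rough sketch of the GC_add_roots alternative mentioned under item 3: a
pointer-holding data area that the collector would otherwise not scan can be
registered explicitly. The array and sizes here are made up for illustration;
GC_add_roots takes the start address and the address one past the end, as
declared in gc.h.

    #include "gc.h"

    /* An area holding pointers into the collected heap.  (On most      */
    /* supported systems ordinary static data is registered             */
    /* automatically by GC_register_data_segments; an explicit call is  */
    /* only needed for areas the collector cannot find on its own.)     */
    static char *my_table[256];

    void register_my_roots(void)
    {
        GC_add_roots((char *)my_table,
                     (char *)(my_table + 256));   /* one past the end      */
        my_table[0] = GC_malloc(128);             /* kept live via my_table */
    }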
+ For a different version of UN*X or different machines using the
Motorola 68000, Vax, SPARC, 80386, NS 32000, PC/RT, or MIPS architecture,
-it should frequently suffice to change definitions in gc_private.h.
+it should frequently suffice to change definitions in config.h.
THE C INTERFACE TO THE ALLOCATOR
@@ -304,7 +327,7 @@ collector doesn't already understand them.) On some machines, it may
be desirable to set GC_stacktop to a good approximation of the stack base.
(This enhances code portability on HP PA machines, since there is no
good way for the collector to compute this value.) Client code may include
-"gc.h", which defines all of the following, plus a few others.
+"gc.h", which defines all of the following, plus many others.
1) GC_malloc(nbytes)
- allocate an object of size nbytes. Unlike malloc, the object is
@@ -321,7 +344,7 @@ good way for the collector to compute this value.) Client code may include
2) GC_malloc_atomic(nbytes)
- allocate an object of size nbytes that is guaranteed not to contain any
- pointers. The returned object is not guaranteed to be cleeared.
+ pointers. The returned object is not guaranteed to be cleared.
(Can always be replaced by GC_malloc, but results in faster collection
times. The collector will probably run faster if large character
arrays, etc. are allocated with GC_malloc_atomic than if they are
@@ -333,7 +356,7 @@ good way for the collector to compute this value.) Client code may include
the old object. The new object is taken to be atomic iff the old one
was. If the new object is composite and larger than the original object,
then the newly added bytes are cleared (we hope). This is very likely
- to allocate a new object, unless MERGE_SIZES is defined in gc_private.h.
+ to allocate a new object, unless MERGE_SIZES is defined in gc_priv.h.
Even then, it is likely to recycle the old object only if the object
is grown in small additive increments (which, we claim, is generally bad
coding practice.)
@@ -341,31 +364,36 @@ good way for the collector to compute this value.) Client code may include
4) GC_free(object)
- explicitly deallocate an object returned by GC_malloc or
GC_malloc_atomic. Not necessary, but can be used to minimize
- collections if performance is critical.
+ collections if performance is critical. Probably a performance
+ loss for very small objects (<= 8 bytes).
-5) GC_expand_hp(number_of_4K_blocks)
+5) GC_expand_hp(bytes)
- Explicitly increase the heap size. (This is normally done automatically
if a garbage collection failed to GC_reclaim enough memory. Explicit
calls to GC_expand_hp may prevent unnecessarily frequent collections at
program startup.)
+
+6) GC_malloc_ignore_off_page(bytes)
+ - identical to GC_malloc, but the client promises to keep a pointer to
+ the somewhere within the first 256 bytes of the object while it is
+ live. (This pointer should nortmally be declared volatile to prevent
+ interference from compiler optimizations.) This is the recommended
+ way to allocate anything that is likely to be larger than 100Kbytes
+ or so. (GC_malloc may result in failure to reclaim such objects.)
+
+7) GC_set_warn_proc(proc)
+ - Can be used to redirect warnings from the collector. Such warnings
+ should be rare, and should not be ignored during code development.
-6) GC_clear_roots()
- - Reset the collectors idea of where static variables containing pointers
- may be located to the empty set of locations. No statically allocated
- variables will be traced from after this call, unless there are
- intervening GC_add_roots calls. The collector will still trace from
- registers and the program stack.
-
-7) GC_add_roots(low_address, high_address_plus_1)
- - Add [low_address, high_address) as an area that may contain root pointers
- and should be traced by the collector. The static data and bss segments
- are considered by default, and should not be added unless GC_clear_roots
- has been called. The number of root areas is currently limited to 50.
- This is intended as a way to register data areas for dynamic libraries,
- or to replace the entire data ans bss segments by smaller areas that are
- known to contain all the roots.
-
-8) Several routines to allow for registration of finalization code.
+8) GC_enable_incremental()
+ - Enables generational and incremental collection. Useful for large
+ heaps on machines that provide access to page dirty information.
+ Some dirty bit implementations may interfere with debugging
+ (by catching address faults) and place restrictions on heap arguments
+ to system calls (since write faults inside a system call may not be
+ handled well).
+
+9) Several routines to allow for registration of finalization code.
User supplied finalization code may be invoked when an object becomes
unreachable. To call (*f)(obj, x) when obj becomes inaccessible, use
GC_register_finalizer(obj, f, x, 0, 0);
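A compact sketch tying several of the entry points above together
(GC_malloc, GC_malloc_atomic, GC_malloc_ignore_off_page,
GC_enable_incremental, and finalizer registration). It only illustrates the
call patterns; the prototypes are those declared in gc.h, and the finalizer
here does nothing useful.

    #include <stdio.h>
    #include "gc.h"

    static void note_death(GC_PTR obj, GC_PTR client_data)
    {
        /* Invoked by the collector once obj has become inaccessible. */
        printf("finalized %p\n", obj);
    }

    int main(void)
    {
        int **v;
        char *buf;
        char * volatile big;       /* volatile, as recommended in item 6 */

        GC_enable_incremental();   /* optional; see item 8 */

        v = GC_malloc(10 * sizeof(int *));        /* pointer-containing, cleared   */
        buf = GC_malloc_atomic(4096);             /* pointer-free, maybe uncleared */
        big = GC_malloc_ignore_off_page(500000);  /* large object, see item 6      */

        GC_register_finalizer(v, note_death, 0, 0, 0);

        buf[0] = 'x';
        v = 0; buf = 0; big = 0;   /* drop references: objects become collectable */
        GC_gcollect();             /* note_death may run during this collection   */
        return 0;
    }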
@@ -383,7 +411,7 @@ considered as a candidate for collection. Careless use may, of course, result
in excessive memory consumption.
Some additional tuning is possible through the parameters defined
-near the top of gc_private.h.
+near the top of gc_priv.h.
If only GC_malloc is intended to be used, it might be appropriate to define:
@@ -398,16 +426,40 @@ and friends.
To avoid name conflicts, client code should avoid this prefix, except when
accessing garbage collector routines or variables.
- Thre are provisions for allocation with explicit type information.
+ There are provisions for allocation with explicit type information.
This is rarely necessary. Details can be found in gc_typed.h.
+THE C++ INTERFACE TO THE ALLOCATOR:
+
+ The Ellis-Hull C++ interface to the collector is included in
+the collector distribution. If you intend to use this, type
+"make c++" after the initial build of the collector is complete.
+See gc_cpp.h for the definition of the interface. This interface
+tries to approximate the Ellis-Detlefs C++ garbage collection
+proposal without compiler changes.
+
+Cautions:
+1. Arrays allocated without new placement syntax are
+allocated as uncollectable objects. They are traced by the
+collector, but will not be reclaimed.
+
+2. Failure to use "make c++" in combination with (1) will
+result in arrays allocated using the default new operator.
+This is likely to result in disaster without linker warnings.
+
+3. If your compiler supports an overloaded new[] operator,
+then gc_cpp.cc and gc_cpp.h should be suitably modified.
+
+4. Many current C++ compilers have deficiencies that
+break some of the functionality. See the comments in gc_cpp.h
+for suggested workarounds.
USE AS LEAK DETECTOR:
The collector may be used to track down leaks in C programs that are
intended to run with malloc/free (e.g. code with extreme real-time or
-portability constraints). To do so define FIND_LEAK somewhere in
-gc_priv.h. This will cause the collector to invoke the report_leak
+portability constraints). To do so define FIND_LEAK in the Makefile.
+This will cause the collector to invoke the report_leak
routine defined near the top of reclaim.c whenever an inaccessible
object is found that has not been explicitly freed.
Productive use of this facility normally involves redefining report_leak
@@ -426,7 +478,9 @@ to symbolic addresses. It was largely supplied by Scott Schwartz.)
Note that the debugging facilities described in the next section can
sometimes be slightly LESS effective in leak finding mode, since in
leak finding mode, GC_debug_free actually results in reuse of the object.
-(Otherwise the object is simply marked invalid.)
+(Otherwise the object is simply marked invalid.) Also note that the test
+program is not designed to run meaningfully in FIND_LEAK mode.
+Use "make gc.a" to build the collector.
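A minimal sketch of the setup described above, assuming the collector was
built with -DFIND_LEAK: the block below becomes inaccessible without ever
being freed, so it should be reported (by default through the report_leak
routine in reclaim.c) at the forced collection.

    #include "gc.h"

    static void lose_some_memory(void)
    {
        char *p = GC_malloc(100);   /* never passed to GC_free                 */
        p = 0;                      /* last reference gone: this is the "leak" */
    }

    int main(void)
    {
        lose_some_memory();
        GC_gcollect();              /* inaccessible, unfreed object reported here */
        return 0;
    }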
DEBUGGING FACILITIES:
@@ -449,7 +503,7 @@ NIL.
GC_debug_malloc checking during garbage collection is enabled
with the first call to GC_debug_malloc. This will result in some
slowdown during collections. If frequent heap checks are desired,
-this can be acheived by explicitly invoking GC_gcollect, e.g. from
+this can be achieved by explicitly invoking GC_gcollect, e.g. from
the debugger.
GC_debug_malloc allocated objects should not be passed to GC_realloc
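A small sketch of how the debugging allocator is normally reached from client
code, assuming the GC_DEBUG/GC_MALLOC macro layer in gc.h (GC_MALLOC expands
to GC_debug_malloc, passing the source file and line): the deliberate write
past the requested size is the kind of error the checking described above may
report at the next collection.

    #define GC_DEBUG        /* must precede the include */
    #include "gc.h"

    int main(void)
    {
        char *p = GC_MALLOC(10);  /* goes through GC_debug_malloc                  */
        p[10] = 'x';              /* one byte past the requested size              */
        GC_gcollect();            /* the heap check may report the smashed object  */
        return 0;
    }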
@@ -553,7 +607,7 @@ may vary.) The incremental/generational collection facility helps,
but is portable only if "stubborn" allocation is used.
Please address bug reports to boehm@parc.xerox.com. If you are
contemplating a major addition, you might also send mail to ask whether
-it's already been done.
+it's already been done (or whether we tried and discarded it).
RECENT VERSIONS:
@@ -595,7 +649,7 @@ for PPCR.
Version 2.2 added debugging allocation, and fixed various bugs. Among them:
- GC_realloc could fail to extend the size of the object for certain large object sizes.
- A blatant subscript range error in GC_printf, which unfortunately
- wasn't excercised on machines with sufficient stack alignment constraints.
+ wasn't exercised on machines with sufficient stack alignment constraints.
- GC_register_displacement did the wrong thing if it was called after
any allocation had taken place.
- The leak finding code would eventually break after 2048 byte
@@ -650,7 +704,7 @@ for PPCR.
a dynamic library.
- A fix for a bug in GC_base that could result in a memory fault.
- A fix for a performance bug (and several other misfeatures) pointed
- out by Dave Detelfs and Al Dosser.
+ out by Dave Detlefs and Al Dosser.
- Use of dirty bit information for static data under Solaris 2.X.
- DEC Alpha/OSF1 support (thanks to Al Dosser).
- Incremental collection on more platforms.
@@ -705,7 +759,7 @@ for PPCR.
that the old version was correct.
- Fixed an incremental collection bug that prevented it from
working at all when HBLKSIZE != getpagesize()
-- Changed dynamic_loading.c to include gc_private.h before testing
+- Changed dynamic_loading.c to include gc_priv.h before testing
DYNAMIC_LOADING. SunOS dynamic library scanning
must have been broken in 3.4.
- Object size rounding now adapts to program behavior.
@@ -733,7 +787,7 @@ Version 3.7:
Version 4.0:
- Added support for Solaris threads (which was possible
- only be reimplementing some fraction of Solaris threads,
+ only by reimplementing some fraction of Solaris threads,
since Sun doesn't currently make the thread debugging
interface available).
- Added non-threads win32 and win32S support.
@@ -749,7 +803,7 @@ Version 4.0:
tables it maintains. (This probably does not matter for well-
-written code. It no doubt does for C++ code that overuses
destructors.)
-- Added typed allocation primitves. Rewrote the marker to
+- Added typed allocation primitives. Rewrote the marker to
accommodate them with more reasonable efficiency. This
change should also speed up marking for GC_malloc allocated
objects a little. See gc_typed.h for new primitives.
@@ -813,3 +867,269 @@ Since version 4.0:
in 4.0. Worked around what appears to be CSet/2 V1.0
optimizer bug.
- Fixed a Makefile bug for target "c++".
+
+Since version 4.1:
+- Multiple bug fixes/workarounds in the Solaris threads version.
+ (It occasionally failed to locate some register contents for
+ marking. It also turns out that thr_suspend and friends are
+ unreliable in Solaris 2.3. Dirty bit reads appear
+ to be unreliable under some weird
+ circumstances. My stack marking code
+ contained a serious performance bug. The new code is
+ extremely defensive, and has not failed in several cpu
+ hours of testing. But no guarantees ...)
+- Added MacOS support (thanks to Patrick Beard.)
+- Fixed several syntactic bugs in gc_c++.h and friends. (These
+ didn't bother g++, but did bother most other compilers.)
+ Fixed gc_c++.h finalization interface. (It didn't.)
+- 64 bit alignment for allocated objects was not guaranteed in a
+ few cases in which it should have been.
+- Added GC_malloc_atomic_ignore_off_page.
+- Added GC_collect_a_little.
+- Added some prototypes to gc.h.
+- Some other minor bug fixes (notably in Makefile).
+- Fixed OS/2 / EMX port (thanks to Ari Huttunen).
+- Fixed AmigaDOS port. (thanks to Michel Schinz).
+- Fixed the DATASTART definition under Solaris. There
+ was a 1 in 16K chance of the collector missing the first
+ 64K of static data (and thus crashing).
+- Fixed some blatant anachronisms in the README file.
+- Fixed PCR-Makefile for upcoming PPCR release.
+
+Since version 4.2:
+- Fixed SPARC alignment problem with GC_DEBUG.
+- Fixed Solaris threads /proc workaround. The real
+ problem was an interaction with mprotect.
+- Incorporated fix from Patrick Beard for gc_c++.h.
+- Slightly improved allocator space utilization by
+ fixing the GC_size_map mechanism.
+- Integrated some Sony News and MIPS RISCos 4.51
+ patches. (Thanks to Nobuyuki Hikichi of
+ Software Research Associates, Inc. Japan)
+- Fixed HP_PA alignment problem. (Thanks to
+ xjam@cork.cs.berkeley.edu.)
+- Added GC_same_obj and friends. Changed GC_base
+ to return 0 for pointers past the end of large objects.
+ Improved GC_base performance with ALL_INTERIOR_POINTERS
+ on machines with a slow integer mod operation.
+ Added GC_PTR_ADD, GC_PTR_STORE, etc. to prepare
+ for preprocessor.
+- changed the default on most UNIX machines to be that
+ signals are not disabled during critical GC operations.
+ This is still ANSI-conforming, though somewhat dangerous
+ in the presence of signal handlers. But the performance
+ cost of the alternative is sometimes problematic.
+ Can be changed back with a minor Makefile edit.
+- renamed IS_STRING in gc.h, to CORD_IS_STRING, thus
+ following my own naming convention. Added the function
+ CORD_to_const_char_star.
+- Fixed a gross bug in GC_finalize. Symptom: occasional
+ address faults in that function. (Thanks to Anselm
+ Baird-Smith (Anselm.BairdSmith@inria.fr).)
+- Added port to ICL DRS6000 running DRS/NX. Restructured
+ things a bit to factor out common code, and remove obsolete
+ code. Collector should now run under SUNOS5 with either
+ mprotect or /proc dirty bits. (Thanks to Douglas Steel
+ (doug@wg.icl.co.uk)).
+- More bug fixes and workarounds for Solaris 2.X. (These were
+ mostly related to putting the collector in a dynamic library,
+ which didn't really work before. Also SOLARIS_THREADS
+ didn't interact well with dl_open.) Thanks to btlewis@eng.sun.com.
+- Fixed a serious performance bug on the DEC Alpha. The text
+ segment was getting registered as part of the root set.
+ (Amazingly, the result was still fast enough that the bug
+ was not conspicuous.) The fix works on OSF/1, version 1.3.
+ Hopefully it also works on other versions of OSF/1 ...
+- Fixed a bug in GC_clear_roots.
+- Fixed a bug in GC_generic_malloc_words_small that broke
+ gc_inl.h. (Reported by Antoine de Maricourt. I broke it
+ in trying to tweak the Mac port.)
+- Fixed some problems with cord/de under Linux.
+- Fixed some cord problems, notably with CORD_riter4.
+- Added DG/UX port.
+ Thanks to Ben A. Mesander (ben@piglet.cr.usgs.gov)
+- Added finalization registration routines with weaker ordering
+ constraints. (This is necessary for C++ finalization with
+ multiple inheritance, since the compiler often adds self-cycles.)
+- Filled the holes in the SCO port. (Thanks to Michael Arnoldus
+ <chime@proinf.dk>.)
+- John Ellis' additions to the C++ support: From John:
+
+* I completely rewrote the documentation in the interface gc_c++.h.
+I've tried to make it both clearer and more precise.
+
+* The definition of accessibility now ignores pointers from a
+finalizable object (an object with a clean-up function) to itself.
+This allows objects with virtual base classes to be finalizable by the
+collector. Compilers typically implement virtual base classes using
+pointers from an object to itself, which under the old definition of
+accessibility prevented objects with virtual base classes from ever
+being collected or finalized.
+
+* gc_cleanup now includes gc as a virtual base. This was enabled by
+the change in the definition of accessibility.
+
+* I added support for operator new[]. Since most (all?) compilers
+don't yet support operator new[], it is conditionalized on
+-DOPERATOR_NEW_ARRAY. The code is untested, but it's trivial and looks
+correct.
+
+* The test program test_gc_c++ tries to test for the C++-specific
+functionality not tested by the other programs.
+- Added <unistd.h> include to misc.c. (Needed for ppcr.)
+- Added PowerMac port. (Thanks to Patrick Beard again.)
+- Fixed "srcdir"-related Makefile problems. Changed things so
+ that all externally visible include files always appear in the
+ include subdirectory of the source. Made gc.h directly
+ includable from C++ code. (These were at Per
+ Bothner's suggestion.)
+- Changed Intel code to also mark from ebp (Kevin Warne's
+ suggestion).
+- Renamed C++ related files so they could live in a FAT
+ file system. (Charles Fiterman's suggestion.)
+- Changed Windows NT Makefile to include C++ support in
+ gc.lib. Added C++ test as Makefile target.
+
+Since version 4.3:
+ - ASM_CLEAR_CODE was erroneously defined for HP
+ PA machines, resulting in a compile error.
+ - Fixed OS/2 Makefile to create a library. (Thanks to
+ Mark Boulter (mboulter@vnet.ibm.com)).
+ - Gc_cleanup objects didn't work if they were created on
+ the stack. Fixed.
+ - One copy of Gc_cpp.h in the distribution was out of
+ synch, and failed to document some known compiler
+ problems with explicit destructor invocation. Partially
+ fixed. There are probably other compilers on which
+ gc_cleanup is miscompiled.
+ - Fixed Makefile to pass C compiler flags to C++ compiler.
+ - Added Mac fixes.
+ - Fixed os_dep.c to work around what appears to be
+ a new and different VirtualQuery bug under newer
+ versions of win32S.
+ - GC_non_gc_bytes was not correctly maintained by
+ GC_free. Fixed. Thanks to James Clark (jjc@jclark.com).
+ - Added GC_set_max_heap_size.
+ - Changed allocation code to ignore blacklisting if it is preventing
+ use of a very large block of memory. This has the advantage
+ that naive code allocating very large objects is much more
+ likely to work. The downside is you might no
+ longer find out that such code should really use
+ GC_malloc_ignore_off_page.
+ - Changed GC_printf under win32 to close and reopen the file
+ between calls. FAT file systems otherwise make the log file
+ useless for debugging.
+ - Added GC_try_to_collect and GC_get_bytes_since_gc. These
+ allow starting an abortable collection during idle times.
+ This facility does not require special OS support. (Thanks to
+ Michael Spertus of Geodesic Systems for suggesting this. It was
+ actually an easy addition. Kumar Srikantan previously added a similar
+ facility to a now ancient version of the collector. At the time
+ this was much harder, and the result was less convincing.)
+ - Added some support for the Borland development environment. (Thanks
+ to John Ellis and Michael Spertus.)
+ - Removed a misfeature from checksums.c that caused unexpected
+ heap growth. (Thanks to Scott Schwartz.)
+ - Changed finalize.c to call WARN if it encounters a finalization cycle.
+ WARN is defined in gc_priv.h to write a message, usually to stdout.
+ In many environments, this may be inappropriate.
+ - Renamed NO_PARAMS in gc.h to GC_NO_PARAMS, thus adhering to my own
+ naming convention.
+ - Added GC_set_warn_proc to intercept warnings.
+ - Fixed Amiga port. (Thanks to Michel Schinz (schinz@alphanet.ch).)
+ - Fixed a bug in mark.c that could result in an access to unmapped
+ memory from GC_mark_from_mark_stack on machines with unaligned
+ pointers.
+ - Fixed a win32 specific performance bug that could result in scanning of
+ objects allocated with the system malloc.
+ - Added REDIRECT_MALLOC.
+
+Since version 4.4:
+ - Fixed many minor and one major README bugs. (Thanks to Franklin Chen
+ (chen@adi.com) for pointing out many of them.)
+ - Fixed ALPHA/OSF/1 dynamic library support. (Thanks to Jonathan Bachrach
+ (jonathan@harlequin.com)).
+ - Added incremental GC support (MPROTECT_VDB) for Linux (with some
+ help from Bruno Haible).
+ - Altered SPARC recognition tests in gc.h and config.h (mostly as
+ suggested by Fergus Henderson).
+ - Added basic incremental GC support for win32, as implemented by
+ Windows NT and Windows 95. GC_enable_incremental is a noop
+ under win32s, which doesn't implement enough of the VM interface.
+ - Added -DLARGE_CONFIG.
+ - Fixed GC_..._ignore_off_page to also function without
+ -DALL_INTERIOR_POINTERS.
+ - (Hopefully) fixed RS/6000 port. (Only the test was broken.)
+ - Fixed a performance bug in the nonincremental collector running
+ on machines supporting incremental collection with MPROTECT_VDB
+ (e.g. SunOS 4, DEC AXP). This turned into a correctness bug under
+ win32s with win32 incremental collection. (Not all memory protection
+ was disabled.)
+ - Fixed some ppcr related bit rot.
+ - Caused dynamic libraries to be unregistered before reregistering.
+ The old way turned out to be a performance bug on some machines.
+ - GC_root_size was not properly maintained under MSWIN32.
+ - Added -DNO_DEBUGGING and GC_dump.
+ - Fixed a couple of bugs arising with SOLARIS_THREADS +
+ REDIRECT_MALLOC.
+ - Added NetBSD/M68K port. (Thanks to Peter Seebach
+ <seebs@taniemarie.solon.com>.)
+ - Fixed a serious realloc bug. For certain object sizes, the collector
+ wouldn't scan the expanded part of the object. (Thanks to Clay Spence
+ (cds@peanut.sarnoff.com) for noticing the problem, and helping me to
+ track it down.)
+
+Since version 4.5:
+ - Added Linux ELF support. (Thanks to Arrigo Triulzi <arrigo@ic.ac.uk>.)
+ - GC_base crashed if it was called before any other GC_ routines.
+ This could happen if a gc_cleanup object was allocated outside the heap
+ before any heap allocation.
+ - The heap expansion heuristic was not stable if all objects had finalization
+ enabled. Fixed finalize.c to count memory in finalization queue and
+ avoid explicit deallocation. Changed alloc.c to also consider this count.
+ (This is still not recommended. It's expensive if nothing else.) Thanks
+ to John Ellis for pointing this out.
+ - GC_malloc_uncollectable(0) was broken. Thanks to Phong Vo for pointing
+ this out.
+ - The collector didn't compile under Linux 1.3.X. (Thanks to Fred Gilham for
+ pointing this out.) The current workaround is ugly, but expected to be
+ temporary.
+ - Fixed a formatting problem for SPARC stack traces.
+ - Fixed some '=='s in os_dep.c that should have been assignments.
+ Fortunately these were in code that should never be executed anyway.
+ (Thanks to Fergus Henderson.)
+ - Fixed the heap block allocator to only drop blacklisted blocks in small
+ chunks. Made BL_LIMIT self adjusting. (Both of these were in response
+ to heap growth observed by Paul Graham.)
+ - Fixed the Metrowerks/68K Mac code to also mark from a6. (Thanks
+ to Patrick Beard.)
+ - Significantly updated README.debugging.
+ - Fixed some problems with longjmps out of signal handlers, especially under
+ Solaris. Added a workaround for the fact that siglongjmp doesn't appear to
+ do the right thing with -lthread under Solaris.
+ - Added MSDOS/djgpp port. (Thanks to Mitch Harris (maharri@uiuc.edu).)
+ - Added "make reserved_namespace" and "make user_namespace". The
+ first renames ALL "GC_xxx" identifiers as "_GC_xxx". The second is the
+ inverse transformation. Note that doing this is guaranteed to break all
+ clients written for the other names.
+ - descriptor field for kind NORMAL in GC_obj_kinds with ADD_BYTE_AT_END
+ defined should be -ALIGNMENT not WORDS_TO_BYTES(-1). This is
+ a serious bug on machines with pointer alignment of less than a word.
+ - GC_ignore_self_finalize_mark_proc didn't handle pointers to very near the
+ end of the object correctly. Caused failures of the C++ test on a DEC Alpha
+ with g++.
+ - gc_inl.h still had problems. Partially fixed. Added warnings at the
+ beginning to hopefully specify the remaining dangers.
+ - Added DATAEND definition to config.h.
+ - Fixed some of the .h file organization. Fixed "make floppy".
+
+Since version 4.6:
+ - Fixed some compilation problems with -DCHECKSUMS (thanks to Ian Searle)
+ - Updated some Mac specific files to synchronize with Patrick Beard.
+ - Fixed a serious bug for machines with non-word-aligned pointers.
+ (Thanks to Patrick Beard for pointing out the problem. The collector
+ should fail almost any conceivable test immediately on such machines.)
+
+Since version 4.7:
+ - Changed a "comment" in a MacOS specific part of mach_dep.c that caused
+ gcc to fail on other platforms.
diff --git a/README.Mac b/README.Mac
new file mode 100644
index 00000000..180de7e5
--- /dev/null
+++ b/README.Mac
@@ -0,0 +1,50 @@
+README.Mac
+----------
+
+v4.3 of the collector now runs under Symantec C++/THINK C v7.0.4, and
+Metrowerks C/C++ v4.5, for both 68K and PowerPC. Project files are provided
+to build and test the collector under both development systems.
+
+Configuration
+-------------
+
+Under both development systems, the collector is configured with a prefix file
+that sets the preprocessor directives; this file is called "MacOS_config.h".
+A second prefix file, "MacOS_Test_config.h", is provided for testing the collector.
+
+Testing
+-------
+
+To test the collector (always a good idea), build one of the gctest projects,
+gctest.π (Symantec C++/THINK C), mw/gctest.68K.π, or mw/gctest.PPC.π. The
+test will ask you how many times to run; 1 should be sufficient.
+
+Building
+--------
+
+For your convenience project files for the major Macintosh development
+systems are provided.
+
+For Symantec C++/THINK C, you must build the two projects gclib-1.π and
+gclib-2.π. It has to be split up because the collector has more than 32k
+of static data and no library can have more than this in the Symantec
+environment. (Future versions will probably fix this.)
+
+For Metrowerks C/C++ 4.5, you build gc.68K.π/gc.PPC.π and the result will
+be a library called gc.68K.lib/gc.PPC.lib.
+
+Using
+-----
+
+Under Symantec C++/THINK C, you can just add the gclib-1.π and gclib-2.π
+projects to your own project. Under Metrowerks, you add gc.68K.lib or
+gc.PPC.lib and two additional files. You add the files called datastart.c
+and dataend.c to your project, bracketing all files that use the collector.
+See mw/gctest.π for an example.
+
+Include the projects/libraries you built above into your own project,
+#include "gc.h", and call GC_malloc. You don't have to call GC_free.
+
+
+Patrick C. Beard
+January 4, 1995
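
As an illustration of the "Using" section above, here is a minimal sketch
(assuming the library/project setup from the "Building" section is already in
place; only gc.h and GC_malloc are taken from the collector's interface):

#include <stdio.h>
#include "gc.h"

int main(void)
{
    int i;

    for (i = 0; i < 1000; i++) {
        /* Collectable allocation; no GC_free call is required. */
        char *p = (char *)GC_malloc(64);
        if (p == 0) return 1;
        p[0] = (char)i;    /* touch the object */
    }
    printf("allocated 1000 collectable objects\n");
    return 0;
}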
diff --git a/README.QUICK b/README.QUICK
index 98947660..3273c8ba 100644
--- a/README.QUICK
+++ b/README.QUICK
@@ -37,3 +37,5 @@ and calls to realloc by calls to GC_REALLOC. If the object is known
to never contain pointers, use GC_MALLOC_ATOMIC instead of
GC_MALLOC.
+Define GC_DEBUG before including gc.h for additional checking.
+
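
A small sketch of the debugging setup described above; the point is simply
that GC_DEBUG must be defined before gc.h is included, so that the GC_MALLOC
macro expands to the checking, call-site-recording version:

#define GC_DEBUG
#include "gc.h"

struct node { struct node *next; int value; };

struct node *cons(int value, struct node *tail)
{
    /* With GC_DEBUG defined, GC_MALLOC records the allocation site
       and adds consistency checks around the object. */
    struct node *n = (struct node *)GC_MALLOC(sizeof(struct node));

    n->value = value;
    n->next = tail;
    return n;
}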
diff --git a/README.amiga b/README.amiga
index cfb1fe81..865642be 100644
--- a/README.amiga
+++ b/README.amiga
@@ -1,4 +1,54 @@
+===========================================================================
+ Michel Schinz's notes
+===========================================================================
+WHO DID WHAT
+
+The original Amiga port was made by Jesper Peterson. I (Michel Schinz)
+modified it slightly to reflect the changes made in the new official
+distributions, and to take advantage of the new SAS/C 6.x features. I also
+created a makefile to compile the "cord" package (see the cord
+subdirectory).
+
+TECHNICAL NOTES
+
+In addition to Jesper's notes, I have the following to say:
+
+- Starting with version 4.3, gctest checks to see if the code segment is
+ added to the root set or not, and complains if it is. Previous versions
+ of this Amiga port added the code segment to the root set, so I tried to
+ fix that. The only problem is that, as far as I know, it is impossible to
+ know which segments are code segments and which are data segments (there
+ are indeed solutions to this problem, like scanning the program on disk
+ or patching the LoadSeg functions, but they are rather complicated). The
+ solution I have chosen (see os_dep.c) is to test whether the program
+ counter is in the segment we are about to add to the root set, and if it
+ is, to skip the segment. The problems are that this solution is rather
+ awkward and that it works only for one code segment. This means that if
+ your program has more than one code segment, all of them but one will be
+ added to the root set. This isn't a big problem in fact, since the
+ collector will continue to work correctly, but it may be slower.
+
+ Anyway, the code which decides whether to skip a segment or not can be
+ removed simply by not defining AMIGA_SKIP_SEG. But notice that if you do
+ so, gctest will complain (it will say that "GC_is_visible produced wrong
+ failure indication"). However, it may be useful if you happen to have
+ pointers stored in a code segment (you really shouldn't).
+
+ If anyone has a good solution to the problem of finding, when a program
+ is loaded in memory, whether a segment is a code or a data segment,
+ please let me know.
+
+PROBLEMS
+
+If you have any problem with this version, please contact me at
+schinz@alphanet.ch (but do *not* send long files, since we pay for
+every mail!).
+
+===========================================================================
+ Jesper Peterson's notes
+===========================================================================
+
ADDITIONAL NOTES FOR AMIGA PORT
These notes assume some familiarity with Amiga internals.
diff --git a/README.debugging b/README.debugging
new file mode 100644
index 00000000..bd8dc459
--- /dev/null
+++ b/README.debugging
@@ -0,0 +1,55 @@
+Debugging suggestions:
+
+****If you get warning messages informing you that the collector needed to allocate blacklisted blocks:
+
+0) Ignore these warnings while you are using GC_DEBUG. Some of the routines mentioned below don't have debugging equivalents. (Alternatively, write the missing routines and send them to me.)
+
+1) Replace allocator calls that request large blocks with calls to GC_malloc_ignore_off_page or GC_malloc_atomic_ignore_off_page. You may want to set a breakpoint in GC_default_warn_proc to help you identify such calls. Make sure that a pointer to somewhere near the beginning of the resulting block is maintained in a (preferably volatile) variable as long as the block is needed. (A sketch of this appears after point 4 below.)
+
+2) If the large blocks are allocated with realloc, I suggest instead allocating them with something like the following. Note that the realloc size increment should be fairly large (e.g. a factor of 3/2) for this to exhibit reasonable performance. But we all know we should do that anyway.
+
+void * big_realloc(void *p, size_t new_size)
+{
+ size_t old_size = GC_size(p);
+ void * result;
+
+ if (new_size <= 10000) return(GC_realloc(p, new_size));
+ if (new_size <= old_size) return(p);
+ result = GC_malloc_ignore_off_page(new_size);
+ if (result == 0) return(0);
+ memcpy(result,p,old_size);
+ GC_free(p);
+ return(result);
+}
+
+3) In the unlikely case that even relatively small object (<20KB) allocations are triggering these warnings, then your address space contains lots of "bogus pointers", i.e. values that appear to be pointers but aren't. Usually this can be solved by using GC_malloc_atomic or the routines in gc_typed.h to allocate large pointerfree regions of bitmaps, etc. Sometimes the problem can be solved with trivial changes of encoding in certain values. It is possible, though not pleasant, to identify the source of the bogus pointers by setting a breakpoint in GC_add_to_black_list_stack, and looking at the value of current_p in the GC_mark_from_mark_stack frame. Current_p contains the address of the bogus pointer.
+
+4) If you get only a fixed number of these warnings, you are probably only introducing a bounded leak by ignoring them. If the data structures being allocated are intended to be permanent, then it is also safe to ignore them. The warnings can be turned off by calling GC_set_warn_proc with a procedure that ignores these warnings (e.g. by doing absolutely nothing).
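
For illustration, a sketch of points 1) and 4) above. GC_malloc_ignore_off_page and GC_set_warn_proc are the documented entry points; the exact GC_warn_proc argument types are assumed to match the typedef in gc.h of this version, and the helper names are made up:

#include <stddef.h>
#include "gc.h"

/* Point 1): allocate a large block with GC_malloc_ignore_off_page and
   keep a pointer near its beginning in a volatile variable for as long
   as the block is in use. */
void fill_big_buffer(size_t len)
{
    char * volatile base = (char *)GC_malloc_ignore_off_page(len);
    size_t i;

    if (base == 0) return;
    for (i = 0; i < len; i++) base[i] = 0;
    /* ... use the buffer; "base" stays live until we are done ... */
}

/* Point 4): a warning procedure that simply discards the warnings.
   The (char *, GC_word) signature is assumed from gc.h. */
static void discard_warning(char *msg, GC_word arg)
{
    /* intentionally empty */
}

void silence_blacklist_warnings(void)
{
    GC_set_warn_proc(discard_warning);
}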
+
+
+****If the collector dies in GC_malloc while trying to remove a free list element:
+
+1) With > 99% probability, you wrote past the end of an allocated object. Try setting GC_DEBUG and using the debugging facilities in gc.h.
+
+
+****If the heap grows too much:
+
+1) Consider using GC_malloc_atomic for objects containing nonpointers. This is especially important for large arrays containing compressed data, pseudo-random numbers, and the like. (This isn't all that likely to solve your problem, but it's a useful and easy optimization anyway, and this is a good time to try it.) If you allocate large objects containing only one or two pointers at the beginning, either try the typed allocation primitives in gc.h, or separate out the pointerfree component. (A sketch of the GC_malloc_atomic approach follows this list.)
+2) If you are using the collector in its default mode, with interior pointer recognition enabled, consider using GC_malloc_ignore_off_page to allocate large objects. (See gc.h and above for details. Large means > 100K in most environments.)
+3) GC_print_block_list() will print a list of all currently allocated heap blocks and what size objects they contain. GC_print_hblkfreelist() will print a list of free heap blocks, and whether they are blacklisted. GC_dump calls both of these, and also prints information about heap sections, and root segments.
+4) Write a tool that traces back references to the appropriate root. Send me the code. (I have code that does this for old PCR.)
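
A sketch of the first suggestion above: pointer-free data is allocated with GC_malloc_atomic so it is never scanned, and a large object with a single leading pointer is split into a small pointer-containing header plus an atomic payload (the struct and function names here are only illustrative):

#include <stddef.h>
#include "gc.h"

/* Pointer-free payload: the collector never scans it for pointers. */
static double *alloc_samples(size_t n)
{
    return (double *)GC_malloc_atomic(n * sizeof(double));
}

/* Instead of one huge object whose first word is a pointer, keep the
   pointer in a small normally-allocated header and the bulk data in
   the atomic block. */
struct sample_set {
    double *data;     /* the only pointer field */
    size_t count;
};

struct sample_set *make_sample_set(size_t n)
{
    struct sample_set *s =
        (struct sample_set *)GC_malloc(sizeof(struct sample_set));

    s->data = alloc_samples(n);
    s->count = n;
    return s;
}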
+
+
+****If the collector appears to be losing objects:
+
+1) Replace all calls to GC_malloc_atomic and typed allocation by GC_malloc calls. If this fixes the problem, gradually reinsert your optimizations.
+2) You may also want to try the safe(r) pointer manipulation primitives in gc.h. But those are hard to use until the preprocessor becomes available.
+3) Try using the GC_DEBUG facilities. This is less likely to be successful here than if the collector crashes.
+[The rest of these are primarily for wizards. You shouldn't need them unless you're doing something really strange, or debugging a collector port.]
+4) Don't turn on incremental collection. If that fixes the problem, suspect a bug in the dirty bit implementation. Try compiling with -DCHECKSUMS to check for modified, but supposedly clean, pages.
+5) On a SPARC, in a single-threaded environment, GC_print_callers(GC_arrays._last_stack) prints a cryptic stack trace as of the time of the last collection. (You will need a debugger to decipher the result.) The question to ask then is "why should this object have been accessible at the time of the last collection? Where was a pointer to it stored?". This facility should be easy to add for some other collector ports (namely if it's easy to traverse stack frames), but will be hard for others.
+6) "print *GC_find_header(p)" in dbx or gdb will print the garbage collector block header information associated with the object p (e.g. object size, etc.)
+7) GC_is_marked(p) determines whether p is the base address of a marked object. Note that objects allocated since the last collection should not be marked, and that unmarked objects are reclaimed incrementally. It's usually most interesting to set a breakpoint in GC_finish_collection and then to determine how much of the damaged data structure is marked at that point.
+8) Look at the tracing facility in mark.c. (Ignore this suggestion unless you are very familiar with collector internals.)
+
+
diff --git a/README.win32 b/README.win32
index 1eb77668..b8fb3cd2 100644
--- a/README.win32
+++ b/README.win32
@@ -3,13 +3,15 @@ is good reason to believe this is fixable. (SRC M3 works with
NT threads.)
The collector has only been compiled under Windows NT, with the
-Microsoft tools.
+original Microsoft SDK, with Visual C++ 2.0, and with Borland 4.5.
It runs under both win32s and win32, but with different semantics.
Under win32, all writable pages outside of the heaps and stack are
scanned for roots. Thus the collector sees pointers in DLL data
segments. Under win32s, only the main data segment is scanned.
-Thus all accessible objects should be excessible from local variables
+(The main data segment should always be scanned. Under some
+versions of win32s, other regions may also be scanned.)
+Thus all accessible objects should be accessible from local variables
or variables in the main data segment. Alternatively, other data
segments (e.g. in DLLs) may be registered with the collector by
calling GC_init() and then GC_register_root_section(a), where
@@ -18,7 +20,7 @@ registrations are ignored, but not terribly quickly.)
(There are two reasons for this. We didn't want to see many 16:16
pointers. And the VirtualQuery call has different semantics under
-the two systems.)
+the two systems, and under different versions of win32s.)
The collector test program "gctest" is linked as a GUI application,
but does not open any windows. Its output appears in the file
@@ -37,3 +39,39 @@ characters are displayed explicitly, but in this case as red text.
This may be suboptimal for some tastes and/or sets of default
window colors.)
+For Microsoft development tools, rename NT_MAKEFILE as
+MAKEFILE. (Make sure that the CPU environment variable is defined
+to be i386.) For Borland tools, use BCC_MAKEFILE. Note that
+Borland's compiler defaults to 1 byte alignment in structures (-a1),
+whereas Visual C++ appears to default to 8 byte alignment (/Zp8).
+The garbage collector in its default configuration EXPECTS AT
+LEAST 4 BYTE ALIGNMENT. Thus the BORLAND DEFAULT MUST
+BE OVERRIDDEN. (In my opinion, it should usually be anyway.
+I expect that -a1 introduces major performance penalties on a
+486 or Pentium.) Note that this changes structure layouts. (As a last
+resort, config.h can be changed to allow 1 byte alignment. But
+this has significant negative performance implications.)
+
+Incremental collection support was recently added. This is
+currently pretty simpleminded. Pages are protected. Protection
+faults are caught by a handler installed at the bottom of the handler
+stack. This is slow, and it interacts poorly with a debugger.
+Whenever possible, I recommend adding a call to
+GC_enable_incremental at the last possible moment, after most
+debugging is complete. Unlike the UNIX versions, no system
+calls are wrapped by the collector itself. It may be necessary
+to wrap ReadFile calls that use a buffer in the heap, so that the
+call does not encounter a protection fault while it's running.
+(As usual, none of this is an issue unless GC_enable_incremental
+is called.)
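For illustration only, one way the ReadFile wrapping mentioned above
might look: the system call reads into a temporary buffer outside the
collected heap, and the copy into the (possibly protected) GC-heap
buffer is done by ordinary user code, where a protection fault is
handled normally. The wrapper name and buffer handling are assumptions,
not part of the collector's interface.

#include <windows.h>
#include <stdlib.h>
#include <string.h>

BOOL safe_ReadFile(HANDLE h, void *gc_buf, DWORD len, DWORD *nread)
{
    void *tmp = malloc(len);    /* not in the collected heap */
    BOOL ok;

    if (tmp == NULL) return FALSE;
    ok = ReadFile(h, tmp, len, nread, NULL);
    if (ok && *nread > 0) memcpy(gc_buf, tmp, *nread);
    free(tmp);
    return ok;
}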
+
+Note that incremental collection is disabled with -DSMALL_CONFIG,
+which is the default for win32. If you need incremental collection,
+undefine SMALL_CONFIG.
+
+Incremental collection is not supported under win32s, and it may not
+be possible to do so. However, win32 applications that attempt to use
+incremental collection should continue to run, since the
+collector detects if it's running under win32s and turns calls to
+GC_enable_incremental() into noops.
+
diff --git a/SCoptions.amiga b/SCoptions.amiga
index 9207e13e..a61e0cb1 100644
--- a/SCoptions.amiga
+++ b/SCoptions.amiga
@@ -1,15 +1,16 @@
CPU=68030
NOSTACKCHECK
-ERRORREXX
OPTIMIZE
+VERBOSE
MAPHUNK
NOVERSION
NOICONS
OPTIMIZERTIME
DEFINE SILENT
-IGNORE=105
-IGNORE=304
-IGNORE=154
+DEFINE AMIGA_SKIP_SEG
IGNORE=85
-IGNORE=100
+IGNORE=154
IGNORE=161
+IGNORE=100
+OPTIMIZERCOMPLEXITY=4
+OPTIMIZERDEPTH=3
diff --git a/SMakefile.amiga b/SMakefile.amiga
index 0727f423..40ca7816 100644
--- a/SMakefile.amiga
+++ b/SMakefile.amiga
@@ -1,45 +1,47 @@
-OBJS= alloc.o reclaim.o allochblk.o misc.o mach_dep.o os_dep.o mark_roots.o headers.o mark.o obj_map.o black_list.o finalize.o new_hblk.o real_malloc.o dynamic_load.o debug_malloc.o malloc.o stubborn.o checksums.o
+OBJS= alloc.o reclaim.o allchblk.o misc.o mach_dep.o os_dep.o mark_rts.o headers.o mark.o obj_map.o blacklst.o finalize.o new_hblk.o real_malloc.o dyn_load.o dbg_mlc.o malloc.o stubborn.o checksums.o typd_mlc.o ptr_chck.o
-INC= gc_private.h gc_headers.h gc.h config.h
+INC= gc_private.h gc_hdrs.h gc.h config.h
-all: gctest setjmp_test
+all: gctest setjmp_t
alloc.o : alloc.c $(INC)
reclaim.o : reclaim.c $(INC)
-allochblk.o : allochblk.c $(INC)
+allchblk.o : allchblk.c $(INC)
misc.o : misc.c $(INC)
os_dep.o : os_dep.c $(INC)
-mark_roots.o : mark_roots.c $(INC)
+mark_rts.o : mark_rts.c $(INC)
headers.o : headers.c $(INC)
mark.o : mark.c $(INC)
obj_map.o : obj_map.c $(INC)
-black_list.o : black_list.c $(INC)
+blacklst.o : blacklst.c $(INC)
finalize.o : finalize.c $(INC)
+ sc noopt finalize.c # There seems to be a bug in the optimizer (V6.51).
+ # gctest won't work if you remove this...
new_hblk.o : new_hblk.c $(INC)
real_malloc.o : real_malloc.c $(INC)
-dynamic_load.o : dynamic_load.c $(INC)
-debug_malloc.o : debug_malloc.c $(INC)
+dyn_load.o : dyn_load.c $(INC)
+dbg_mlc.o : dbg_mlc.c $(INC)
malloc.o : malloc.c $(INC)
stubborn.o : stubborn.c $(INC)
checksums.o : checksums.c $(INC)
-test.o : test.c $(INC)
-
+typd_mlc.o: typd_mlc.c $(INC)
mach_dep.o : mach_dep.c $(INC)
- sc noopt mach_dep.c # optimizer mangles reg save hack
+ptr_chck.o: ptr_chck.c $(INC)
+test.o : test.c $(INC)
gc.lib: $(OBJS)
- oml gc.lib r $(OBJS)
+ oml gc.lib r $(OBJS)
clean:
- delete gc.lib gctest setjmp_test \#?.o
+ delete gc.lib gctest setjmp_t \#?.o
gctest: gc.lib test.o
- slink LIB:c.o test.o to $@ lib gc.lib LIB:sc.lib LIB:scm.lib
+ slink LIB:c.o test.o to $@ lib gc.lib LIB:sc.lib LIB:scm.lib
-setjmp_test: setjmp_test.c gc.h
- sc setjmp_test.c
- slink LIB:c.o $@.o to $@ lib LIB:sc.lib
+setjmp_t: setjmp_t.c gc.h
+ sc setjmp_t.c
+ slink LIB:c.o $@.o to $@ lib LIB:sc.lib
-test: setjmp_test gctest
- setjmp_test
- gctest
+test: setjmp_t gctest
+ setjmp_t
+ gctest
diff --git a/add_gc_prefix.c b/add_gc_prefix.c
new file mode 100644
index 00000000..0d1ab6d4
--- /dev/null
+++ b/add_gc_prefix.c
@@ -0,0 +1,14 @@
+# include <stdio.h>
+
+int main(argc, argv, envp)
+int argc;
+char ** argv;
+char ** envp;
+{
+ int i;
+
+ for (i = 1; i < argc; i++) {
+ printf("gc/%s ", argv[i]);
+ }
+ return(0);
+}
diff --git a/allchblk.c b/allchblk.c
index b8b9f890..c08f9c99 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -11,7 +11,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 1:55 pm PDT */
+/* Boehm, August 9, 1995 5:08 pm PDT */
#define DEBUG
#undef DEBUG
@@ -19,11 +19,11 @@
#include "gc_priv.h"
-/**/
-/* allocate/free routines for heap blocks
-/* Note that everything called from outside the garbage collector
-/* should be prepared to abort at any point as the result of a signal.
-/**/
+/*
+ * allocate/free routines for heap blocks
+ * Note that everything called from outside the garbage collector
+ * should be prepared to abort at any point as the result of a signal.
+ */
/*
* Free heap blocks are kept on a list sorted by address.
@@ -42,6 +42,7 @@ struct hblk *GC_savhbp = (struct hblk *)0; /* heap block preceding next */
/* block to be examined by */
/* GC_allochblk. */
+# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
struct hblk * h = GC_hblkfreelist;
@@ -66,6 +67,8 @@ void GC_print_hblkfreelist()
GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
+# endif /* NO_DEBUGGING */
+
/* Initialize hdr for a block containing the indicated size and */
/* kind of objects. */
/* Return FALSE on failure. */
@@ -110,7 +113,7 @@ struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
-unsigned char flags;
+unsigned char flags; /* IGNORE_OFF_PAGE or 0 */
{
register struct hblk *thishbp;
register hdr * thishdr; /* Header corr. to thishbp */
@@ -164,6 +167,7 @@ unsigned char flags;
(kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
struct hblk * lasthbp = hbp;
ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
+ signed_word orig_avail = size_avail;
signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
HBLKSIZE
: size_needed);
@@ -176,8 +180,8 @@ unsigned char flags;
}
size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
thishbp = lasthbp;
- if (size_avail >= size_needed && thishbp != hbp
- && GC_install_header(thishbp)) {
+ if (size_avail >= size_needed) {
+ if (thishbp != hbp && GC_install_header(thishbp)) {
/* Split the block at thishbp */
thishdr = HDR(thishbp);
/* GC_invalidate_map not needed, since we will */
@@ -191,6 +195,15 @@ unsigned char flags;
phdr = hhdr;
hbp = thishbp;
hhdr = thishdr;
+ }
+ } else if (size_needed > (signed_word)BL_LIMIT
+ && orig_avail - size_needed
+ > (signed_word)BL_LIMIT) {
+ /* Punt, since anything else risks unreasonable heap growth. */
+ WARN("Needed to allocate blacklisted block at 0x%lx\n",
+ (word)hbp);
+ thishbp = hbp;
+ size_avail = orig_avail;
} else if (size_avail == 0
&& size_needed == HBLKSIZE
&& prevhbp != 0) {
@@ -203,19 +216,27 @@ unsigned char flags;
/* blocks are unpopular. */
/* A dropped block will be reconsidered at next GC. */
if ((++count & 3) == 0) {
- /* Allocate and drop the block */
- if (GC_install_counts(hbp, hhdr->hb_sz)) {
- phdr -> hb_next = hhdr -> hb_next;
- (void) setup_header(
+ /* Allocate and drop the block in small chunks, to */
+ /* maximize the chance that we will recover some */
+ /* later. */
+ struct hblk * limit = hbp + (hhdr->hb_sz/HBLKSIZE);
+ struct hblk * h;
+
+ phdr -> hb_next = hhdr -> hb_next;
+ for (h = hbp; h < limit; h++) {
+ if (h == hbp || GC_install_header(h)) {
+ hhdr = HDR(h);
+ (void) setup_header(
hhdr,
- BYTES_TO_WORDS(hhdr->hb_sz - HDR_BYTES),
+ BYTES_TO_WORDS(HBLKSIZE - HDR_BYTES),
PTRFREE, 0); /* Cant fail */
- if (GC_debugging_started) {
- BZERO(hbp + HDR_BYTES, hhdr->hb_sz - HDR_BYTES);
- }
- if (GC_savhbp == hbp) GC_savhbp = prevhbp;
+ if (GC_debugging_started) {
+ BZERO(hbp + HDR_BYTES, HBLKSIZE - HDR_BYTES);
+ }
+ }
}
/* Restore hbp to point at free block */
+ if (GC_savhbp == hbp) GC_savhbp = prevhbp;
hbp = prevhbp;
hhdr = phdr;
if (hbp == GC_savhbp) first_time = TRUE;
diff --git a/alloc.c b/alloc.c
index 33629ab6..7da237dc 100644
--- a/alloc.c
+++ b/alloc.c
@@ -12,14 +12,17 @@
* modified is included with the above copyright notice.
*
*/
-/* Boehm, May 19, 1994 2:02 pm PDT */
+/* Boehm, October 9, 1995 1:03 pm PDT */
-# include <stdio.h>
-# include <signal.h>
-# include <sys/types.h>
# include "gc_priv.h"
+# include <stdio.h>
+# ifndef MACOS
+# include <signal.h>
+# include <sys/types.h>
+# endif
+
/*
* Separate free lists are maintained for different sized objects
* up to MAXOBJSZ.
@@ -62,7 +65,7 @@ int GC_full_freq = 4; /* Every 5th collection is a full */
char * GC_copyright[] =
{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers",
-"Copyright (c) 1991-1993 by Xerox Corporation. All rights reserved.",
+"Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.",
"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
" EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK."};
@@ -76,6 +79,29 @@ bool GC_dont_expand = 0;
word GC_free_space_divisor = 4;
+int GC_never_stop_func GC_PROTO((void)) { return(0); }
+
+CLOCK_TYPE GC_start_time;
+
+int GC_timeout_stop_func GC_PROTO((void))
+{
+ CLOCK_TYPE current_time;
+ static unsigned count = 0;
+ unsigned long time_diff;
+
+ if ((count++ & 3) != 0) return(0);
+ GET_TIME(current_time);
+ time_diff = MS_TIME_DIFF(current_time,GC_start_time);
+ if (time_diff >= TIME_LIMIT) {
+# ifdef PRINTSTATS
+ GC_printf0("Abandoning stopped marking after ");
+ GC_printf1("%lu msecs\n", (unsigned long)time_diff);
+# endif
+ return(1);
+ }
+ return(0);
+}
+
/* Return the minimum number of words that must be allocated between */
/* collections to amortize the collection cost. */
static word min_words_allocd()
@@ -118,13 +144,20 @@ word GC_adj_words_allocd()
/* is playing by the rules. */
result = (signed_word)GC_words_allocd
- (signed_word)GC_mem_freed - expl_managed;
- if (result > (signed_word)GC_words_allocd) result = GC_words_allocd;
+ if (result > (signed_word)GC_words_allocd) {
+ result = GC_words_allocd;
/* probably client bug or unfortunate scheduling */
+ }
+ result += GC_words_finalized;
+ /* We count objects enqueued for finalization as though they */
+ /* had been reallocated this round. Finalization is user */
+ /* visible progress. And if we don't count this, we have */
+ /* stability problems for programs that finalize all objects. */
result += GC_words_wasted;
/* This doesn't reflect useful work. But if there is lots of */
/* new fragmentation, the same is probably true of the heap, */
/* and the collection will be correspondingly cheaper. */
- if (result < (signed_word)(GC_words_allocd >> 2)) {
+ if (result < (signed_word)(GC_words_allocd >> 3)) {
/* Always count at least 1/8 of the allocations. We don't want */
/* to collect too infrequently, since that would inhibit */
/* coalescing of free storage blocks. */
@@ -176,16 +209,25 @@ void GC_maybe_gc()
/* We try to mark with the world stopped. */
/* If we run out of time, this turns into */
/* incremental marking. */
- if (GC_stopped_mark(FALSE)) GC_finish_collection();
+ GET_TIME(GC_start_time);
+ if (GC_stopped_mark(GC_timeout_stop_func)) {
+# ifdef SAVE_CALL_CHAIN
+ GC_save_callers(GC_last_stack);
+# endif
+ GC_finish_collection();
+ }
n_partial_gcs++;
}
}
}
+
/*
* Stop the world garbage collection. Assumes lock held, signals disabled.
+ * If stop_func is not GC_never_stop_func, then abort if stop_func returns TRUE.
*/
-void GC_gcollect_inner()
+bool GC_try_to_collect_inner(stop_func)
+GC_stop_func stop_func;
{
# ifdef PRINTSTATS
GC_printf2(
@@ -194,12 +236,37 @@ void GC_gcollect_inner()
(long)WORDS_TO_BYTES(GC_words_allocd));
# endif
GC_promote_black_lists();
- /* GC_reclaim_or_delete_all(); -- not needed: no intervening allocation */
+ /* Make sure all blocks have been reclaimed, so sweep routines */
+ /* don't see cleared mark bits. */
+ /* If we're guaranteed to finish, then this is unnecessary. */
+ if (stop_func != GC_never_stop_func && !GC_reclaim_all(stop_func)) {
+ /* Aborted. So far everything is still consistent. */
+ return(FALSE);
+ }
+ GC_invalidate_mark_state(); /* Flush mark stack. */
GC_clear_marks();
- (void) GC_stopped_mark(TRUE);
+# ifdef SAVE_CALL_CHAIN
+ GC_save_callers(GC_last_stack);
+# endif
+ if (!GC_stopped_mark(stop_func)) {
+ /* We're partially done and have no way to complete or use */
+ /* current work. Reestablish invariants as cheaply as */
+ /* possible. */
+ GC_invalidate_mark_state();
+ GC_unpromote_black_lists();
+ if (GC_incremental) {
+ /* Unlikely. But just invalidating mark state could be */
+ /* expensive. */
+ GC_clear_marks();
+ }
+ return(FALSE);
+ }
GC_finish_collection();
+ return(TRUE);
}
+
+
/*
* Perform n units of garbage collection work. A unit is intended to touch
* roughly a GC_RATE pages. Every once in a while, we do more than that.
@@ -211,7 +278,7 @@ int GC_deficit = 0; /* The number of extra calls to GC_mark_some */
/* Negative values are equivalent to 0. */
extern bool GC_collection_in_progress();
-void GC_collect_a_little(n)
+void GC_collect_a_little_inner(n)
int n;
{
register int i;
@@ -220,7 +287,10 @@ int n;
for (i = GC_deficit; i < GC_RATE*n; i++) {
if (GC_mark_some()) {
/* Need to finish a collection */
- (void) GC_stopped_mark(TRUE);
+# ifdef SAVE_CALL_CHAIN
+ GC_save_callers(GC_last_stack);
+# endif
+ (void) GC_stopped_mark(GC_never_stop_func);
GC_finish_collection();
break;
}
@@ -231,6 +301,20 @@ int n;
}
}
+int GC_collect_a_little GC_PROTO(())
+{
+ int result;
+ DCL_LOCK_STATE;
+
+ DISABLE_SIGNALS();
+ LOCK();
+ GC_collect_a_little_inner(1);
+ result = (int)GC_collection_in_progress();
+ UNLOCK();
+ ENABLE_SIGNALS();
+ return(result);
+}
+
/*
* Assumes lock is held, signals are disabled.
* We stop the world.
@@ -239,17 +323,17 @@ int n;
* Otherwise we may fail and return FALSE if this takes too long.
* Increment GC_gc_no if we succeed.
*/
-bool GC_stopped_mark(final)
-bool final;
+bool GC_stopped_mark(stop_func)
+GC_stop_func stop_func;
{
- CLOCK_TYPE start_time;
- CLOCK_TYPE current_time;
- unsigned long time_diff;
register int i;
+# ifdef PRINTSTATS
+ CLOCK_TYPE start_time, current_time;
+# endif
- GET_TIME(start_time);
STOP_WORLD();
# ifdef PRINTSTATS
+ GET_TIME(start_time);
GC_printf1("--> Marking for collection %lu ",
(unsigned long) GC_gc_no + 1);
GC_printf2("after %lu allocd bytes + %lu wasted bytes\n",
@@ -263,23 +347,17 @@ bool final;
GC_noop(0,0,0,0,0,0);
GC_initiate_partial();
for(i = 0;;i++) {
- if (GC_mark_some()) break;
- if (final) continue;
- if ((i & 3) == 0) {
- GET_TIME(current_time);
- time_diff = MS_TIME_DIFF(current_time,start_time);
- if (time_diff >= TIME_LIMIT) {
- START_WORLD();
+ if ((*stop_func)()) {
# ifdef PRINTSTATS
- GC_printf0("Abandoning stopped marking after ");
- GC_printf2("%lu iterations and %lu msecs\n",
- (unsigned long)i,
- (unsigned long)time_diff);
+ GC_printf0("Abandoned stopped marking after ");
+ GC_printf1("%lu iterations\n",
+ (unsigned long)i);
# endif
- GC_deficit = i; /* Give the mutator a chance. */
+ GC_deficit = i; /* Give the mutator a chance. */
+ START_WORLD();
return(FALSE);
- }
}
+ if (GC_mark_some()) break;
}
GC_gc_no++;
@@ -407,7 +485,7 @@ void GC_finish_collection()
"Immediately reclaimed %ld bytes in heap of size %lu bytes\n",
(long)WORDS_TO_BYTES(GC_mem_found),
(unsigned long)GC_heapsize);
- GC_printf2("%lu (atomic) + %lu (composite) bytes in use\n",
+ GC_printf2("%lu (atomic) + %lu (composite) collectable bytes in use\n",
(unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
(unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
# endif
@@ -428,8 +506,14 @@ void GC_finish_collection()
}
/* Externally callable routine to invoke full, stop-world collection */
-void GC_gcollect()
+# if defined(__STDC__) || defined(__cplusplus)
+ int GC_try_to_collect(GC_stop_func stop_func)
+# else
+ int GC_try_to_collect(stop_func)
+ GC_stop_func stop_func;
+# endif
{
+ int result;
DCL_LOCK_STATE;
GC_invoke_finalizers();
@@ -438,10 +522,16 @@ void GC_gcollect()
if (!GC_is_initialized) GC_init_inner();
/* Minimize junk left in my registers */
GC_noop(0,0,0,0,0,0);
- GC_gcollect_inner();
+ result = (int)GC_try_to_collect_inner(stop_func);
UNLOCK();
ENABLE_SIGNALS();
- GC_invoke_finalizers();
+ if(result) GC_invoke_finalizers();
+ return(result);
+}
+
+void GC_gcollect GC_PROTO(())
+{
+ (void)GC_try_to_collect(GC_never_stop_func);
}
word GC_n_heap_sects = 0; /* Number of sections currently in heap. */
@@ -485,6 +575,29 @@ word bytes;
}
}
+# if !defined(NO_DEBUGGING)
+void GC_print_heap_sects()
+{
+ register unsigned i;
+
+ GC_printf1("Total heap size: %lu\n", (unsigned long) GC_heapsize);
+ for (i = 0; i < GC_n_heap_sects; i++) {
+ unsigned long start = (unsigned long) GC_heap_sects[i].hs_start;
+ unsigned long len = (unsigned long) GC_heap_sects[i].hs_bytes;
+ struct hblk *h;
+ unsigned nbl = 0;
+
+ GC_printf3("Section %ld from 0x%lx to 0x%lx ", (unsigned long)i,
+ start, (unsigned long)(start + len));
+ for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
+ if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
+ }
+ GC_printf2("%lu/%lu blacklisted\n", (unsigned long)nbl,
+ (unsigned long)(len/HBLKSIZE));
+ }
+}
+# endif
+
ptr_t GC_least_plausible_heap_addr = (ptr_t)ONES;
ptr_t GC_greatest_plausible_heap_addr = 0;
@@ -500,6 +613,16 @@ ptr_t x, y;
return(x < y? x : y);
}
+# if defined(__STDC__) || defined(__cplusplus)
+ void GC_set_max_heap_size(GC_word n)
+# else
+ void GC_set_max_heap_size(n)
+ GC_word n;
+# endif
+{
+ GC_max_heapsize = n;
+}
+
/*
* this explicitly increases the size of the heap. It is used
* internally, but may also be invoked from GC_expand_hp by the user.
@@ -517,6 +640,11 @@ word n;
if (n < MINHINCR) n = MINHINCR;
bytes = n * HBLKSIZE;
+
+ if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
+ /* Exceeded self-imposed limit */
+ return(FALSE);
+ }
space = GET_MEM(bytes);
if( space == 0 ) {
return(FALSE);
@@ -555,8 +683,12 @@ word n;
/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* Arguments is in bytes. */
-int GC_expand_hp(bytes)
-size_t bytes;
+# if defined(__STDC__) || defined(__cplusplus)
+ int GC_expand_hp(size_t bytes)
+# else
+ int GC_expand_hp(bytes)
+ size_t bytes;
+# endif
{
int result;
DCL_LOCK_STATE;
@@ -590,13 +722,18 @@ word needed_blocks;
}
if (!GC_expand_hp_inner(blocks_to_get)
&& !GC_expand_hp_inner(needed_blocks)) {
- if (count++ < 5) {
- WARN("Out of Memory! Trying to continue ...\n");
+ if (count++ < 10) {
+ WARN("Out of Memory! Trying to continue ...\n", 0);
GC_gcollect_inner();
} else {
- WARN("Out of Memory! Returning NIL!\n");
+ WARN("Out of Memory! Returning NIL!\n", 0);
return(FALSE);
}
+ } else if (count) {
+# ifdef PRINTSTATS
+ GC_printf0("Memory available again ...\n");
+# endif
+ count = 0;
}
}
return(TRUE);
@@ -619,7 +756,7 @@ int kind;
while (*flh == 0) {
/* Do our share of marking work */
- if(GC_incremental && !GC_dont_gc) GC_collect_a_little(1);
+ if(GC_incremental && !GC_dont_gc) GC_collect_a_little_inner(1);
/* Sweep blocks for objects of this size */
GC_continue_reclaim(sz, kind);
if (*flh == 0) {
diff --git a/blacklst.c b/blacklst.c
index 9c2fac8f..f4f70694 100644
--- a/blacklst.c
+++ b/blacklst.c
@@ -11,7 +11,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 1:56 pm PDT */
+/* Boehm, August 9, 1995 6:09 pm PDT */
# include "gc_priv.h"
/*
@@ -46,6 +46,10 @@ word * GC_incomplete_normal_bl;
word * GC_old_stack_bl;
word * GC_incomplete_stack_bl;
+word GC_total_black_listed;
+
+word GC_black_list_spacing = 10000000;
+
void GC_clear_bl();
void GC_bl_init()
@@ -79,6 +83,14 @@ word *doomed;
BZERO(doomed, sizeof(page_hash_table));
}
+void GC_copy_bl(old, new)
+word *new, *old;
+{
+ BCOPY(old, new, sizeof(page_hash_table));
+}
+
+static word total_black_listed();
+
/* Signal the completion of a collection. Turn the incomplete black */
/* lists into new black lists, etc. */
void GC_promote_black_lists()
@@ -94,6 +106,25 @@ void GC_promote_black_lists()
GC_clear_bl(very_old_stack_bl);
GC_incomplete_normal_bl = very_old_normal_bl;
GC_incomplete_stack_bl = very_old_stack_bl;
+ GC_total_black_listed = total_black_listed();
+# ifdef PRINTSTATS
+ GC_printf1("%ld blacklisted bytes in heap\n",
+ (unsigned long)GC_total_black_listed);
+# endif
+ if (GC_total_black_listed != 0) {
+ GC_black_list_spacing = HBLKSIZE*(GC_heapsize/GC_total_black_listed);
+ }
+ if (GC_black_list_spacing < 3 * HBLKSIZE) {
+ GC_black_list_spacing = 3 * HBLKSIZE;
+ }
+}
+
+void GC_unpromote_black_lists()
+{
+# ifndef ALL_INTERIOR_POINTERS
+ GC_copy_bl(GC_old_normal_bl, GC_incomplete_normal_bl);
+# endif
+ GC_copy_bl(GC_old_stack_bl, GC_incomplete_stack_bl);
}
# ifndef ALL_INTERIOR_POINTERS
@@ -179,3 +210,38 @@ word len;
return(0);
}
+
+/* Return the number of blacklisted blocks in a given range. */
+/* Used only for statistical purposes. */
+/* Looks only at the GC_incomplete_stack_bl. */
+word GC_number_stack_black_listed(start, endp1)
+struct hblk *start, *endp1;
+{
+ register struct hblk * h;
+ word result = 0;
+
+ for (h = start; h < endp1; h++) {
+ register int index = PHT_HASH((word)h);
+
+ if (get_pht_entry_from_index(GC_old_stack_bl, index)) result++;
+ }
+ return(result);
+}
+
+
+/* Return the total number of (stack) black-listed bytes. */
+static word total_black_listed()
+{
+ register unsigned i;
+ word total = 0;
+
+ for (i = 0; i < GC_n_heap_sects; i++) {
+ struct hblk * start = (struct hblk *) GC_heap_sects[i].hs_start;
+ word len = (word) GC_heap_sects[i].hs_bytes;
+ struct hblk * endp1 = start + len/HBLKSIZE;
+
+ total += GC_number_stack_black_listed(start, endp1);
+ }
+ return(total * HBLKSIZE);
+}
+
diff --git a/callprocs b/callprocs
index 4f105cc2..a8793f0b 100755
--- a/callprocs
+++ b/callprocs
@@ -1,3 +1,4 @@
#!/bin/sh
GC_DEBUG=1
+export GC_DEBUG
$* 2>&1 | awk '{print "0x3e=c\""$0"\""};/^\t##PC##=/ {if ($2 != 0) {print $2"?i"}}' | adb $1 | sed "s/^ >/>/"
diff --git a/checksums.c b/checksums.c
index 2cc37e41..98ef08fe 100644
--- a/checksums.c
+++ b/checksums.c
@@ -10,7 +10,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:07 pm PDT */
+/* Boehm, March 29, 1995 12:51 pm PST */
# ifdef CHECKSUMS
# include "gc_priv.h"
@@ -22,7 +22,7 @@
/* safe under other conditions.) */
# define NSUMS 2000
-# define OFFSET 100000
+# define OFFSET 0x10000
typedef struct {
bool new_valid;
@@ -44,7 +44,7 @@ struct hblk *h;
while (p < lim) {
result += *p++;
}
- return(result);
+ return(result | 0x80000000 /* doesn't look like pointer */);
}
# ifdef STUBBORN_ALLOC
@@ -80,13 +80,19 @@ int index;
if (pe -> block != 0 && pe -> block != h + OFFSET) ABORT("goofed");
pe -> old_sum = pe -> new_sum;
pe -> new_sum = GC_checksum(h);
+# ifndef MSWIN32
+ if (pe -> new_sum != 0 && !GC_page_was_ever_dirty(h)) {
+ GC_printf1("GC_page_was_ever_dirty(0x%lx) is wrong\n",
+ (unsigned long)h);
+ }
+# endif
if (GC_page_was_dirty(h)) {
GC_n_dirty++;
} else {
GC_n_clean++;
}
if (pe -> new_valid && pe -> old_sum != pe -> new_sum) {
- if (!GC_page_was_dirty(h)) {
+ if (!GC_page_was_dirty(h) || !GC_page_was_ever_dirty(h)) {
/* Set breakpoint here */GC_n_dirty_errors++;
}
# ifdef STUBBORN_ALLOC
@@ -105,14 +111,53 @@ int index;
pe -> block = h + OFFSET;
}
+word GC_bytes_in_used_blocks;
+
+void GC_add_block(h, dummy)
+struct hblk *h;
+word dummy;
+{
+ register hdr * hhdr = HDR(h);
+ register bytes = WORDS_TO_BYTES(hhdr -> hb_sz);
+
+ bytes += HDR_BYTES + HBLKSIZE-1;
+ bytes &= ~(HBLKSIZE-1);
+ GC_bytes_in_used_blocks += bytes;
+}
+
+void GC_check_blocks()
+{
+ word bytes_in_free_blocks = 0;
+ struct hblk * h = GC_hblkfreelist;
+ hdr * hhdr = HDR(h);
+ word sz;
+
+ GC_bytes_in_used_blocks = 0;
+ GC_apply_to_all_blocks(GC_add_block, (word)0);
+ while (h != 0) {
+ sz = hhdr -> hb_sz;
+ bytes_in_free_blocks += sz;
+ h = hhdr -> hb_next;
+ hhdr = HDR(h);
+ }
+ GC_printf2("GC_bytes_in_used_blocks = %ld, bytes_in_free_blocks = %ld ",
+ GC_bytes_in_used_blocks, bytes_in_free_blocks);
+ GC_printf1("GC_heapsize = %ld\n", GC_heapsize);
+ if (GC_bytes_in_used_blocks + bytes_in_free_blocks != GC_heapsize) {
+ GC_printf0("LOST SOME BLOCKS!!\n");
+ }
+}
+
/* Should be called immediately after GC_read_dirty and GC_read_changed. */
void GC_check_dirty()
{
register int index;
- register int i;
+ register unsigned i;
register struct hblk *h;
register ptr_t start;
+ GC_check_blocks();
+
GC_n_dirty_errors = 0;
GC_n_changed_errors = 0;
GC_n_clean = 0;
@@ -139,6 +184,11 @@ out:
if (GC_n_changed_errors > 0) {
GC_printf1("Found %lu changed bit errors\n",
(unsigned long)GC_n_changed_errors);
+ GC_printf0("These may be benign (provoked by nonpointer changes)\n");
+# ifdef THREADS
+ GC_printf0(
+ "Also expect 1 per thread currently allocating a stubborn obj.\n");
+# endif
}
}
diff --git a/config.h b/config.h
index 4e096102..62492c3e 100644
--- a/config.h
+++ b/config.h
@@ -11,7 +11,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:11 pm PDT */
+/* Boehm, October 3, 1995 6:39 pm PDT */
#ifndef CONFIG_H
@@ -33,6 +33,11 @@
# define HP
# define mach_type_known
# endif
+# if defined(__NetBSD__) && defined(m68k)
+# define M68K
+# define NETBSD
+# define mach_type_known
+# endif
# if defined(vax)
# define VAX
# ifdef ultrix
@@ -42,12 +47,12 @@
# endif
# define mach_type_known
# endif
-# if defined(mips)
+# if defined(mips) || defined(__mips)
# define MIPS
-# ifdef ultrix
+# if defined(ultrix) || defined(__ultrix)
# define ULTRIX
# else
-# ifdef _SYSTYPE_SVR4
+# if defined(_SYSTYPE_SVR4) || defined(SYSTYPE_SVR4) || defined(__SYSTYPE_SVR4__)
# define IRIX5
# else
# define RISCOS /* or IRIX 4.X */
@@ -65,7 +70,7 @@
# define SUNOS5
# define mach_type_known
# endif
-# if defined(__OS2__) && defined(__32BIT__)
+# if (defined(__OS2__) || defined(__EMX__)) && defined(__32BIT__)
# define I386
# define OS2
# define mach_type_known
@@ -74,7 +79,7 @@
# define RT
# define mach_type_known
# endif
-# if defined(sun) && defined(sparc)
+# if defined(sun) && (defined(sparc) || defined(__sparc))
# define SPARC
/* Test for SunOS 5.x */
# include <errno.h>
@@ -85,15 +90,20 @@
# endif
# define mach_type_known
# endif
+# if defined(sparc) && defined(unix) && !defined(sun)
+# define SPARC
+# define DRSNX
+# define mach_type_known
+# endif
# if defined(_IBMR2)
# define RS6000
# define mach_type_known
# endif
-# if defined(SCO)
+# if defined(_M_XENIX) && defined(_M_SYSV) && defined(_M_I386)
+ /* The above test may need refinement */
# define I386
# define SCO
# define mach_type_known
-/* --> incompletely implemented */
# endif
# if defined(_AUX_SOURCE)
# define M68K
@@ -114,8 +124,18 @@
# define mach_type_known
# endif
# if defined(_AMIGA)
+# define M68K
# define AMIGA
+# define mach_type_known
+# endif
+# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc)
# define M68K
+# define MACOS
+# define mach_type_known
+# endif
+# if defined(__MWERKS__) && defined(__powerc)
+# define POWERPC
+# define MACOS
# define mach_type_known
# endif
# if defined(NeXT) && defined(mc68000)
@@ -123,6 +143,11 @@
# define NEXT
# define mach_type_known
# endif
+# if defined(NeXT) && defined(i386)
+# define I386
+# define NEXT
+# define mach_type_known
+# endif
# if defined(__FreeBSD__) && defined(i386)
# define I386
# define FREEBSD
@@ -148,11 +173,26 @@
# define CX_UX
# define mach_type_known
# endif
+# if defined(DGUX)
+# define M88K
+ /* DGUX defined */
+# define mach_type_known
+# endif
# if defined(_MSDOS) && (_M_IX86 == 300) || (_M_IX86 == 400)
# define I386
# define MSWIN32 /* or Win32s */
# define mach_type_known
# endif
+# if defined(GO32)
+# define I386
+# define DJGPP /* MSDOS running the DJGPP port of GCC */
+# define mach_type_known
+# endif
+# if defined(__BORLANDC__)
+# define I386
+# define MSWIN32
+# define mach_type_known
+# endif
/* Feel free to add more clauses here */
@@ -168,25 +208,26 @@
# endif
/* Mapping is: M68K ==> Motorola 680X0 */
/* (SUNOS4,HP,NEXT, and SYSV (A/UX), */
- /* and AMIGA variants) */
+ /* MACOS and AMIGA variants) */
/* I386 ==> Intel 386 */
/* (SEQUENT, OS2, SCO, LINUX, NETBSD, */
/* FREEBSD, THREE86BSD, MSWIN32, */
- /* BSDI, SUNOS5 variants) */
+ /* BSDI, SUNOS5, NEXT variants) */
/* NS32K ==> Encore Multimax */
/* MIPS ==> R2000 or R3000 */
/* (RISCOS, ULTRIX variants) */
/* VAX ==> DEC VAX */
/* (BSD, ULTRIX variants) */
- /* RS6000 ==> IBM RS/6000 AIX3.1 */
+ /* RS6000 ==> IBM RS/6000 AIX3.X */
/* RT ==> IBM PC/RT */
/* HP_PA ==> HP9000/700 & /800 */
/* HP/UX */
/* SPARC ==> SPARC under SunOS */
- /* (SUNOS4, SUNOS5 variants) */
+ /* (SUNOS4, SUNOS5, */
+ /* DRSNX variants) */
/* ALPHA ==> DEC Alpha OSF/1 */
/* M88K ==> Motorola 88XX0 */
- /* (CX/UX so far) */
+ /* (CX_UX and DGUX) */
/*
@@ -222,7 +263,7 @@
* If either of the last two macros are defined, then STACKBOTTOM is computed
* during collector startup using one of the following two heuristics:
* HEURISTIC1: Take an address inside GC_init's frame, and round it up to
- * the next multiple of 16 MB.
+ * the next multiple of STACK_GRAN.
* HEURISTIC2: Take an address inside GC_init's frame, increment it repeatedly
* in small steps (decrement if STACK_GROWS_UP), and read the value
* at each location. Remember the value when the first
@@ -270,9 +311,16 @@
*/
+# define STACK_GRAN 0x1000000
# ifdef M68K
# define MACH_TYPE "M68K"
# define ALIGNMENT 2
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# define HEURISTIC2
+ extern char etext;
+# define DATASTART ((ptr_t)(&etext))
+# endif
# ifdef SUNOS4
# define OS_TYPE "SUNOS4"
extern char etext;
@@ -310,11 +358,36 @@
# define OS_TYPE "AMIGA"
/* STACKBOTTOM and DATASTART handled specially */
/* in os_dep.c */
+# define DATAEND /* not needed */
+# endif
+# ifdef MACOS
+# ifndef __LOWMEM__
+# include <LowMem.h>
+# endif
+# define OS_TYPE "MACOS"
+ /* see os_dep.c for details of global data segments. */
+# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
+# define DATAEND /* not needed */
# endif
# ifdef NEXT
# define OS_TYPE "NEXT"
# define DATASTART ((ptr_t) get_etext())
# define STACKBOTTOM ((ptr_t) 0x4000000)
+# define DATAEND /* not needed */
+# endif
+# endif
+
+# ifdef POWERPC
+# define MACH_TYPE "POWERPC"
+# define ALIGNMENT 2
+# ifdef MACOS
+# ifndef __LOWMEM__
+# include <LowMem.h>
+# endif
+# define OS_TYPE "MACOS"
+ /* see os_dep.c for details of global data segments. */
+# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
+# define DATAEND /* not needed */
# endif
# endif
@@ -347,8 +420,13 @@
extern int etext;
# ifdef SUNOS5
# define OS_TYPE "SUNOS5"
-# define DATASTART ((ptr_t)((((word) (&etext)) + 0x10003) & ~0x3))
+ extern int _etext;
+ extern int _end;
+ extern char * GC_SysVGetDataStart();
+# define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &_etext)
+# define DATAEND (&_end)
# define PROC_VDB
+# define HEURISTIC1
# endif
# ifdef SUNOS4
# define OS_TYPE "SUNOS4"
@@ -363,16 +441,28 @@
/* was done by Robert Ehrlich, Manuel Serrano, and Bernard */
/* Serpette of INRIA. */
/* This assumes ZMAGIC, i.e. demand-loadable executables. */
-# define DATASTART ((ptr_t)(*(int *)0x2004+0x2000))
+# define TEXTSTART 0x2000
+# define DATASTART ((ptr_t)(*(int *)(TEXTSTART+0x4)+TEXTSTART))
+# define MPROTECT_VDB
+# define HEURISTIC1
+# endif
+# ifdef DRSNX
+# define CPP_WORDSZ 32
+# define OS_TYPE "DRSNX"
+ extern char * GC_SysVGetDataStart();
+ extern int etext;
+# define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &etext)
# define MPROTECT_VDB
+# define STACKBOTTOM ((ptr_t) 0xdfff0000)
# endif
-# define HEURISTIC1
# define DYNAMIC_LOADING
# endif
# ifdef I386
# define MACH_TYPE "I386"
-# define ALIGNMENT 4 /* Appears to hold for all "32 bit" compilers */
+# define ALIGNMENT 4 /* Appears to hold for all "32 bit" compilers */
+ /* except Borland. The -a4 option fixes */
+ /* Borland. */
# ifdef SEQUENT
# define OS_TYPE "SEQUENT"
extern int etext;
@@ -381,14 +471,15 @@
# endif
# ifdef SUNOS5
# define OS_TYPE "SUNOS5"
- extern int etext;
-# define DATASTART ((ptr_t)((((word) (&etext)) + 0x1003) & ~0x3))
- extern int _start();
+ extern int etext, _start;
+ extern char * GC_SysVGetDataStart();
+# define DATASTART GC_SysVGetDataStart(0x1000, &etext)
# define STACKBOTTOM ((ptr_t)(&_start))
# define PROC_VDB
# endif
# ifdef SCO
# define OS_TYPE "SCO"
+ extern int etext;
# define DATASTART ((ptr_t)((((word) (&etext)) + 0x3fffff) \
& ~0x3fffff) \
+((word)&etext & 0xfff))
@@ -399,17 +490,27 @@
extern int etext;
# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
# define STACKBOTTOM ((ptr_t)0xc0000000)
+# define MPROTECT_VDB
# endif
# ifdef OS2
# define OS_TYPE "OS2"
/* STACKBOTTOM and DATASTART are handled specially in */
/* os_dep.c. OS2 actually has the right */
/* system call! */
+# define DATAEND /* not needed */
# endif
# ifdef MSWIN32
# define OS_TYPE "MSWIN32"
/* STACKBOTTOM and DATASTART are handled specially in */
/* os_dep.c. */
+# define MPROTECT_VDB
+# define DATAEND /* not needed */
+# endif
+# ifdef DJGPP
+# define OS_TYPE "DJGPP"
+ extern int etext;
+# define DATASTART ((ptr_t)(&etext))
+# define STACKBOTTOM ((ptr_t)0x00080000)
# endif
# ifdef FREEBSD
# define OS_TYPE "FREEBSD"
@@ -430,6 +531,12 @@
extern char etext;
# define DATASTART ((ptr_t)(&etext))
# endif
+# ifdef NEXT
+# define OS_TYPE "NEXT"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t)0xc0000000)
+# define DATAEND /* not needed */
+# endif
# endif
# ifdef NS32K
@@ -460,6 +567,11 @@
# ifdef IRIX5
# define OS_TYPE "IRIX5"
# define MPROTECT_VDB
+ /* The above is dubious. Mprotect and signals do work, */
+ /* and dirty bits are implemented under IRIX5. But, */
+ /* at least under IRIX5.2, mprotect seems to be so */
+ /* slow relative to the hardware that incremental */
+ /* collection is likely to be rarely useful. */
# define DYNAMIC_LOADING
# endif
# endif
@@ -485,14 +597,26 @@
# define ALIGNMENT 8
# define DATASTART ((ptr_t) 0x140000000)
# define HEURISTIC2
+ /* Normally HEURISTIC2 is too conservative, since */
+ /* the text segment immediately follows the stack. */
+ /* Hence we give an upper bound. */
+ extern __start;
+# define HEURISTIC2_LIMIT ((ptr_t)((word)(&__start) & ~(getpagesize()-1)))
# define CPP_WORDSZ 64
# define MPROTECT_VDB
+# define DYNAMIC_LOADING
# endif
# ifdef M88K
# define MACH_TYPE "M88K"
# define ALIGNMENT 4
-# define DATASTART ((((word)&etext + 0x3fffff) & ~0x3fffff) + 0x10000)
+# ifdef CX_UX
+# define DATASTART ((((word)&etext + 0x3fffff) & ~0x3fffff) + 0x10000)
+# endif
+# ifdef DGUX
+ extern char * GC_SysVGetDataStart();
+# define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &etext)
+# endif
# define STACKBOTTOM ((char*)0xf0000000) /* determined empirically */
# endif
@@ -508,6 +632,24 @@
# define OS_TYPE ""
# endif
+# ifndef DATAEND
+ extern int end;
+# define DATAEND (&end)
+# endif
+
+# if defined(SUNOS5) || defined(DRSNX)
+ /* OS has SVR4 generic features. Probably others also qualify. */
+# define SVR4
+# endif
+
+# if defined(SUNOS5) || defined(DRSNX)
+ /* OS has SUNOS5 style semi-undocumented interface to dynamic */
+ /* loader. */
+# define SUNOS5DL
+ /* OS has SUNOS5 style signal handlers. */
+# define SUNOS5SIGS
+# endif
+
# if CPP_WORDSZ != 32 && CPP_WORDSZ != 64
-> bad word size
# endif
@@ -538,4 +680,8 @@
# define DEFAULT_VDB
# endif
+# if defined(SPARC)
+# define SAVE_CALL_CHAIN
+# endif
+
# endif
diff --git a/cord/SCOPTIONS.amiga b/cord/SCOPTIONS.amiga
new file mode 100755
index 00000000..2a091970
--- /dev/null
+++ b/cord/SCOPTIONS.amiga
@@ -0,0 +1,14 @@
+MATH=STANDARD
+CPU=68030
+NOSTACKCHECK
+OPTIMIZE
+VERBOSE
+NOVERSION
+NOICONS
+OPTIMIZERTIME
+INCLUDEDIR=/
+DEFINE AMIGA
+LIBRARY=cord.lib
+LIBRARY=/gc.lib
+IGNORE=100
+IGNORE=161
diff --git a/cord/SMakefile.amiga b/cord/SMakefile.amiga
new file mode 100644
index 00000000..5aef131e
--- /dev/null
+++ b/cord/SMakefile.amiga
@@ -0,0 +1,20 @@
+# Makefile for cord.lib
+# Michel Schinz 1994/07/20
+
+OBJS = cordbscs.o cordprnt.o cordxtra.o
+
+all: cord.lib cordtest
+
+cordbscs.o: cordbscs.c
+cordprnt.o: cordprnt.c
+cordxtra.o: cordxtra.c
+cordtest.o: cordtest.c
+
+cord.lib: $(OBJS)
+ oml cord.lib r $(OBJS)
+
+cordtest: cordtest.o cord.lib
+ sc cordtest.o link
+
+clean:
+ delete cord.lib cordtest \#?.o \#?.lnk
diff --git a/cord/cord.h b/cord/cord.h
index cdf5e03c..8c9c8be2 100644
--- a/cord/cord.h
+++ b/cord/cord.h
@@ -12,7 +12,7 @@
*
* Author: Hans-J. Boehm (boehm@parc.xerox.com)
*/
-/* Boehm, May 19, 1994 2:22 pm PDT */
+/* Boehm, October 5, 1995 4:20 pm PDT */
/*
* Cords are immutable character strings. A number of operations
@@ -37,6 +37,23 @@
* ASCII NUL characters may be embedded in cords using CORD_from_fn.
* This is handled correctly, but CORD_to_char_star will produce a string
* with embedded NULs when given such a cord.
+ *
+ * This interface is fairly big, largely for performance reasons.
+ * The most basic constants and functions:
+ *
+ * CORD - the type of a cord;
+ * CORD_EMPTY - empty cord;
+ * CORD_len(cord) - length of a cord;
+ * CORD_cat(cord1,cord2) - concatenation of two cords;
+ * CORD_substr(cord, start, len) - substring (or subcord);
+ * CORD_pos i; CORD_FOR(i, cord) { ... CORD_pos_fetch(i) ... } -
+ * examine each character in a cord. CORD_pos_fetch(i) is the char.
+ * CORD_fetch(int i) - Retrieve i'th character (slowly).
+ * CORD_cmp(cord1, cord2) - compare two cords.
+ * CORD_from_file(FILE * f) - turn a read-only file into a cord.
+ * CORD_to_char_star(cord) - convert to C string.
+ * (Non-NULL C constant strings are cords.)
+ * CORD_printf (etc.) - cord version of printf. Use %r for cords.
*/
# ifndef CORD_H
@@ -55,7 +72,7 @@ typedef const char * CORD;
# define CORD_EMPTY 0
/* Is a nonempty cord represented as a C string? */
-#define IS_STRING(s) (*(s) != '\0')
+#define CORD_IS_STRING(s) (*(s) != '\0')
/* Concatenate two cords. If the arguments are C strings, they may */
/* not be subsequently altered. */
@@ -64,6 +81,8 @@ CORD CORD_cat(CORD x, CORD y);
/* Concatenate a cord and a C string with known length. Except for the */
/* empty string case, this is a special case of CORD_cat. Since the */
/* length is known, it can be faster. */
+/* The string y is shared with the resulting CORD. Hence it should */
+/* not be altered by the caller. */
CORD CORD_cat_char_star(CORD x, const char * y, size_t leny);
/* Compute the length of a cord */
@@ -135,37 +154,37 @@ int CORD_riter(CORD x, CORD_iter_fn f1, void * client_data);
/* described below. Also note that */
/* CORD_pos_fetch, CORD_next and CORD_prev have both macro and function */
/* definitions. The former may evaluate their argument more than once. */
-# include "cord_pos.h"
+# include "private/cord_pos.h"
/*
Visible definitions from above:
typedef <OPAQUE but fairly big> CORD_pos[1];
- /* Extract the cord from a position:
+ * Extract the cord from a position:
CORD CORD_pos_to_cord(CORD_pos p);
- /* Extract the current index from a position:
+ * Extract the current index from a position:
size_t CORD_pos_to_index(CORD_pos p);
- /* Fetch the character located at the given position:
- char CORD_pos_fetch(register CORD_pos p);
+ * Fetch the character located at the given position:
+ char CORD_pos_fetch(CORD_pos p);
- /* Initialize the position to refer to the give cord and index.
- /* Note that this is the most expensive function on positions:
+ * Initialize the position to refer to the given cord and index.
+ * Note that this is the most expensive function on positions:
void CORD_set_pos(CORD_pos p, CORD x, size_t i);
- /* Advance the position to the next character.
- /* P must be initialized and valid.
- /* Invalidates p if past end:
+ * Advance the position to the next character.
+ * P must be initialized and valid.
+ * Invalidates p if past end:
void CORD_next(CORD_pos p);
- /* Move the position to the preceding character.
- /* P must be initialized and valid.
- /* Invalidates p if past beginning:
+ * Move the position to the preceding character.
+ * P must be initialized and valid.
+ * Invalidates p if past beginning:
void CORD_prev(CORD_pos p);
- /* Is the position valid, i.e. inside the cord?
+ * Is the position valid, i.e. inside the cord?
int CORD_pos_valid(CORD_pos p);
*/
# define CORD_FOR(pos, cord) \
@@ -181,11 +200,14 @@ extern void (* CORD_oom_fn)(void);
void CORD_dump(CORD x);
/* The following could easily be implemented by the client. They are */
-/* provided in cord_xtra.c for convenience. */
+/* provided in cordxtra.c for convenience. */
/* Concatenate a character to the end of a cord. */
CORD CORD_cat_char(CORD x, char c);
+/* Concatenate n cords. */
+CORD CORD_catn(int n, /* CORD */ ...);
+
/* Return the character in CORD_substr(x, i, 1) */
char CORD_fetch(CORD x, size_t i);
@@ -238,6 +260,10 @@ CORD CORD_from_file_lazy(FILE * f);
/* x, and is thus modifiable. */
char * CORD_to_char_star(CORD x);
+/* Identical to the above, but the result may share structure with */
+/* the argument and is thus not modifiable. */
+const char * CORD_to_const_char_star(CORD x);
+
/* Write a cord to a file, starting at the current position. No */
/* trailing NULs or newlines are added. */
/* Returns EOF if a write error occurs, 1 otherwise. */
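
A small usage sketch of the basic cord operations summarized near the top of this header diff; C string literals serve directly as cords, per the comment above:

#include <stdio.h>
#include "cord.h"

int main(void)
{
    CORD x = CORD_cat("Hello, ", "cord world");
    CORD_pos p;

    printf("length = %lu\n", (unsigned long)CORD_len(x));

    /* Examine each character through a position. */
    CORD_FOR(p, x) {
        putchar(CORD_pos_fetch(p));
    }
    putchar('\n');

    /* Convert to an ordinary, modifiable C string. */
    printf("%s\n", CORD_to_char_star(x));
    return 0;
}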
diff --git a/cord/cordbscs.c b/cord/cordbscs.c
index d828155b..d377662a 100644
--- a/cord/cordbscs.c
+++ b/cord/cordbscs.c
@@ -12,8 +12,8 @@
*
* Author: Hans-J. Boehm (boehm@parc.xerox.com)
*/
-/* Boehm, May 19, 1994 2:18 pm PDT */
-# include "../gc.h"
+/* Boehm, October 3, 1994 5:19 pm PDT */
+# include "gc.h"
# include "cord.h"
# include <stdlib.h>
# include <stdio.h>
@@ -87,11 +87,11 @@ typedef union {
#define LEN(s) (((CordRep *)s) -> generic.len)
#define DEPTH(s) (((CordRep *)s) -> generic.depth)
-#define GEN_LEN(s) (IS_STRING(s) ? strlen(s) : LEN(s))
+#define GEN_LEN(s) (CORD_IS_STRING(s) ? strlen(s) : LEN(s))
#define LEFT_LEN(c) ((c) -> left_len != 0? \
(c) -> left_len \
- : (IS_STRING((c) -> left) ? \
+ : (CORD_IS_STRING((c) -> left) ? \
(c) -> len - GEN_LEN((c) -> right) \
: LEN((c) -> left)))
@@ -110,7 +110,7 @@ void CORD_dump_inner(CORD x, unsigned n)
}
if (x == 0) {
fputs("NIL\n", stdout);
- } else if (IS_STRING(x)) {
+ } else if (CORD_IS_STRING(x)) {
for (i = 0; i <= SHORT_LIMIT; i++) {
if (x[i] == '\0') break;
putchar(x[i]);
@@ -152,7 +152,7 @@ CORD CORD_cat_char_star(CORD x, const char * y, size_t leny)
if (x == CORD_EMPTY) return(y);
if (leny == 0) return(x);
- if (IS_STRING(x)) {
+ if (CORD_IS_STRING(x)) {
lenx = strlen(x);
result_len = lenx + leny;
if (result_len <= SHORT_LIMIT) {
@@ -176,9 +176,9 @@ CORD CORD_cat_char_star(CORD x, const char * y, size_t leny)
if (leny <= SHORT_LIMIT/2
&& IS_CONCATENATION(x)
- && IS_STRING(right = ((CordRep *)x) -> concatenation.right)) {
+ && CORD_IS_STRING(right = ((CordRep *)x) -> concatenation.right)) {
/* Merge y into right part of x. */
- if (!IS_STRING(left = ((CordRep *)x) -> concatenation.left)) {
+ if (!CORD_IS_STRING(left = ((CordRep *)x) -> concatenation.left)) {
right_len = lenx - LEN(left);
} else if (((CordRep *)x) -> concatenation.left_len != 0) {
right_len = lenx - ((CordRep *)x) -> concatenation.left_len;
@@ -197,7 +197,7 @@ CORD CORD_cat_char_star(CORD x, const char * y, size_t leny)
lenx -= right_len;
/* Now fall through to concatenate the two pieces: */
}
- if (IS_STRING(x)) {
+ if (CORD_IS_STRING(x)) {
depth = 1;
} else {
depth = DEPTH(x) + 1;
@@ -236,9 +236,9 @@ CORD CORD_cat(CORD x, CORD y)
if (x == CORD_EMPTY) return(y);
if (y == CORD_EMPTY) return(x);
- if (IS_STRING(y)) {
+ if (CORD_IS_STRING(y)) {
return(CORD_cat_char_star(x, y, strlen(y)));
- } else if (IS_STRING(x)) {
+ } else if (CORD_IS_STRING(x)) {
lenx = strlen(x);
depth = DEPTH(y) + 1;
} else {
@@ -356,7 +356,7 @@ CORD CORD_substr_closure(CORD x, size_t i, size_t n, CORD_fn f)
/* A version of CORD_substr that assumes i >= 0, n > 0, and i + n < length(x).*/
CORD CORD_substr_checked(CORD x, size_t i, size_t n)
{
- if (IS_STRING(x)) {
+ if (CORD_IS_STRING(x)) {
if (n > SUBSTR_LIMIT) {
return(CORD_substr_closure(x, i, n, CORD_index_access_fn));
} else {
@@ -448,9 +448,11 @@ CORD CORD_substr(CORD x, size_t i, size_t n)
/* n < 0 is impossible in a correct C implementation, but */
/* quite possible under SunOS 4.X. */
if (i + n > len) n = len - i;
- if (i < 0) ABORT("CORD_substr: second arg. negative");
+# ifndef __STDC__
+ if (i < 0) ABORT("CORD_substr: second arg. negative");
/* Possible only if both client and C implementation are buggy. */
/* But empirically this happens frequently. */
+# endif
return(CORD_substr_checked(x, i, n));
}
@@ -459,7 +461,7 @@ int CORD_iter5(CORD x, size_t i, CORD_iter_fn f1,
CORD_batched_iter_fn f2, void * client_data)
{
if (x == 0) return(0);
- if (IS_STRING(x)) {
+ if (CORD_IS_STRING(x)) {
register const char *p = x+i;
if (*p == '\0') ABORT("2nd arg to CORD_iter5 too big");
@@ -512,14 +514,15 @@ int CORD_iter(CORD x, CORD_iter_fn f1, void * client_data)
int CORD_riter4(CORD x, size_t i, CORD_iter_fn f1, void * client_data)
{
if (x == 0) return(0);
- if (IS_STRING(x)) {
+ if (CORD_IS_STRING(x)) {
register const char *p = x + i;
register char c;
- while (p >= x) {
+ for(;;) {
c = *p;
if (c == '\0') ABORT("2nd arg to CORD_riter4 too big");
if ((*f1)(c, client_data)) return(1);
+ if (p == x) break;
p--;
}
return(0);
@@ -542,12 +545,12 @@ int CORD_riter4(CORD x, size_t i, CORD_iter_fn f1, void * client_data)
register struct Function * f = &(((CordRep *)x) -> function);
register size_t j;
- for (j = i; j >= 0; j--) {
+ for (j = i; ; j--) {
if ((*f1)((*(f -> fn))(j, f -> client_data), client_data)) {
return(1);
}
+ if (j == 0) return(0);
}
- return(0);
}
}
@@ -682,7 +685,7 @@ void CORD_balance_insert(CORD x, size_t len, ForestElement * forest)
{
register int depth;
- if (IS_STRING(x)) {
+ if (CORD_IS_STRING(x)) {
CORD_add_forest(forest, x, len);
} else if (IS_CONCATENATION(x)
&& ((depth = DEPTH(x)) >= MAX_DEPTH
@@ -705,7 +708,7 @@ CORD CORD_balance(CORD x)
register size_t len;
if (x == 0) return(0);
- if (IS_STRING(x)) return(x);
+ if (CORD_IS_STRING(x)) return(x);
if (!min_len_init) CORD_init_min_len();
len = LEN(x);
CORD_init_forest(forest, len);
@@ -730,7 +733,7 @@ void CORD__extend_path(register CORD_pos p)
register size_t top_len = GEN_LEN(top);
/* Fill in the rest of the path. */
- while(!IS_STRING(top) && IS_CONCATENATION(top)) {
+ while(!CORD_IS_STRING(top) && IS_CONCATENATION(top)) {
register struct Concatenation * conc =
&(((CordRep *)top) -> concatenation);
register size_t left_len;
@@ -749,7 +752,7 @@ void CORD__extend_path(register CORD_pos p)
p[0].path_len++;
}
/* Fill in leaf description for fast access. */
- if (IS_STRING(top)) {
+ if (CORD_IS_STRING(top)) {
p[0].cur_leaf = top;
p[0].cur_start = top_pos;
p[0].cur_end = top_pos + top_len;
@@ -778,7 +781,7 @@ void CORD__next(register CORD_pos p)
/* Leaf is not a string or we're at end of leaf */
p[0].cur_pos = cur_pos;
- if (!IS_STRING(leaf)) {
+ if (!CORD_IS_STRING(leaf)) {
/* Function leaf */
register struct Function * f = &(((CordRep *)leaf) -> function);
register size_t start_pos = current_pe -> pe_start_pos;
diff --git a/cord/cordprnt.c b/cord/cordprnt.c
index 1b043152..667560f2 100644
--- a/cord/cordprnt.c
+++ b/cord/cordprnt.c
@@ -20,14 +20,14 @@
/* We assume that void * and char * have the same size. */
/* All this cruft is needed because we want to rely on the underlying */
/* sprintf implementation whenever possible. */
-/* Boehm, May 19, 1994 2:19 pm PDT */
+/* Boehm, September 21, 1995 6:00 pm PDT */
#include "cord.h"
#include "ec.h"
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
-#include "../gc.h"
+#include "gc.h"
#define CONV_SPEC_LEN 50 /* Maximum length of a single */
/* conversion specification. */
@@ -254,11 +254,11 @@ int CORD_vsprintf(CORD * out, CORD format, va_list args)
/* Use standard sprintf to perform conversion */
{
register char * buf;
- int needed_sz;
va_list vsprintf_args = args;
/* The above does not appear to be sanctioned */
/* by the ANSI C standard. */
int max_size = 0;
+ int res;
if (width == VARIABLE) width = va_arg(args, int);
if (prec == VARIABLE) prec = va_arg(args, int);
@@ -302,11 +302,12 @@ int CORD_vsprintf(CORD * out, CORD format, va_list args)
default:
return(-1);
}
- len = (size_t)vsprintf(buf, conv_spec, vsprintf_args);
- if ((char *)len == buf) {
+ res = vsprintf(buf, conv_spec, vsprintf_args);
+ len = (size_t)res;
+ if ((char *)(GC_word)res == buf) {
/* old style vsprintf */
len = strlen(buf);
- } else if (len < 0) {
+ } else if (res < 0) {
return(-1);
}
if (buf != result[0].ec_bufptr) {
diff --git a/cord/cordtest.c b/cord/cordtest.c
index cf1c4a45..1280da9c 100644
--- a/cord/cordtest.c
+++ b/cord/cordtest.c
@@ -10,8 +10,9 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:21 pm PDT */
+/* Boehm, August 24, 1994 11:58 am PDT */
# include "cord.h"
+# include <string.h>
# include <stdio.h>
/* This is a very incomplete test of the cord package. It knows about */
/* a few internals of the package (e.g. when C strings are returned) */
@@ -45,7 +46,7 @@ char id_cord_fn(size_t i, void * client_data)
return((char)i);
}
-test_basics()
+void test_basics()
{
CORD x = "ab";
register int i;
@@ -54,7 +55,7 @@ test_basics()
CORD_pos p;
x = CORD_cat(x,x);
- if (!IS_STRING(x)) ABORT("short cord should usually be a string");
+ if (!CORD_IS_STRING(x)) ABORT("short cord should usually be a string");
if (strcmp(x, "abab") != 0) ABORT("bad CORD_cat result");
for (i = 1; i < 16; i++) {
@@ -78,15 +79,15 @@ test_basics()
if (count != 64*1024 + 2) ABORT("Position based iteration failed");
y = CORD_substr(x, 1023, 5);
- if (!IS_STRING(y)) ABORT("short cord should usually be a string");
+ if (!CORD_IS_STRING(y)) ABORT("short cord should usually be a string");
if (strcmp(y, "babab") != 0) ABORT("bad CORD_substr result");
y = CORD_substr(x, 1024, 8);
- if (!IS_STRING(y)) ABORT("short cord should usually be a string");
+ if (!CORD_IS_STRING(y)) ABORT("short cord should usually be a string");
if (strcmp(y, "abababab") != 0) ABORT("bad CORD_substr result");
y = CORD_substr(x, 128*1024-1, 8);
- if (!IS_STRING(y)) ABORT("short cord should usually be a string");
+ if (!CORD_IS_STRING(y)) ABORT("short cord should usually be a string");
if (strcmp(y, "bc") != 0) ABORT("bad CORD_substr result");
x = CORD_balance(x);
@@ -99,7 +100,7 @@ test_basics()
if (count != 64*1024 + 2) ABORT("CORD_iter5 failed");
y = CORD_substr(x, 1023, 5);
- if (!IS_STRING(y)) ABORT("short cord should usually be a string");
+ if (!CORD_IS_STRING(y)) ABORT("short cord should usually be a string");
if (strcmp(y, "babab") != 0) ABORT("bad CORD_substr result");
y = CORD_from_fn(id_cord_fn, 0, 13);
i = 0;
@@ -112,11 +113,14 @@ test_basics()
if (i != 13) ABORT("Bad apparent length for function node");
}
-test_extras()
+void test_extras()
{
-# ifdef __OS2__
+# if defined(__OS2__)
# define FNAME1 "tmp1"
# define FNAME2 "tmp2"
+# elif defined(AMIGA)
+# define FNAME1 "T:tmp1"
+# define FNAME2 "T:tmp2"
# else
# define FNAME1 "/tmp/cord_test"
# define FNAME2 "/tmp/cord_test2"
@@ -128,6 +132,9 @@ test_extras()
FILE *f;
FILE *f1a, *f1b, *f2;
+ w = CORD_cat(CORD_cat(y,y),y);
+ z = CORD_catn(3,y,y,y);
+ if (CORD_cmp(w,z) != 0) ABORT("CORD_catn comparison wrong");
for (i = 1; i < 100; i++) {
x = CORD_cat(x, y);
}
@@ -182,7 +189,7 @@ test_extras()
}
}
-test_printf()
+void test_printf()
{
CORD result;
char result2[200];
@@ -190,7 +197,7 @@ test_printf()
short s;
CORD x;
- if (CORD_sprintf(&result, "%7.2f%ln", 3.14159, &l) != 7)
+ if (CORD_sprintf(&result, "%7.2f%ln", 3.14159F, &l) != 7)
ABORT("CORD_sprintf failed 1");
if (CORD_cmp(result, " 3.14") != 0)ABORT("CORD_sprintf goofed 1");
if (l != 7) ABORT("CORD_sprintf goofed 2");
@@ -210,6 +217,9 @@ test_printf()
main()
{
+# ifdef THINK_C
+ printf("cordtest:\n");
+# endif
test_basics();
test_extras();
test_printf();
diff --git a/cord/cordxtra.c b/cord/cordxtra.c
index 4aaaf6e7..0e4a478f 100644
--- a/cord/cordxtra.c
+++ b/cord/cordxtra.c
@@ -17,17 +17,26 @@
 * implementation. They also serve as example client code for
* cord_basics.
*/
-/* Boehm, May 19, 1994 2:18 pm PDT */
+/* Boehm, October 3, 1994 5:10 pm PDT */
# include <stdio.h>
# include <string.h>
# include <stdlib.h>
+# include <stdarg.h>
# include "cord.h"
# include "ec.h"
# define I_HIDE_POINTERS /* So we get access to allocation lock. */
/* We use this for lazy file reading, */
/* so that we remain independent */
/* of the threads primitives. */
-# include "../gc.h"
+# include "gc.h"
+
+/* For now we assume that pointer reads and writes are atomic, */
+/* i.e. another thread always sees the state before or after */
+/* a write. This might be false on a Motorola M68K with */
+/* pointers that are not 32-bit aligned. But there probably */
+/* aren't too many threads packages running on those. */
+# define ATOMIC_WRITE(x,y) (x) = (y)
+# define ATOMIC_READ(x) (*(x))
/* The standard says these are in stdio.h, but they aren't always: */
# ifndef SEEK_SET
@@ -58,6 +67,21 @@ CORD CORD_cat_char(CORD x, char c)
return(CORD_cat_char_star(x, string, 1));
}
+CORD CORD_catn(int nargs, ...)
+{
+ register CORD result = CORD_EMPTY;
+ va_list args;
+ register int i;
+
+ va_start(args, nargs);
+ for (i = 0; i < nargs; i++) {
+ register CORD next = va_arg(args, CORD);
+ result = CORD_cat(result, next);
+ }
+ va_end(args);
+ return(result);
+}
+
typedef struct {
size_t len;
size_t count;
@@ -86,7 +110,7 @@ int CORD_batched_fill_proc(const char * s, void * client_data)
register char * buf = d -> buf;
register const char * t = s;
- while(((d -> buf)[count] = *t++) != '\0') {
+ while((buf[count] = *t++) != '\0') {
count++;
if (count >= max) {
d -> count = count;
@@ -97,7 +121,7 @@ int CORD_batched_fill_proc(const char * s, void * client_data)
return(0);
}
-/* Fill buf with between min and max characters starting at i. */
+/* Fill buf with len characters starting at i. */
/* Assumes len characters are available. */
void CORD_fill_buf(CORD x, size_t i, size_t len, char * buf)
{
@@ -117,7 +141,7 @@ int CORD_cmp(CORD x, CORD y)
if (y == CORD_EMPTY) return(x != CORD_EMPTY);
if (x == CORD_EMPTY) return(-1);
- if (IS_STRING(y) && IS_STRING(x)) return(strcmp(x,y));
+ if (CORD_IS_STRING(y) && CORD_IS_STRING(x)) return(strcmp(x,y));
CORD_set_pos(xpos, x, 0);
CORD_set_pos(ypos, y, 0);
for(;;) {
@@ -211,6 +235,13 @@ char * CORD_to_char_star(CORD x)
return(result);
}
+const char * CORD_to_const_char_star(CORD x)
+{
+ if (x == 0) return("");
+ if (CORD_IS_STRING(x)) return((const char *)x);
+ return(CORD_to_char_star(x));
+}
+
char CORD_fetch(CORD x, size_t i)
{
CORD_pos xpos;
@@ -330,7 +361,7 @@ size_t CORD_str(CORD x, size_t start, CORD s)
register size_t match_pos;
if (s == CORD_EMPTY) return(start);
- if (IS_STRING(s)) {
+ if (CORD_IS_STRING(s)) {
s_start = s;
slen = strlen(s);
} else {
@@ -484,7 +515,7 @@ refill_data * client_data;
}
new_cache -> tag = DIV_LINE_SZ(file_pos);
/* Store barrier goes here. */
- state -> lf_cache[line_no] = new_cache;
+ ATOMIC_WRITE(state -> lf_cache[line_no], new_cache);
state -> lf_current = line_start + LINE_SZ;
return(new_cache->data[MOD_LINE_SZ(file_pos)]);
}
@@ -492,7 +523,9 @@ refill_data * client_data;
char CORD_lf_func(size_t i, void * client_data)
{
register lf_state * state = (lf_state *)client_data;
- register cache_line * cl = state -> lf_cache[DIV_LINE_SZ(MOD_CACHE_SZ(i))];
+ register cache_line * volatile * cl_addr =
+ &(state -> lf_cache[DIV_LINE_SZ(MOD_CACHE_SZ(i))]);
+ register cache_line * cl = (cache_line *)ATOMIC_READ(cl_addr);
if (cl == 0 || cl -> tag != DIV_LINE_SZ(i)) {
/* Cache miss */
@@ -522,6 +555,17 @@ CORD CORD_from_file_lazy_inner(FILE * f, size_t len)
register int i;
if (state == 0) OUT_OF_MEMORY;
+ if (len != 0) {
+ /* Dummy read to force buffer allocation. */
+ /* This greatly increases the probability */
+ /* of avoiding deadlock if buffer allocation */
+ /* is redirected to GC_malloc and the */
+ /* world is multithreaded. */
+ char buf[1];
+
+ (void) fread(buf, 1, 1, f);
+ rewind(f);
+ }
state -> lf_file = f;
for (i = 0; i < CACHE_SZ/LINE_SZ; i++) {
state -> lf_cache[i] = 0;
@@ -533,7 +577,7 @@ CORD CORD_from_file_lazy_inner(FILE * f, size_t len)
CORD CORD_from_file_lazy(FILE * f)
{
- register size_t len;
+ register long len;
if (fseek(f, 0l, SEEK_END) != 0) {
ABORT("Bad fd argument - fseek failed");
@@ -542,14 +586,14 @@ CORD CORD_from_file_lazy(FILE * f)
ABORT("Bad fd argument - ftell failed");
}
rewind(f);
- return(CORD_from_file_lazy_inner(f, len));
+ return(CORD_from_file_lazy_inner(f, (size_t)len));
}
# define LAZY_THRESHOLD (128*1024 + 1)
CORD CORD_from_file(FILE * f)
{
- register size_t len;
+ register long len;
if (fseek(f, 0l, SEEK_END) != 0) {
ABORT("Bad fd argument - fseek failed");
@@ -561,6 +605,6 @@ CORD CORD_from_file(FILE * f)
if (len < LAZY_THRESHOLD) {
return(CORD_from_file_eager(f));
} else {
- return(CORD_from_file_lazy_inner(f, len));
+ return(CORD_from_file_lazy_inner(f, (size_t)len));
}
}
diff --git a/cord/de.c b/cord/de.c
index c2cad50a..20810c94 100644
--- a/cord/de.c
+++ b/cord/de.c
@@ -26,20 +26,50 @@
* The redisplay algorithm doesn't let curses do the scrolling.
* The rule for moving the window over the file is suboptimal.
*/
+/* Boehm, February 6, 1995 12:27 pm PST */
+
/* Boehm, May 19, 1994 2:20 pm PDT */
#include <stdio.h>
-#include "../gc.h"
+#include "gc.h"
#include "cord.h"
-#ifdef WIN32
+
+#ifdef THINK_C
+#define MACINTOSH
+#include <ctype.h>
+#endif
+
+#if defined(__BORLANDC__) && !defined(WIN32)
+ /* If this is DOS or win16, we'll fail anyway. */
+ /* Might as well assume win32. */
+# define WIN32
+#endif
+
+#if defined(WIN32)
# include <windows.h>
# include "de_win.h"
+#elif defined(MACINTOSH)
+# include <console.h>
+/* curses emulation. */
+# define initscr()
+# define endwin()
+# define nonl()
+# define noecho() csetmode(C_NOECHO, stdout)
+# define cbreak() csetmode(C_CBREAK, stdout)
+# define refresh()
+# define addch(c) putchar(c)
+# define standout() cinverse(1, stdout)
+# define standend() cinverse(0, stdout)
+# define move(line,col) cgotoxy(col + 1, line + 1, stdout)
+# define clrtoeol() ccleol(stdout)
+# define de_error(s) { fprintf(stderr, s); getchar(); }
+# define LINES 25
+# define COLS 80
#else
# include <curses.h>
# define de_error(s) { fprintf(stderr, s); sleep(2); }
#endif
#include "de_cmds.h"
-
/* List of line number to position mappings, in descending order. */
/* There may be holes. */
typedef struct LineMapRep {
@@ -174,20 +204,26 @@ int screen_size = 0;
# ifndef WIN32
/* Replace a line in the curses stdscr. All control characters are */
/* displayed as upper case characters in standout mode. This isn't */
-/* terribly appropriate for tabs. */
+/* terribly appropriate for tabs. */
void replace_line(int i, CORD s)
{
register int c;
CORD_pos p;
+ size_t len = CORD_len(s);
if (screen == 0 || LINES > screen_size) {
screen_size = LINES;
screen = (CORD *)GC_MALLOC(screen_size * sizeof(CORD));
}
- if (CORD_cmp(screen[i], s) != 0) {
- move(i,0); clrtoeol();
+# if !defined(MACINTOSH)
/* A gross workaround for an apparent curses bug: */
- if (i == LINES-1) s = CORD_substr(s, 0, CORD_len(s) - 1);
+ if (i == LINES-1 && len == COLS) {
+ s = CORD_substr(s, 0, CORD_len(s) - 1);
+ }
+# endif
+ if (CORD_cmp(screen[i], s) != 0) {
+ move(i, 0); clrtoeol(); move(i,0);
+
CORD_FOR (p, s) {
c = CORD_pos_fetch(p) & 0x7f;
if (iscntrl(c)) {
@@ -263,7 +299,7 @@ void normalize_display()
int old_col = dis_col;
dis_granularity = 1;
- if (LINES > 15 && COLS > 15) dis_granularity = 5;
+ if (LINES > 15 && COLS > 15) dis_granularity = 2;
while (dis_line > line) dis_line -= dis_granularity;
while (dis_col > col) dis_col -= dis_granularity;
while (line >= dis_line + LINES) dis_line += dis_granularity;
@@ -273,8 +309,11 @@ void normalize_display()
}
}
-# ifndef WIN32
-# define move_cursor(x,y) move(y,x)
+# if defined(WIN32)
+# elif defined(MACINTOSH)
+# define move_cursor(x,y) cgotoxy(x + 1, y + 1, stdout)
+# else
+# define move_cursor(x,y) move(y,x)
# endif
/* Adjust display so that cursor is visible; move cursor into position */
@@ -309,7 +348,11 @@ void fix_pos()
}
}
-#ifndef WIN32
+#if defined(WIN32)
+# define beep() Beep(1000 /* Hz */, 300 /* msecs */)
+#elif defined(MACINTOSH)
+# define beep() SysBeep(1)
+#else
/*
* beep() is part of some curses packages and not others.
* We try to match the type of the builtin one, if any.
@@ -323,8 +366,6 @@ void fix_pos()
putc('\007', stderr);
return(0);
}
-#else
-# define beep() Beep(1000 /* Hz */, 300 /* msecs */)
#endif
# define NO_PREFIX -1
@@ -489,6 +530,7 @@ void do_command(int c)
}
/* OS independent initialization */
+
void generic_init(void)
{
FILE * f;
@@ -519,6 +561,13 @@ char ** argv;
{
int c;
CORD initial;
+
+#if defined(MACINTOSH)
+ console_options.title = "\pDumb Editor";
+ cshow(stdout);
+ GC_init();
+ argc = ccommand(&argv);
+#endif
if (argc != 2) goto usage;
arg_file_name = argv[1];
@@ -527,9 +576,15 @@ char ** argv;
noecho(); nonl(); cbreak();
generic_init();
while ((c = getchar()) != QUIT) {
- do_command(c);
+ if (c == EOF) break;
+ do_command(c);
}
done:
+ move(LINES-1, 0);
+ clrtoeol();
+ refresh();
+ nl();
+ echo();
endwin();
exit(0);
usage:
diff --git a/cord/de_win.c b/cord/de_win.c
index 13567517..119d0fa0 100644
--- a/cord/de_win.c
+++ b/cord/de_win.c
@@ -10,7 +10,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:21 pm PDT */
+/* Boehm, February 6, 1995 12:29 pm PST */
/*
* The MS Windows specific part of de.
@@ -94,8 +94,8 @@ int APIENTRY WinMain (HINSTANCE hInstance, HINSTANCE hPrevInstance,
hwnd = CreateWindow (szAppName,
FullAppName,
WS_OVERLAPPEDWINDOW | WS_CAPTION, /* Window style */
- CW_USEDEFAULT, 0, /* default pos. */,
- CW_USEDEFAULT, 0, /* default width, height */,
+ CW_USEDEFAULT, 0, /* default pos. */
+ CW_USEDEFAULT, 0, /* default width, height */
NULL, /* No parent */
NULL, /* Window class menu */
hInstance, NULL);
diff --git a/cord/gc.h b/cord/gc.h
new file mode 100644
index 00000000..ab7944ef
--- /dev/null
+++ b/cord/gc.h
@@ -0,0 +1,583 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, October 9, 1995 1:14 pm PDT */
+
+/*
+ * Note that this defines a large number of tuning hooks, which can
+ * safely be ignored in nearly all cases. For normal use it suffices
+ * to call only GC_MALLOC and perhaps GC_REALLOC.
+ * For better performance, also look at GC_MALLOC_ATOMIC, and
+ * GC_enable_incremental. If you need an action to be performed
+ * immediately before an object is collected, look at GC_register_finalizer.
+ * If you are using Solaris threads, look at the end of this file.
+ * Everything else is best ignored unless you encounter performance
+ * problems.
+ */
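+
+/*
+ * Illustrative usage sketch (my_pair and my_cons are invented names,
+ * not part of this interface; string.h is assumed):
+ *
+ *   typedef struct my_pair { struct my_pair * cdr; char * name; } my_pair;
+ *
+ *   my_pair * my_cons(char * name, my_pair * rest)
+ *   {
+ *       my_pair * p = (my_pair *)GC_MALLOC(sizeof(my_pair));
+ *
+ *       p -> name = (char *)GC_MALLOC_ATOMIC(strlen(name) + 1);
+ *       strcpy(p -> name, name);
+ *       p -> cdr = rest;
+ *       return(p);
+ *   }
+ *
+ * No explicit deallocation is needed. The name field holds no
+ * pointers, so it can safely be allocated with GC_MALLOC_ATOMIC.
+ */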
+
+#ifndef _GC_H
+
+# define _GC_H
+
+# if defined(__STDC__) || defined(__cplusplus)
+# define GC_PROTO(args) args
+ typedef void * GC_PTR;
+# else
+# define GC_PROTO(args) ()
+ typedef char * GC_PTR;
+# endif
+
+# ifdef __cplusplus
+ extern "C" {
+# endif
+
+# include <stddef.h>
+
+/* Define word and signed_word to be unsigned and signed types of the */
+/* same size as char * or void *. There seems to be no way to do this */
+/* even semi-portably. The following is probably no better/worse */
+/* than almost anything else. */
+/* The ANSI standard suggests that size_t and ptrdiff_t might be */
+/* better choices. But those appear to have incorrect definitions */
+/* on many systems. Notably "typedef int size_t" seems to be both */
+/* frequent and WRONG. */
+typedef unsigned long GC_word;
+typedef long GC_signed_word;
+
+/* Public read-only variables */
+
+extern GC_word GC_gc_no;/* Counter incremented per collection. */
+ /* Includes empty GCs at startup. */
+
+
+/* Public R/W variables */
+
+extern int GC_quiet; /* Disable statistics output. Only matters if */
+ /* collector has been compiled with statistics */
+ /* enabled. This involves a performance cost, */
+ /* and is thus not the default. */
+
+extern int GC_dont_gc; /* Don't collect unless explicitly requested, e.g. */
+ /* because it's not safe. */
+
+extern int GC_dont_expand;
+ /* Don't expand heap unless explicitly requested */
+ /* or forced to. */
+
+extern int GC_full_freq; /* Number of partial collections between */
+ /* full collections. Matters only if */
+ /* GC_incremental is set. */
+
+extern GC_word GC_non_gc_bytes;
+ /* Bytes not considered candidates for collection. */
+ /* Used only to control scheduling of collections. */
+
+extern GC_word GC_free_space_divisor;
+ /* We try to make sure that we allocate at */
+ /* least N/GC_free_space_divisor bytes between */
+ /* collections, where N is the heap size plus */
+ /* a rough estimate of the root set size. */
+ /* Initially, GC_free_space_divisor = 4. */
+ /* Increasing its value will use less space */
+ /* but more collection time. Decreasing it */
+ /* will appreciably decrease collection time */
+ /* at the expense of space. */
+ /* GC_free_space_divisor = 1 will effectively */
+ /* disable collections. */
+
+
+/* Public procedures */
+/*
+ * general purpose allocation routines, with roughly malloc calling conv.
+ * The atomic versions promise that no relevant pointers are contained
+ * in the object. The nonatomic versions guarantee that the new object
+ * is cleared. GC_malloc_stubborn promises that no changes to the object
+ * will occur after GC_end_stubborn_change has been called on the
+ * result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object
+ * that is scanned for pointers to collectable objects, but is not itself
+ * collectable. GC_malloc_uncollectable and GC_free called on the resulting
+ * object implicitly update GC_non_gc_bytes appropriately.
+ */
+extern GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes));
+extern GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes));
+extern GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes));
+extern GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes));
+
+/* Explicitly deallocate an object. Dangerous if used incorrectly. */
+/* Requires a pointer to the base of an object. */
+/* If the argument is stubborn, it should not be changeable when freed. */
+/* An object should not be enabled for finalization when it is */
+/* explicitly deallocated. */
+/* GC_free(0) is a no-op, as required by ANSI C for free. */
+extern void GC_free GC_PROTO((GC_PTR object_addr));
+
+/*
+ * Stubborn objects may be changed only if the collector is explicitly informed.
+ * The collector is implicitly informed of coming change when such
+ * an object is first allocated. The following routines inform the
+ * collector that an object will no longer be changed, or that it will
+ * once again be changed. Only nonNIL pointer stores into the object
+ * are considered to be changes. The argument to GC_end_stubborn_change
+ * must be exactly the value returned by GC_malloc_stubborn or passed to
+ * GC_change_stubborn. (In the second case it may be an interior pointer
+ * within 512 bytes of the beginning of the object.)
+ * There is a performance penalty for allowing more than
+ * one stubborn object to be changed at once, but it is acceptable to
+ * do so. The same applies to dropping stubborn objects that are still
+ * changeable.
+ */
+extern void GC_change_stubborn GC_PROTO((GC_PTR));
+extern void GC_end_stubborn_change GC_PROTO((GC_PTR));
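+
+/*
+ * Illustrative sketch (my_make_pair is an invented name): a stubborn
+ * object is typically filled in once and then frozen.
+ *
+ *   GC_PTR * my_make_pair(GC_PTR a, GC_PTR b)
+ *   {
+ *       GC_PTR * p = (GC_PTR *)GC_malloc_stubborn(2 * sizeof(GC_PTR));
+ *
+ *       p[0] = a;
+ *       p[1] = b;
+ *       GC_end_stubborn_change((GC_PTR)p);
+ *       return(p);
+ *   }
+ *
+ * A later update must be bracketed by GC_change_stubborn(p) and a
+ * matching GC_end_stubborn_change(p).
+ */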
+
+/* Return a pointer to the base (lowest address) of an object given */
+/* a pointer to a location within the object. */
+/* Return 0 if displaced_pointer doesn't point to within a valid */
+/* object. */
+extern GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer));
+
+/* Given a pointer to the base of an object, return its size in bytes. */
+/* The returned size may be slightly larger than what was originally */
+/* requested. */
+extern size_t GC_size GC_PROTO((GC_PTR object_addr));
+
+/* For compatibility with C library. This is occasionally faster than */
+/* a malloc followed by a bcopy. But if you rely on that, either here */
+/* or with the standard C library, your code is broken. In my */
+/* opinion, it shouldn't have been invented, but now we're stuck. -HB */
+/* The resulting object has the same kind as the original. */
+/* If the argument is stubborn, the result will have changes enabled. */
+/* It is an error to have changes enabled for the original object. */
+/* Follows ANSI conventions for NULL old_object. */
+extern GC_PTR GC_realloc GC_PROTO((GC_PTR old_object,
+ size_t new_size_in_bytes));
+
+/* Explicitly increase the heap size. */
+/* Returns 0 on failure, 1 on success. */
+extern int GC_expand_hp GC_PROTO((size_t number_of_bytes));
+
+/* Limit the heap size to n bytes. Useful when you're debugging, */
+/* especially on systems that don't handle running out of memory well. */
+/* n == 0 ==> unbounded. This is the default. */
+extern void GC_set_max_heap_size GC_PROTO((GC_word n));
+
+/* Clear the set of root segments. Wizards only. */
+extern void GC_clear_roots GC_PROTO((void));
+
+/* Add a root segment. Wizards only. */
+extern void GC_add_roots GC_PROTO((char * low_address,
+ char * high_address_plus_1));
+
+/* Add a displacement to the set of those considered valid by the */
+/* collector. GC_register_displacement(n) means that if p was returned */
+/* by GC_malloc, then (char *)p + n will be considered to be a valid */
+/* pointer to p. N must be small and less than the size of p. */
+/* (All pointers to the interior of objects from the stack are */
+/* considered valid in any case. This applies to heap objects and */
+/* static data.) */
+/* Preferably, this should be called before any other GC procedures. */
+/* Calling it later adds to the probability of excess memory */
+/* retention. */
+/* This is a no-op if the collector was compiled with recognition of */
+/* arbitrary interior pointers enabled, which is now the default. */
+void GC_register_displacement GC_PROTO((GC_word n));
+
+/* The following version should be used if any debugging allocation is */
+/* being done. */
+void GC_debug_register_displacement GC_PROTO((GC_word n));
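+
+/*
+ * Illustrative sketch (struct my_wrapper is an invented type): if the
+ * only pointer retained to some objects is to an interior field at a
+ * fixed offset, register that offset once, e.g. early in main:
+ *
+ *   struct my_wrapper { GC_word tag; char body[8]; };
+ *
+ *   GC_register_displacement(offsetof(struct my_wrapper, body));
+ *
+ * Thereafter a pointer to the body field alone keeps the enclosing
+ * object live. With ALL_INTERIOR_POINTERS (the default) the call is
+ * a no-op.
+ */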
+
+/* Explicitly trigger a full, world-stop collection. */
+void GC_gcollect GC_PROTO((void));
+
+/* Trigger a full world-stopped collection. Abort the collection if */
+/* and when stop_func returns a nonzero value. Stop_func will be */
+/* called frequently, and should be reasonably fast. This works even */
+/* if virtual dirty bits, and hence incremental collection is not */
+/* available for this architecture. Collections can be aborted faster */
+/* than normal pause times for incremental collection. However, */
+/* aborted collections do no useful work; the next collection needs */
+/* to start from the beginning. */
+typedef int (* GC_stop_func) GC_PROTO((void));
+int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
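+
+/*
+ * Illustrative sketch (my_deadline and my_past_deadline are invented
+ * names): a stop function that bounds the collection pause.
+ *
+ *   #include <time.h>
+ *
+ *   static clock_t my_deadline;
+ *
+ *   static int my_past_deadline(void)
+ *   {
+ *       return(clock() > my_deadline);
+ *   }
+ *
+ *   my_deadline = clock() + CLOCKS_PER_SEC / 20;
+ *   (void) GC_try_to_collect(my_past_deadline);
+ *
+ * The last two lines run wherever a bounded pause is wanted; an
+ * aborted collection does no useful work.
+ */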
+
+/* Return the number of bytes in the heap. Excludes collector private */
+/* data structures. Includes empty blocks and fragmentation loss. */
+/* Includes some pages that were allocated but never written. */
+size_t GC_get_heap_size GC_PROTO((void));
+
+/* Return the number of bytes allocated since the last collection. */
+size_t GC_get_bytes_since_gc GC_PROTO((void));
+
+/* Enable incremental/generational collection. */
+/* Not advisable unless dirty bits are */
+/* available or most heap objects are */
+/* pointerfree(atomic) or immutable. */
+/* Don't use in leak finding mode. */
+/* Ignored if GC_dont_gc is true. */
+void GC_enable_incremental GC_PROTO((void));
+
+/* Perform some garbage collection work, if appropriate. */
+/* Return 0 if there is no more work to be done. */
+/* Typically performs an amount of work corresponding roughly */
+/* to marking from one page. May do more work if further */
+/* progress requires it, e.g. if incremental collection is */
+/* disabled. It is reasonable to call this in a wait loop */
+/* until it returns 0. */
+int GC_collect_a_little GC_PROTO((void));
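+
+/*
+ * Illustrative sketch (my_input_is_pending is an invented predicate):
+ * donate idle time to the collector, as suggested above.
+ *
+ *   while (!my_input_is_pending() && GC_collect_a_little()) {
+ *       continue;
+ *   }
+ */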
+
+/* Allocate an object of size lb bytes. The client guarantees that */
+/* as long as the object is live, it will be referenced by a pointer */
+/* that points to somewhere within the first 256 bytes of the object. */
+/* (This should normally be declared volatile to prevent the compiler */
+/* from invalidating this assertion.) This routine is only useful */
+/* if a large array is being allocated. It reduces the chance of */
+/* accidentally retaining such an array as a result of scanning an */
+/* integer that happens to be an address inside the array. (Actually, */
+/* it reduces the chance of the allocator not finding space for such */
+/* an array, since it will try hard to avoid introducing such a false */
+/* reference.) On a SunOS 4.X or MS Windows system this is recommended */
+/* for arrays likely to be larger than 100K or so. For other systems, */
+/* or if the collector is not configured to recognize all interior */
+/* pointers, the threshold is normally much higher. */
+extern GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
+extern GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
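+
+/*
+ * Illustrative sketch (my_big_table is an invented name). The base
+ * pointer is kept in a volatile variable, as recommended above, so
+ * the compiler cannot optimize the reference away:
+ *
+ *   GC_PTR * volatile my_big_table;
+ *
+ *   my_big_table = (GC_PTR *)
+ *       GC_malloc_ignore_off_page(1000000 * sizeof(GC_PTR));
+ */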
+
+/* Debugging (annotated) allocation. GC_gcollect will check */
+/* objects allocated in this way for overwrites, etc. */
+extern GC_PTR GC_debug_malloc
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern GC_PTR GC_debug_malloc_atomic
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern GC_PTR GC_debug_malloc_uncollectable
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern GC_PTR GC_debug_malloc_stubborn
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern void GC_debug_free GC_PROTO((GC_PTR object_addr));
+extern GC_PTR GC_debug_realloc
+ GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
+ char * descr_string, int descr_int));
+
+void GC_debug_change_stubborn GC_PROTO((GC_PTR));
+void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
+# ifdef GC_DEBUG
+# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
+# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
+# define GC_MALLOC_UNCOLLECTABLE(sz) GC_debug_malloc_uncollectable(sz, \
+ __FILE__, __LINE__)
+# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, __FILE__, \
+ __LINE__)
+# define GC_FREE(p) GC_debug_free(p)
+# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+ GC_register_finalizer(GC_base(p), GC_debug_invoke_finalizer, \
+ GC_make_closure(f,d), of, od)
+# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
+ GC_register_finalizer_ignore_self( \
+ GC_base(p), GC_debug_invoke_finalizer, \
+ GC_make_closure(f,d), of, od)
+# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
+ __LINE__)
+# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
+# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
+# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
+ GC_general_register_disappearing_link(link, GC_base(obj))
+# define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
+# else
+# define GC_MALLOC(sz) GC_malloc(sz)
+# define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
+# define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
+# define GC_REALLOC(old, sz) GC_realloc(old, sz)
+# define GC_FREE(p) GC_free(p)
+# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
+ GC_register_finalizer(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
+ GC_register_finalizer_ignore_self(p, f, d, of, od)
+# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
+# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
+# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
+# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
+ GC_general_register_disappearing_link(link, obj)
+# define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
+# endif
+/* The following are included because they are often convenient, and */
+/* reduce the chance for a misspecifed size argument. But calls may */
+/* expand to something syntactically incorrect if t is a complicated */
+/* type expression. */
+# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
+# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
+# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
+# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
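+
+/*
+ * Illustrative sketch (struct my_point is an invented type):
+ *
+ *   struct my_point { double x; double y; };
+ *
+ *   struct my_point * p = GC_NEW(struct my_point);
+ *
+ * Compiling the client with -DGC_DEBUG switches the same source to
+ * the checked GC_debug_* entry points, with file and line information
+ * recorded in each object.
+ */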
+
+/* Finalization. Some of these primitives are grossly unsafe. */
+/* The idea is to make them both cheap, and sufficient to build */
+/* a safer layer, closer to PCedar finalization. */
+/* The interface represents my conclusions from a long discussion */
+/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
+/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
+/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
+typedef void (*GC_finalization_proc)
+ GC_PROTO((GC_PTR obj, GC_PTR client_data));
+
+extern void GC_register_finalizer
+ GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+ GC_finalization_proc *ofn, GC_PTR *ocd));
+ /* When obj is no longer accessible, invoke */
+ /* (*fn)(obj, cd). If a and b are inaccessible, and */
+ /* a points to b (after disappearing links have been */
+ /* made to disappear), then only a will be */
+ /* finalized. (If this does not create any new */
+ /* pointers to b, then b will be finalized after the */
+ /* next collection.) Any finalizable object that */
+ /* is reachable from itself by following one or more */
+ /* pointers will not be finalized (or collected). */
+ /* Thus cycles involving finalizable objects should */
+ /* be avoided, or broken by disappearing links. */
+ /* Fn should terminate as quickly as possible, and */
+ /* defer extended computation. */
+ /* All but the last finalizer registered for an object */
+ /* are ignored. */
+ /* Finalization may be removed by passing 0 as fn. */
+ /* The old finalizer and client data are stored in */
+ /* *ofn and *ocd. */
+ /* Fn is never invoked on an accessible object, */
+ /* provided hidden pointers are converted to real */
+ /* pointers only if the allocation lock is held, and */
+ /* such conversions are not performed by finalization */
+ /* routines. */
+ /* If GC_register_finalizer is aborted as a result of */
+ /* a signal, the object may be left with no */
+ /* finalization, even if neither the old nor new */
+ /* finalizer were NULL. */
+ /* Obj should be the nonNULL starting address of an */
+ /* object allocated by GC_malloc or friends. */
+ /* Note that any garbage collectable object referenced */
+ /* by cd will be considered accessible until the */
+ /* finalizer is invoked. */
+
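+/*
+ * Illustrative sketch (my_thing, my_note_reclaimed and the counter
+ * are invented for the example): count objects as they are finalized.
+ *
+ *   typedef struct my_thing { int value; } my_thing;
+ *
+ *   static int my_reclaimed_count = 0;
+ *
+ *   static void my_note_reclaimed(GC_PTR obj, GC_PTR client_data)
+ *   {
+ *       *(int *)client_data += 1;
+ *   }
+ *
+ *   my_thing * my_make_thing(void)
+ *   {
+ *       GC_finalization_proc old_fn;
+ *       GC_PTR old_cd;
+ *       my_thing * t = (my_thing *)GC_MALLOC(sizeof(my_thing));
+ *
+ *       GC_REGISTER_FINALIZER((GC_PTR)t, my_note_reclaimed,
+ *                             (GC_PTR)(&my_reclaimed_count),
+ *                             &old_fn, &old_cd);
+ *       return(t);
+ *   }
+ *
+ * The macro form is used so that the same source also works with the
+ * debugging allocators.
+ */
+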
+/* Another version of the above follows. It ignores */
+/* self-cycles, i.e. pointers from a finalizable object to */
+/* itself. There is a stylistic argument that this is wrong, */
+/* but it's unavoidable for C++, since the compiler may */
+/* silently introduce these. It's also benign in that specific */
+/* case. */
+extern void GC_register_finalizer_ignore_self
+ GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+ GC_finalization_proc *ofn, GC_PTR *ocd));
+
+/* The following routine may be used to break cycles between */
+/* finalizable objects, thus causing cyclic finalizable */
+/* objects to be finalized in the correct order. Standard */
+/* use involves calling GC_register_disappearing_link(&p), */
+/* where p is a pointer that is not followed by finalization */
+/* code, and should not be considered in determining */
+/* finalization order. */
+extern int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
+ /* Link should point to a field of a heap allocated */
+ /* object obj. *link will be cleared when obj is */
+ /* found to be inaccessible. This happens BEFORE any */
+ /* finalization code is invoked, and BEFORE any */
+ /* decisions about finalization order are made. */
+ /* This is useful in telling the finalizer that */
+ /* some pointers are not essential for proper */
+ /* finalization. This may avoid finalization cycles. */
+ /* Note that obj may be resurrected by another */
+ /* finalizer, and thus the clearing of *link may */
+ /* be visible to non-finalization code. */
+ /* There's an argument that an arbitrary action should */
+ /* be allowed here, instead of just clearing a pointer. */
+ /* But this causes problems if that action alters, or */
+ /* examines connectivity. */
+ /* Returns 1 if link was already registered, 0 */
+ /* otherwise. */
+ /* Only exists for backward compatibility. See below: */
+
+extern int GC_general_register_disappearing_link
+ GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
+ /* A slight generalization of the above. *link is */
+ /* cleared when obj first becomes inaccessible. This */
+ /* can be used to implement weak pointers easily and */
+ /* safely. Typically link will point to a location */
+ /* holding a disguised pointer to obj. (A pointer */
+ /* inside an "atomic" object is effectively */
+ /* disguised.) In this way soft */
+ /* pointers are broken before any object */
+ /* reachable from them is finalized. Each link */
+ /* may be registered only once, i.e. with one obj */
+ /* value. This was added after a long email discussion */
+ /* with John Ellis. */
+ /* Obj must be a pointer to the first word of an object */
+ /* we allocated. It is unsafe to explicitly deallocate */
+ /* the object containing link. Explicitly deallocating */
+ /* obj may or may not cause link to eventually be */
+ /* cleared. */
+extern int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
+ /* Returns 0 if link was not actually registered. */
+ /* Undoes a registration by either of the above two */
+ /* routines. */
+
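+/*
+ * Illustrative weak-reference sketch (my_weak_ref and my_make_weak_ref
+ * are invented names; target is assumed to be a pointer returned by
+ * GC_malloc or friends):
+ *
+ *   typedef struct my_weak_ref { GC_PTR referent; } my_weak_ref;
+ *
+ *   my_weak_ref * my_make_weak_ref(GC_PTR target)
+ *   {
+ *       my_weak_ref * r = (my_weak_ref *)
+ *                         GC_MALLOC_ATOMIC(sizeof(my_weak_ref));
+ *
+ *       r -> referent = target;
+ *       GC_GENERAL_REGISTER_DISAPPEARING_LINK(&(r -> referent), target);
+ *       return(r);
+ *   }
+ *
+ * The referent field lives in an atomic object, so it is effectively
+ * disguised and does not by itself keep the target alive; the
+ * collector clears it once the target becomes otherwise inaccessible.
+ */
+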
+/* Auxiliary fns to make finalization work correctly with displaced */
+/* pointers introduced by the debugging allocators. */
+extern GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
+extern void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
+
+/* GC_set_warn_proc can be used to redirect or filter warning messages. */
+typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
+extern GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
+ /* Returns old warning procedure. */
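+
+/*
+ * Illustrative sketch (my_warn_proc is an invented name): each
+ * warning message is typically a printf-style format string taking
+ * the single GC_word argument.
+ *
+ *   static void my_warn_proc(char * msg, GC_word arg)
+ *   {
+ *       fprintf(stderr, msg, arg);
+ *   }
+ *
+ *   GC_warn_proc old_proc = GC_set_warn_proc(my_warn_proc);
+ */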
+
+/* The following is intended to be used by a higher level */
+/* (e.g. cedar-like) finalization facility. It is expected */
+/* that finalization code will arrange for hidden pointers to */
+/* disappear. Otherwise objects can be accessed after they */
+/* have been collected. */
+/* Note that putting pointers in atomic objects or in */
+/* nonpointer slots of "typed" objects is equivalent to */
+/* disguising them in this way, and may have other advantages. */
+# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
+ typedef GC_word GC_hidden_pointer;
+# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
+# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
+ /* Converting a hidden pointer to a real pointer requires verifying */
+ /* that the object still exists. This involves acquiring the */
+ /* allocator lock to avoid a race with the collector. */
+# endif /* I_HIDE_POINTERS */
+
+typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
+extern GC_PTR GC_call_with_alloc_lock
+ GC_PROTO((GC_fn_type fn, GC_PTR client_data));
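+
+/*
+ * Illustrative sketch (my_hidden and my_reveal are invented names;
+ * assumes this file was included with I_HIDE_POINTERS defined, so
+ * that GC_hidden_pointer and REVEAL_POINTER are visible): convert a
+ * hidden pointer back to a real one while holding the allocation lock.
+ *
+ *   static GC_hidden_pointer my_hidden;
+ *
+ *   static GC_PTR my_reveal(GC_PTR client_data)
+ *   {
+ *       GC_hidden_pointer h = *(GC_hidden_pointer *)client_data;
+ *
+ *       return(h == 0 ? (GC_PTR)0 : REVEAL_POINTER(h));
+ *   }
+ *
+ *   GC_PTR p = GC_call_with_alloc_lock(my_reveal, (GC_PTR)(&my_hidden));
+ *
+ * Here my_hidden is assumed to hold HIDE_POINTER(q) for some object q,
+ * and to be cleared to 0 by a disappearing link or finalizer when q
+ * is reclaimed.
+ */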
+
+/* Check that p and q point to the same object. */
+/* Fail conspicuously if they don't. */
+/* Returns the first argument. */
+/* Succeeds if neither p nor q points to the heap. */
+/* May succeed if both p and q point to between heap objects. */
+extern GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));
+
+/* Checked pointer pre- and post- increment operations. Note that */
+/* the second argument is in units of bytes, not multiples of the */
+/* object size. This should either be invoked from a macro, or the */
+/* call should be automatically generated. */
+extern GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
+extern GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));
+
+/* Check that p is visible */
+/* to the collector as a possibly pointer-containing location. */
+/* If it isn't fail conspicuously. */
+/* Returns the argument in all cases. May erroneously succeed */
+/* in hard cases. (This is intended for debugging use with */
+/* untyped allocations. The idea is that it should be possible, though */
+/* slow, to add such a call to all indirect pointer stores.) */
+/* Currently useless for multithreaded worlds. */
+extern GC_PTR GC_is_visible GC_PROTO((GC_PTR p));
+
+/* Check that if p is a pointer to a heap page, then it points to */
+/* a valid displacement within a heap object. */
+/* Fail conspicuously if this property does not hold. */
+/* Uninteresting with ALL_INTERIOR_POINTERS. */
+/* Always returns its argument. */
+extern GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));
+
+/* Safer, but slow, pointer addition. Probably useful mainly with */
+/* a preprocessor. Useful only for heap pointers. */
+#ifdef GC_DEBUG
+# define GC_PTR_ADD3(x, n, type_of_result) \
+ ((type_of_result)GC_same_obj((x)+(n), (x)))
+# define GC_PRE_INCR3(x, n, type_of_result) \
+ ((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)))
+# define GC_POST_INCR2(x, type_of_result) \
+ ((type_of_result)GC_post_incr(&(x), sizeof(*x)))
+# ifdef __GNUC__
+# define GC_PTR_ADD(x, n) \
+ GC_PTR_ADD3(x, n, typeof(x))
+# define GC_PRE_INCR(x, n) \
+ GC_PRE_INCR3(x, n, typeof(x))
+# define GC_POST_INCR(x, n) \
+ GC_POST_INCR2(x, typeof(x))
+# else
+ /* We can't do this right without typeof, which ANSI */
+ /* decided was not sufficiently useful. Repeatedly */
+ /* mentioning the arguments seems too dangerous to be */
+ /* useful. So does not casting the result. */
+# define GC_PTR_ADD(x, n) ((x)+(n))
+# endif
+#else /* !GC_DEBUG */
+# define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
+# define GC_PTR_ADD(x, n) ((x)+(n))
+# define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
+# define GC_PRE_INCR(x, n) ((x) += (n))
+# define GC_POST_INCR2(x, type_of_result) ((x)++)
+# define GC_POST_INCR(x, n) ((x)++)
+#endif
+
+/* Safer assignment of a pointer to a nonstack location. */
+#ifdef GC_DEBUG
+# ifdef __STDC__
+# define GC_PTR_STORE(p, q) \
+ (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
+# else
+# define GC_PTR_STORE(p, q) \
+ (*(char **)GC_is_visible(p) = GC_is_valid_displacement(q))
+# endif
+#else /* !GC_DEBUG */
+# define GC_PTR_STORE(p, q) (*(p) = (q))
+#endif
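+
+/*
+ * Illustrative sketch (my_example and struct my_box are invented
+ * names): with GC_DEBUG defined both operations expand to checked
+ * calls, and to plain C otherwise. Storing the interior pointer q
+ * assumes the collector recognizes interior displacements (the
+ * ALL_INTERIOR_POINTERS default).
+ *
+ *   void my_example(void)
+ *   {
+ *       struct my_box { char * contents; } * b;
+ *       char * p = (char *)GC_MALLOC(100);
+ *       char * q = GC_PTR_ADD(p, 20);
+ *
+ *       b = (struct my_box *)GC_MALLOC(sizeof(struct my_box));
+ *       GC_PTR_STORE(&(b -> contents), q);
+ *   }
+ */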
+
+
+#ifdef SOLARIS_THREADS
+/* We need to intercept calls to many of the threads primitives, so */
+/* that we can locate thread stacks and stop the world. */
+/* Note also that the collector cannot see thread specific data. */
+/* Thread specific data should generally consist of pointers to */
+/* uncollectable objects, which are deallocated using the destructor */
+/* facility in thr_keycreate. */
+# include <thread.h>
+# include <signal.h>
+ int GC_thr_create(void *stack_base, size_t stack_size,
+ void *(*start_routine)(void *), void *arg, long flags,
+ thread_t *new_thread);
+ int GC_thr_join(thread_t wait_for, thread_t *departed, void **status);
+ int GC_thr_suspend(thread_t target_thread);
+ int GC_thr_continue(thread_t target_thread);
+ void * GC_dlopen(const char *path, int mode);
+
+# define thr_create GC_thr_create
+# define thr_join GC_thr_join
+# define thr_suspend GC_thr_suspend
+# define thr_continue GC_thr_continue
+# define dlopen GC_dlopen
+
+/* This returns a list of objects, linked through their first */
+/* word. Its use can greatly reduce lock contention problems, since */
+/* the allocation lock can be acquired and released many fewer times. */
+GC_PTR GC_malloc_many(size_t lb);
+#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
+ /* in returned list. */
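+
+/*
+ * Illustrative sketch (my_free_list and my_small_alloc are invented
+ * names; all requests are assumed to use the same size lb):
+ *
+ *   static GC_PTR my_free_list = 0;
+ *
+ *   GC_PTR my_small_alloc(size_t lb)
+ *   {
+ *       GC_PTR result;
+ *
+ *       if (my_free_list == 0) {
+ *           my_free_list = GC_malloc_many(lb);
+ *           if (my_free_list == 0) return(0);
+ *       }
+ *       result = my_free_list;
+ *       my_free_list = GC_NEXT(result);
+ *       GC_NEXT(result) = 0;
+ *       return(result);
+ *   }
+ */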
+
+#endif /* SOLARIS_THREADS */
+
+/*
+ * If you are planning on putting
+ * the collector in a SunOS 5 dynamic library, you need to call GC_INIT()
+ * from the statically loaded program section.
+ * This circumvents a Solaris 2.X (X<=4) linker bug.
+ */
+#if defined(sparc) || defined(__sparc)
+# define GC_INIT() { extern end, etext; \
+ GC_noop(&end, &etext); }
+#else
+# define GC_INIT()
+#endif
+
+#ifdef __cplusplus
+ } /* end of extern "C" */
+#endif
+
+#endif /* _GC_H */
diff --git a/cord/cord_pos.h b/cord/private/cord_pos.h
index a07d07f6..a07d07f6 100644
--- a/cord/cord_pos.h
+++ b/cord/private/cord_pos.h
diff --git a/dbg_mlc.c b/dbg_mlc.c
index 87275d66..432d729e 100644
--- a/dbg_mlc.c
+++ b/dbg_mlc.c
@@ -1,6 +1,6 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -11,33 +11,17 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:07 pm PDT */
+/* Boehm, October 9, 1995 1:16 pm PDT */
# include "gc_priv.h"
/* Do we want to and know how to save the call stack at the time of */
/* an allocation? How much space do we want to use in each object? */
-# if defined(SPARC) && defined(SUNOS4)
-# include <machine/frame.h>
-# define SAVE_CALL_CHAIN
-# define NFRAMES 5 /* Number of frames to save. */
-# define NARGS 2 /* Mumber of arguments to save for each call. */
-# if NARGS > 6
- --> We only know how to to get the first 6 arguments
-# endif
-# endif
-
# define START_FLAG ((word)0xfedcedcb)
# define END_FLAG ((word)0xbcdecdef)
/* Stored both one past the end of user object, and one before */
/* the end of the object as seen by the allocator. */
-#ifdef SAVE_CALL_CHAIN
- struct callinfo {
- word ci_pc;
- word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
- };
-#endif
/* Object header */
typedef struct {
@@ -56,50 +40,6 @@ typedef struct {
#undef ROUNDED_UP_WORDS
#define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
-#if defined(SPARC) && defined(SUNOS4)
-/* Fill in the pc and argument information for up to NFRAMES of my */
-/* callers. Ignore my frame and my callers frame. */
-void GC_save_callers (info)
-struct callinfo info[NFRAMES];
-{
- struct frame *frame;
- struct frame *fp;
- int nframes = 0;
- word GC_save_regs_in_stack();
-
- frame = (struct frame *) GC_save_regs_in_stack ();
-
- for (fp = frame -> fr_savfp; fp != 0 && nframes < NFRAMES;
- fp = fp -> fr_savfp, nframes++) {
- register int i;
-
- info[nframes].ci_pc = fp->fr_savpc;
- for (i = 0; i < NARGS; i++) {
- info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
- }
- }
- if (nframes < NFRAMES) info[nframes].ci_pc = 0;
-}
-
-void GC_print_callers (info)
-struct callinfo info[NFRAMES];
-{
- register int i,j;
-
- GC_err_printf0("\tCall chain at allocation:\n");
- for (i = 0; i < NFRAMES; i++) {
- if (info[i].ci_pc == 0) break;
- GC_err_printf1("\t##PC##= 0x%X\n\t\targs: ", info[i].ci_pc);
- for (j = 0; j < NARGS; j++) {
- if (j != 0) GC_err_printf0(", ");
- GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
- ~(info[i].ci_arg[j]));
- }
- GC_err_printf0("\n");
- }
-}
-
-#endif /* SPARC & SUNOS4 */
#ifdef SAVE_CALL_CHAIN
# define ADD_CALL_CHAIN(base) GC_save_callers(((oh *)(base)) -> oh_ci)
@@ -204,7 +144,7 @@ ptr_t p, clobbered_addr;
if (clobbered_addr <= (ptr_t)(&(ohdr -> oh_sz))
|| ohdr -> oh_string == 0) {
GC_err_printf1("<smashed>, appr. sz = %ld)\n",
- BYTES_TO_WORDS(GC_size((ptr_t)ohdr)));
+ GC_size((ptr_t)ohdr) - DEBUG_BYTES);
} else {
if (ohdr -> oh_string[0] == '\0') {
GC_err_puts("EMPTY(smashed?)");
@@ -213,6 +153,7 @@ ptr_t p, clobbered_addr;
}
GC_err_printf2(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
(unsigned long)(ohdr -> oh_sz));
+ PRINT_CALL_CHAIN(ohdr);
}
}
@@ -225,16 +166,27 @@ void GC_start_debugging()
GC_register_displacement((word)sizeof(oh));
}
+# if defined(__STDC__) || defined(__cplusplus)
+ void GC_debug_register_displacement(GC_word offset)
+# else
+ void GC_debug_register_displacement(offset)
+ GC_word offset;
+# endif
+{
+ GC_register_displacement(offset);
+ GC_register_displacement((word)sizeof(oh) + offset);
+}
+
# ifdef __STDC__
- extern_ptr_t GC_debug_malloc(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc(size_t lb, char * s, int i)
# else
- extern_ptr_t GC_debug_malloc(lb, s, i)
+ GC_PTR GC_debug_malloc(lb, s, i)
size_t lb;
char * s;
int i;
# endif
{
- extern_ptr_t result = GC_malloc(lb + DEBUG_BYTES);
+ GC_PTR result = GC_malloc(lb + DEBUG_BYTES);
if (result == 0) {
GC_err_printf1("GC_debug_malloc(%ld) returning NIL (",
@@ -252,15 +204,15 @@ void GC_start_debugging()
#ifdef STUBBORN_ALLOC
# ifdef __STDC__
- extern_ptr_t GC_debug_malloc_stubborn(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc_stubborn(size_t lb, char * s, int i)
# else
- extern_ptr_t GC_debug_malloc_stubborn(lb, s, i)
+ GC_PTR GC_debug_malloc_stubborn(lb, s, i)
size_t lb;
char * s;
int i;
# endif
{
- extern_ptr_t result = GC_malloc_stubborn(lb + DEBUG_BYTES);
+ GC_PTR result = GC_malloc_stubborn(lb + DEBUG_BYTES);
if (result == 0) {
GC_err_printf1("GC_debug_malloc(%ld) returning NIL (",
@@ -277,9 +229,9 @@ void GC_start_debugging()
}
void GC_debug_change_stubborn(p)
-extern_ptr_t p;
+GC_PTR p;
{
- register extern_ptr_t q = GC_base(p);
+ register GC_PTR q = GC_base(p);
register hdr * hhdr;
if (q == 0) {
@@ -297,9 +249,9 @@ extern_ptr_t p;
}
void GC_debug_end_stubborn_change(p)
-extern_ptr_t p;
+GC_PTR p;
{
- register extern_ptr_t q = GC_base(p);
+ register GC_PTR q = GC_base(p);
register hdr * hhdr;
if (q == 0) {
@@ -319,15 +271,15 @@ extern_ptr_t p;
#endif /* STUBBORN_ALLOC */
# ifdef __STDC__
- extern_ptr_t GC_debug_malloc_atomic(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc_atomic(size_t lb, char * s, int i)
# else
- extern_ptr_t GC_debug_malloc_atomic(lb, s, i)
+ GC_PTR GC_debug_malloc_atomic(lb, s, i)
size_t lb;
char * s;
int i;
# endif
{
- extern_ptr_t result = GC_malloc_atomic(lb + DEBUG_BYTES);
+ GC_PTR result = GC_malloc_atomic(lb + DEBUG_BYTES);
if (result == 0) {
GC_err_printf1("GC_debug_malloc_atomic(%ld) returning NIL (",
@@ -344,15 +296,15 @@ extern_ptr_t p;
}
# ifdef __STDC__
- extern_ptr_t GC_debug_malloc_uncollectable(size_t lb, char * s, int i)
+ GC_PTR GC_debug_malloc_uncollectable(size_t lb, char * s, int i)
# else
- extern_ptr_t GC_debug_malloc_uncollectable(lb, s, i)
+ GC_PTR GC_debug_malloc_uncollectable(lb, s, i)
size_t lb;
char * s;
int i;
# endif
{
- extern_ptr_t result = GC_malloc_uncollectable(lb + DEBUG_BYTES);
+ GC_PTR result = GC_malloc_uncollectable(lb + DEBUG_BYTES);
if (result == 0) {
GC_err_printf1("GC_debug_malloc_uncollectable(%ld) returning NIL (",
@@ -370,13 +322,13 @@ extern_ptr_t p;
# ifdef __STDC__
- void GC_debug_free(extern_ptr_t p)
+ void GC_debug_free(GC_PTR p)
# else
void GC_debug_free(p)
- extern_ptr_t p;
+ GC_PTR p;
# endif
{
- register extern_ptr_t base = GC_base(p);
+ register GC_PTR base = GC_base(p);
register ptr_t clobbered;
if (base == 0) {
@@ -408,18 +360,18 @@ extern_ptr_t p;
}
# ifdef __STDC__
- extern_ptr_t GC_debug_realloc(extern_ptr_t p, size_t lb, char *s, int i)
+ GC_PTR GC_debug_realloc(GC_PTR p, size_t lb, char *s, int i)
# else
- extern_ptr_t GC_debug_realloc(p, lb, s, i)
- extern_ptr_t p;
+ GC_PTR GC_debug_realloc(p, lb, s, i)
+ GC_PTR p;
size_t lb;
char *s;
int i;
# endif
{
- register extern_ptr_t base = GC_base(p);
+ register GC_PTR base = GC_base(p);
register ptr_t clobbered;
- register extern_ptr_t result = GC_debug_malloc(lb, s, i);
+ register GC_PTR result = GC_debug_malloc(lb, s, i);
register size_t copy_sz = lb;
register size_t old_sz;
register hdr * hhdr;
@@ -503,20 +455,25 @@ word dummy;
/* I hold the allocation lock. Normally called by collector. */
void GC_check_heap_proc()
{
+# ifndef SMALL_CONFIG
+ if ((sizeof(oh) & (2 * sizeof(word) - 1)) != 0) {
+ ABORT("Alignment problem: object header has inappropriate size\n");
+ }
+# endif
GC_apply_to_all_blocks(GC_check_heap_block, (word)0);
}
struct closure {
GC_finalization_proc cl_fn;
- extern_ptr_t cl_data;
+ GC_PTR cl_data;
};
# ifdef __STDC__
void * GC_make_closure(GC_finalization_proc fn, void * data)
# else
- extern_ptr_t GC_make_closure(fn, data)
+ GC_PTR GC_make_closure(fn, data)
GC_finalization_proc fn;
- extern_ptr_t data;
+ GC_PTR data;
# endif
{
struct closure * result =
@@ -524,7 +481,7 @@ struct closure {
result -> cl_fn = fn;
result -> cl_data = data;
- return((extern_ptr_t)result);
+ return((GC_PTR)result);
}
# ifdef __STDC__
@@ -537,6 +494,6 @@ struct closure {
{
register struct closure * cl = (struct closure *) data;
- (*(cl -> cl_fn))((extern_ptr_t)((char *)obj + sizeof(oh)), cl -> cl_data);
+ (*(cl -> cl_fn))((GC_PTR)((char *)obj + sizeof(oh)), cl -> cl_data);
}
diff --git a/dyn_load.c b/dyn_load.c
index 28817b0c..e3f2ac69 100644
--- a/dyn_load.c
+++ b/dyn_load.c
@@ -13,7 +13,7 @@
* Original author: Bill Janssen
* Heavily modified by Hans Boehm and others
*/
-/* Boehm, May 19, 1994 1:57 pm PDT */
+/* Boehm, September 21, 1995 5:57 pm PDT */
/*
* This is incredibly OS specific code for tracking down data sections in
@@ -26,17 +26,33 @@
* None of this is safe with dlclose and incremental collection.
* But then not much of anything is safe in the presence of dlclose.
*/
-#include <sys/types.h>
+#ifndef MACOS
+# include <sys/types.h>
+#endif
#include "gc_priv.h"
+/* BTL: avoid circular redefinition of dlopen if SOLARIS_THREADS defined */
+# if defined(SOLARIS_THREADS) && defined(dlopen)
+ /* To support threads in Solaris, gc.h interposes on dlopen by */
+ /* defining "dlopen" to be "GC_dlopen", which is implemented below. */
+ /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the */
+ /* real system dlopen() in their implementation. We first remove */
+ /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
+# undef dlopen
+# define GC_must_restore_redefined_dlopen
+# else
+# undef GC_must_restore_redefined_dlopen
+# endif
+
#if (defined(DYNAMIC_LOADING) || defined(MSWIN32)) && !defined(PCR)
-#if !defined(SUNOS4) && !defined(SUNOS5) && !defined(IRIX5) && !defined(MSWIN32)
+#if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && !defined(MSWIN32) && !defined(ALPHA)
--> We only know how to find data segments of dynamic libraries under SunOS,
- --> IRIX5 and Win32. Additional SVR4 variants might not be too hard to add.
+ --> IRIX5, DRSNX and Win32. Additional SVR4 variants might not be too
+ --> hard to add.
#endif
#include <stdio.h>
-#ifdef SUNOS5
+#ifdef SUNOS5DL
# include <sys/elf.h>
# include <dlfcn.h>
# include <link.h>
@@ -52,7 +68,7 @@
#endif
-#ifdef SUNOS5
+#ifdef SUNOS5DL
#ifdef LINT
Elf32_Dyn _DYNAMIC;
@@ -65,8 +81,23 @@ GC_FirstDLOpenedLinkMap()
Elf32_Dyn *dp;
struct r_debug *r;
static struct link_map * cachedResult = 0;
+ static Elf32_Dyn *dynStructureAddr = 0;
+ /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */
+
+# ifdef SUNOS53_SHARED_LIB
+ /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
+ /* up properly in dynamically linked .so's. This means we have */
+ /* to use its value in the set of original object files loaded */
+ /* at program startup. */
+ if( dynStructureAddr == 0 ) {
+ void* startupSyms = dlopen(0, RTLD_LAZY);
+ dynStructureAddr = (Elf32_Dyn*)dlsym(startupSyms, "_DYNAMIC");
+ }
+# else
+ dynStructureAddr = &_DYNAMIC;
+# endif
- if( &_DYNAMIC == 0) {
+ if( dynStructureAddr == 0) {
return(0);
}
if( cachedResult == 0 ) {
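For reference, the workaround added above can be exercised on its own: rather than trusting the link-time &_DYNAMIC (which the Solaris 5.3 linker may leave at 0 inside a shared object), ask the dynamic linker for the symbol in the objects loaded at program startup. A minimal standalone sketch, assuming the same Solaris headers this file uses; the helper name is illustrative:

    #include <sys/elf.h>
    #include <dlfcn.h>
    #include <link.h>

    static Elf32_Dyn *find_dynamic(void)
    {
        /* dlopen(0, ...) yields a handle covering the objects loaded at
         * program startup, whose _DYNAMIC is set up correctly even when
         * the 5.3 shared-library bug leaves the link-time symbol at 0. */
        void *startup_syms = dlopen(0, RTLD_LAZY);
        if (startup_syms == 0) return 0;
        return (Elf32_Dyn *)dlsym(startup_syms, "_DYNAMIC");
    }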
@@ -125,7 +156,7 @@ static ptr_t GC_first_common()
#endif
-# if defined(SUNOS4) || defined(SUNOS5)
+# if defined(SUNOS4) || defined(SUNOS5DL)
/* Add dynamic library data sections to the root set. */
# if !defined(PCR) && !defined(SOLARIS_THREADS) && defined(THREADS)
# ifndef SRC_M3
@@ -152,6 +183,11 @@ void * GC_dlopen(const char *path, int mode)
}
# endif
+/* BTL: added to fix circular dlopen definition if SOLARIS_THREADS defined */
+# if defined(GC_must_restore_redefined_dlopen)
+# define dlopen GC_dlopen
+# endif
+
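From the client's side the interposition is meant to be invisible: under SOLARIS_THREADS, gc.h defines dlopen to GC_dlopen (per the BTL comment above), so ordinary loading code is rerouted through the collector's wrapper. A sketch of what that looks like, assuming a Solaris-threads build; the wrapper's exact behavior is the collector's concern, not the client's:

    #include <dlfcn.h>   /* include before gc.h so its prototypes are not rewritten */
    #include "gc.h"

    void *load_plugin(const char *path)
    {
        /* With SOLARIS_THREADS this call expands to GC_dlopen(path, RTLD_LAZY),
         * which wraps the real dlopen so the collector and the dynamic
         * loader do not step on each other. */
        return dlopen(path, RTLD_LAZY);
    }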
void GC_register_dynamic_libraries()
{
struct link_map *lm = GC_FirstDLOpenedLinkMap();
@@ -166,9 +202,10 @@ void GC_register_dynamic_libraries()
e = (struct exec *) lm->lm_addr;
GC_add_roots_inner(
((char *) (N_DATOFF(*e) + lm->lm_addr)),
- ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)));
+ ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
+ TRUE);
# endif
-# ifdef SUNOS5
+# ifdef SUNOS5DL
Elf32_Ehdr * e;
Elf32_Phdr * p;
unsigned long offset;
@@ -186,7 +223,8 @@ void GC_register_dynamic_libraries()
start = ((char *)(p->p_vaddr)) + offset;
GC_add_roots_inner(
start,
- start + p->p_memsz
+ start + p->p_memsz,
+ TRUE
);
}
break;
@@ -205,7 +243,7 @@ void GC_register_dynamic_libraries()
if (common_start == 0) common_start = GC_first_common();
if (common_start != 0) {
common_end = GC_find_limit(common_start, TRUE);
- GC_add_roots_inner((char *)common_start, (char *)common_end);
+ GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
}
}
# endif
@@ -305,7 +343,7 @@ void GC_register_dynamic_libraries()
}
}
}
- GC_add_roots_inner(start, limit);
+ GC_add_roots_inner(start, limit, TRUE);
irrelevant: ;
}
}
@@ -335,7 +373,7 @@ void GC_register_dynamic_libraries()
/* Part of the stack; ignore it. */
return;
}
- GC_add_roots_inner(base, limit);
+ GC_add_roots_inner(base, limit, TRUE);
}
extern bool GC_win32s;
@@ -382,6 +420,9 @@ void GC_register_dynamic_libraries()
#endif /* MSWIN32 */
#if defined(ALPHA)
+
+#include <loader.h>
+
void GC_register_dynamic_libraries()
{
int status;
@@ -415,15 +456,15 @@ void GC_register_dynamic_libraries()
/* Check status AFTER checking moduleid because */
/* of a bug in the non-shared ldr_next_module stub */
if (status != 0 ) {
- GC_printf("dynamic_load: status = %ld\n", (long)status);
+ GC_printf1("dynamic_load: status = %ld\n", (long)status);
{
extern char *sys_errlist[];
extern int sys_nerr;
extern int errno;
if (errno <= sys_nerr) {
- GC_printf("dynamic_load: %s\n", sys_errlist[errno]);
+ GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
} else {
- GC_printf("dynamic_load: %d\n", errno);
+ GC_printf1("dynamic_load: %d\n", (long)errno);
}
}
ABORT("ldr_next_module failed");
@@ -474,7 +515,8 @@ void GC_register_dynamic_libraries()
/* register region as a garbage collection root */
GC_add_roots_inner (
(char *)regioninfo.lri_mapaddr,
- (char *)regioninfo.lri_mapaddr + regioninfo.lri_size);
+ (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
+ TRUE);
}
}
@@ -512,7 +554,8 @@ void GC_register_dynamic_libraries()
== PCR_IL_SegFlags_Traced_on) {
GC_add_roots_inner
((char *)(q -> ls_addr),
- (char *)(q -> ls_addr) + q -> ls_bytes);
+ (char *)(q -> ls_addr) + q -> ls_bytes,
+ TRUE);
}
}
}
diff --git a/finalize.c b/finalize.c
index 45339b3c..5340e0f7 100644
--- a/finalize.c
+++ b/finalize.c
@@ -11,12 +11,16 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:08 pm PDT */
+/* Boehm, September 22, 1995 5:49 pm PDT */
# define I_HIDE_POINTERS
-# include "gc.h"
# include "gc_priv.h"
# include "gc_mark.h"
+/* Type of mark procedure used for marking from finalizable object. */
+/* This procedure normally does not mark the object, only its */
+/* descendants.						*/
+typedef void finalization_mark_proc(/* ptr_t finalizable_obj_ptr */);
+
# define HASH3(addr,size,log_size) \
((((word)(addr) >> 3) ^ ((word)(addr) >> (3+(log_size)))) \
& ((size) - 1))
@@ -57,6 +61,7 @@ static struct finalizable_object {
GC_finalization_proc fo_fn; /* Finalizer. */
ptr_t fo_client_data;
word fo_object_size; /* In bytes. */
+ finalization_mark_proc * fo_mark_proc; /* Mark-through procedure */
} **fo_head = 0;
struct finalizable_object * GC_finalize_now = 0;
@@ -74,8 +79,6 @@ void GC_push_finalizer_structures()
}
# endif
-# define ALLOC(x, t) t *x = GC_NEW(t)
-
/* Double the size of a hash table. *size_ptr is the log of its current */
/* size. May be a noop. */
/* *table is a pointer to an array of hash headers. If we succeed, we */
@@ -92,8 +95,8 @@ signed_word * log_size_ptr;
word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
register word new_size = 1 << log_new_size;
struct hash_chain_entry **new_table = (struct hash_chain_entry **)
- GC_malloc_ignore_off_page_inner(
- (size_t)new_size * sizeof(struct hash_chain_entry *));
+ GC_generic_malloc_inner_ignore_off_page(
+ (size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);
if (new_table == 0) {
if (table == 0) {
@@ -118,21 +121,30 @@ signed_word * log_size_ptr;
*table = new_table;
}
-
-int GC_register_disappearing_link(link)
-extern_ptr_t * link;
+# if defined(__STDC__) || defined(__cplusplus)
+ int GC_register_disappearing_link(GC_PTR * link)
+# else
+ int GC_register_disappearing_link(link)
+ GC_PTR * link;
+# endif
{
ptr_t base;
- base = (ptr_t)GC_base((extern_ptr_t)link);
+ base = (ptr_t)GC_base((GC_PTR)link);
if (base == 0)
ABORT("Bad arg to GC_register_disappearing_link");
return(GC_general_register_disappearing_link(link, base));
}
-int GC_general_register_disappearing_link(link, obj)
-extern_ptr_t * link;
-extern_ptr_t obj;
+# if defined(__STDC__) || defined(__cplusplus)
+ int GC_general_register_disappearing_link(GC_PTR * link,
+ GC_PTR obj)
+# else
+ int GC_general_register_disappearing_link(link, obj)
+ GC_PTR * link;
+ GC_PTR obj;
+# endif
+
{
struct disappearing_link *curr_dl;
int index;
@@ -176,7 +188,8 @@ extern_ptr_t obj;
new_dl = (struct disappearing_link *)
GC_generic_malloc_inner(sizeof(struct disappearing_link),NORMAL);
# else
- new_dl = GC_NEW(struct disappearing_link);
+ new_dl = (struct disappearing_link *)
+ GC_malloc(sizeof(struct disappearing_link));
# endif
if (new_dl != 0) {
new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
@@ -194,8 +207,12 @@ extern_ptr_t obj;
return(0);
}
-int GC_unregister_disappearing_link(link)
-extern_ptr_t * link;
+# if defined(__STDC__) || defined(__cplusplus)
+ int GC_unregister_disappearing_link(GC_PTR * link)
+# else
+ int GC_unregister_disappearing_link(link)
+ GC_PTR * link;
+# endif
{
struct disappearing_link *curr_dl, *prev_dl;
int index;
@@ -216,7 +233,7 @@ extern_ptr_t * link;
GC_dl_entries--;
UNLOCK();
ENABLE_SIGNALS();
- GC_free((extern_ptr_t)curr_dl);
+ GC_free((GC_PTR)curr_dl);
return(1);
}
prev_dl = curr_dl;
@@ -228,16 +245,61 @@ out:
return(0);
}
+/* Possible finalization_marker procedures. Note that mark stack */
+/* overflow is handled by the caller, and is not a disaster. */
+void GC_normal_finalize_mark_proc(p)
+ptr_t p;
+{
+ hdr * hhdr = HDR(p);
+
+ PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top,
+ &(GC_mark_stack[GC_mark_stack_size]));
+}
+
+/* This only pays very partial attention to the mark descriptor. */
+/* It does the right thing for normal and atomic objects, and treats */
+/* most others as normal. */
+void GC_ignore_self_finalize_mark_proc(p)
+ptr_t p;
+{
+ hdr * hhdr = HDR(p);
+ word descr = hhdr -> hb_descr;
+ ptr_t q, r;
+ ptr_t scan_limit;
+ ptr_t target_limit = p + WORDS_TO_BYTES(hhdr -> hb_sz) - 1;
+
+ if ((descr & DS_TAGS) == DS_LENGTH) {
+ scan_limit = p + descr - sizeof(word);
+ } else {
+ scan_limit = target_limit + 1 - sizeof(word);
+ }
+ for (q = p; q <= scan_limit; q += ALIGNMENT) {
+ r = *(ptr_t *)q;
+ if (r < p || r > target_limit) {
+ GC_PUSH_ONE_HEAP((word)r);
+ }
+ }
+}
+
+/*ARGSUSED*/
+void GC_null_finalize_mark_proc(p)
+ptr_t p;
+{
+}
+
+
+
/* Register a finalization function. See gc.h for details. */
/* in the nonthreads case, we try to avoid disabling signals, */
/* since it can be expensive. Threads packages typically */
/* make it cheaper. */
-void GC_register_finalizer(obj, fn, cd, ofn, ocd)
-extern_ptr_t obj;
+void GC_register_finalizer_inner(obj, fn, cd, ofn, ocd, mp)
+GC_PTR obj;
GC_finalization_proc fn;
-extern_ptr_t cd;
+GC_PTR cd;
GC_finalization_proc * ofn;
-extern_ptr_t * ocd;
+GC_PTR * ocd;
+finalization_mark_proc * mp;
{
ptr_t base;
struct finalizable_object * curr_fo, * prev_fo;
@@ -275,7 +337,7 @@ extern_ptr_t * ocd;
/* should be safe. The client may see only *ocd */
/* updated, but we'll declare that to be his */
/* problem. */
- if (ocd) *ocd = (extern_ptr_t) curr_fo -> fo_client_data;
+ if (ocd) *ocd = (GC_PTR) curr_fo -> fo_client_data;
if (ofn) *ofn = curr_fo -> fo_fn;
/* Delete the structure for base. */
if (prev_fo == 0) {
@@ -289,11 +351,12 @@ extern_ptr_t * ocd;
/* estimate will only make the table larger than */
/* necessary. */
# ifndef THREADS
- GC_free((extern_ptr_t)curr_fo);
+ GC_free((GC_PTR)curr_fo);
# endif
} else {
curr_fo -> fo_fn = fn;
curr_fo -> fo_client_data = (ptr_t)cd;
+ curr_fo -> fo_mark_proc = mp;
/* Reinsert it. We deleted it first to maintain */
/* consistency in the event of a signal. */
if (prev_fo == 0) {
@@ -324,13 +387,15 @@ extern_ptr_t * ocd;
new_fo = (struct finalizable_object *)
GC_generic_malloc_inner(sizeof(struct finalizable_object),NORMAL);
# else
- new_fo = GC_NEW(struct finalizable_object);
+ new_fo = (struct finalizable_object *)
+ GC_malloc(sizeof(struct finalizable_object));
# endif
if (new_fo != 0) {
new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
new_fo -> fo_fn = fn;
new_fo -> fo_client_data = (ptr_t)cd;
new_fo -> fo_object_size = GC_size(base);
+ new_fo -> fo_mark_proc = mp;
fo_set_next(new_fo, fo_head[index]);
GC_fo_entries++;
fo_head[index] = new_fo;
@@ -343,6 +408,58 @@ extern_ptr_t * ocd;
# endif
}
+# if defined(__STDC__)
+ void GC_register_finalizer(void * obj,
+ GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void ** ocd)
+# else
+ void GC_register_finalizer(obj, fn, cd, ofn, ocd)
+ GC_PTR obj;
+ GC_finalization_proc fn;
+ GC_PTR cd;
+ GC_finalization_proc * ofn;
+ GC_PTR * ocd;
+# endif
+{
+ GC_register_finalizer_inner(obj, fn, cd, ofn,
+ ocd, GC_normal_finalize_mark_proc);
+}
+
+# if defined(__STDC__)
+ void GC_register_finalizer_ignore_self(void * obj,
+ GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void ** ocd)
+# else
+ void GC_register_finalizer_ignore_self(obj, fn, cd, ofn, ocd)
+ GC_PTR obj;
+ GC_finalization_proc fn;
+ GC_PTR cd;
+ GC_finalization_proc * ofn;
+ GC_PTR * ocd;
+# endif
+{
+ GC_register_finalizer_inner(obj, fn, cd, ofn,
+ ocd, GC_ignore_self_finalize_mark_proc);
+}
+
+# if defined(__STDC__)
+ void GC_register_finalizer_no_order(void * obj,
+ GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void ** ocd)
+# else
+ void GC_register_finalizer_no_order(obj, fn, cd, ofn, ocd)
+ GC_PTR obj;
+ GC_finalization_proc fn;
+ GC_PTR cd;
+ GC_finalization_proc * ofn;
+ GC_PTR * ocd;
+# endif
+{
+ GC_register_finalizer_inner(obj, fn, cd, ofn,
+ ocd, GC_null_finalize_mark_proc);
+}
+
+
/* Called with world stopped. Cause disappearing links to disappear, */
/* and invoke finalizers. */
void GC_finalize()
@@ -351,8 +468,8 @@ void GC_finalize()
struct finalizable_object * curr_fo, * prev_fo, * next_fo;
ptr_t real_ptr, real_link;
register int i;
- int dl_size = 1 << log_dl_table_size;
- int fo_size = 1 << log_fo_table_size;
+ int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
+ int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
/* Make disappearing links disappear */
for (i = 0; i < dl_size; i++) {
@@ -387,10 +504,7 @@ void GC_finalize()
for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
if (!GC_is_marked(real_ptr)) {
- hdr * hhdr = HDR(real_ptr);
-
- PUSH_OBJ((word *)real_ptr, hhdr, GC_mark_stack_top,
- &(GC_mark_stack[GC_mark_stack_size]));
+ (*(curr_fo -> fo_mark_proc))(real_ptr);
while (!GC_mark_stack_empty()) GC_mark_from_mark_stack();
if (GC_mark_state != MS_NONE) {
/* Mark stack overflowed. Very unlikely. */
@@ -403,17 +517,16 @@ void GC_finalize()
GC_set_mark_bit(real_ptr);
while (!GC_mark_some());
}
- /*
if (GC_is_marked(real_ptr)) {
- --> Report finalization cycle here, if desired
+ WARN("Finalization cycle involving %lx\n", real_ptr);
}
- */
}
}
}
/* Enqueue for finalization all objects that are still */
/* unreachable. */
+ GC_words_finalized = 0;
for (i = 0; i < fo_size; i++) {
curr_fo = fo_head[i];
prev_fo = 0;
@@ -432,6 +545,9 @@ void GC_finalize()
/* Add to list of objects awaiting finalization. */
fo_set_next(curr_fo, GC_finalize_now);
GC_finalize_now = curr_fo;
+ GC_words_finalized +=
+ ALIGNED_WORDS(curr_fo -> fo_object_size)
+ + ALIGNED_WORDS(sizeof(struct finalizable_object));
# ifdef PRINTSTATS
if (!GC_is_marked((ptr_t)curr_fo)) {
ABORT("GC_finalize: found accessible unmarked object\n");
@@ -490,23 +606,29 @@ void GC_invoke_finalizers()
# else
GC_finalize_now = fo_next(curr_fo);
# endif
+ fo_set_next(curr_fo, 0);
real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
(*(curr_fo -> fo_fn))(real_ptr, curr_fo -> fo_client_data);
-# ifndef THREADS
- GC_free((extern_ptr_t)curr_fo);
+ curr_fo -> fo_client_data = 0;
+# ifdef UNDEFINED
+ /* This is probably a bad idea. It throws off accounting if */
+ /* nearly all objects are finalizable. O.w. it shouldn't */
+ /* matter. */
+ GC_free((GC_PTR)curr_fo);
# endif
}
}
# ifdef __STDC__
- extern_ptr_t GC_call_with_alloc_lock(GC_fn_type fn, extern_ptr_t client_data)
+ GC_PTR GC_call_with_alloc_lock(GC_fn_type fn,
+ GC_PTR client_data)
# else
- extern_ptr_t GC_call_with_alloc_lock(fn, client_data)
+ GC_PTR GC_call_with_alloc_lock(fn, client_data)
GC_fn_type fn;
- extern_ptr_t client_data;
+ GC_PTR client_data;
# endif
{
- extern_ptr_t result;
+ GC_PTR result;
DCL_LOCK_STATE;
# ifdef THREADS
diff --git a/gc.h b/gc.h
index 65a26093..ab7944ef 100644
--- a/gc.h
+++ b/gc.h
@@ -11,12 +11,36 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:13 pm PDT */
+/* Boehm, October 9, 1995 1:14 pm PDT */
+
+/*
+ * Note that this defines a large number of tuning hooks, which can
+ * safely be ignored in nearly all cases. For normal use it suffices
+ * to call only GC_MALLOC and perhaps GC_REALLOC.
+ * For better performance, also look at GC_MALLOC_ATOMIC, and
+ * GC_enable_incremental. If you need an action to be performed
+ * immediately before an object is collected, look at GC_register_finalizer.
+ * If you are using Solaris threads, look at the end of this file.
+ * Everything else is best ignored unless you encounter performance
+ * problems.
+ */
#ifndef _GC_H
# define _GC_H
+# if defined(__STDC__) || defined(__cplusplus)
+# define GC_PROTO(args) args
+ typedef void * GC_PTR;
+# else
+# define GC_PROTO(args) ()
+ typedef char * GC_PTR;
+# endif
+
+# ifdef __cplusplus
+ extern "C" {
+# endif
+
# include <stddef.h>
/* Define word and signed_word to be unsigned and signed types of the */
@@ -70,7 +94,7 @@ extern GC_word GC_free_space_divisor;
/* at the expense of space. */
/* GC_free_space_divisor = 1 will effectively */
/* disable collections. */
-
+
/* Public procedures */
/*
@@ -84,17 +108,10 @@ extern GC_word GC_free_space_divisor;
* collectable. GC_malloc_uncollectable and GC_free called on the resulting
* object implicitly update GC_non_gc_bytes appropriately.
*/
-#if defined(__STDC__) || defined(__cplusplus)
- extern void * GC_malloc(size_t size_in_bytes);
- extern void * GC_malloc_atomic(size_t size_in_bytes);
- extern void * GC_malloc_uncollectable(size_t size_in_bytes);
- extern void * GC_malloc_stubborn(size_t size_in_bytes);
-# else
- extern char * GC_malloc(/* size_in_bytes */);
- extern char * GC_malloc_atomic(/* size_in_bytes */);
- extern char * GC_malloc_uncollectable(/* size_in_bytes */);
- extern char * GC_malloc_stubborn(/* size_in_bytes */);
-# endif
+extern GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes));
+extern GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes));
+extern GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes));
+extern GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes));
/* Explicitly deallocate an object. Dangerous if used incorrectly. */
/* Requires a pointer to the base of an object. */
@@ -102,11 +119,7 @@ extern GC_word GC_free_space_divisor;
/* An object should not be enabled for finalization when it is 	*/
/* explicitly deallocated. */
/* GC_free(0) is a no-op, as required by ANSI C for free. */
-#if defined(__STDC__) || defined(__cplusplus)
- extern void GC_free(void * object_addr);
-# else
- extern void GC_free(/* object_addr */);
-# endif
+extern void GC_free GC_PROTO((GC_PTR object_addr));
/*
* Stubborn objects may be changed only if the collector is explicitly informed.
@@ -123,27 +136,19 @@ extern GC_word GC_free_space_divisor;
* do so. The same applies to dropping stubborn objects that are still
* changeable.
*/
-void GC_change_stubborn(/* p */);
-void GC_end_stubborn_change(/* p */);
+extern void GC_change_stubborn GC_PROTO((GC_PTR));
+extern void GC_end_stubborn_change GC_PROTO((GC_PTR));
/* Return a pointer to the base (lowest address) of an object given */
/* a pointer to a location within the object. */
/* Return 0 if displaced_pointer doesn't point to within a valid */
/* object. */
-# if defined(__STDC__) || defined(__cplusplus)
- void * GC_base(void * displaced_pointer);
-# else
- char * GC_base(/* char * displaced_pointer */);
-# endif
+extern GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer));
/* Given a pointer to the base of an object, return its size in bytes. */
/* The returned size may be slightly larger than what was originally */
/* requested. */
-# if defined(__STDC__) || defined(__cplusplus)
- size_t GC_size(void * object_addr);
-# else
- size_t GC_size(/* char * object_addr */);
-# endif
+extern size_t GC_size GC_PROTO((GC_PTR object_addr));
/* For compatibility with C library. This is occasionally faster than */
/* a malloc followed by a bcopy. But if you rely on that, either here */
@@ -153,22 +158,24 @@ void GC_end_stubborn_change(/* p */);
/* If the argument is stubborn, the result will have changes enabled. */
/* It is an error to have changes enabled for the original object. */
/* Follows ANSI conventions for NULL old_object.			*/
-# if defined(__STDC__) || defined(__cplusplus)
- extern void * GC_realloc(void * old_object, size_t new_size_in_bytes);
-# else
- extern char * GC_realloc(/* old_object, new_size_in_bytes */);
-# endif
-
-
+extern GC_PTR GC_realloc GC_PROTO((GC_PTR old_object,
+ size_t new_size_in_bytes));
+
/* Explicitly increase the heap size. */
/* Returns 0 on failure, 1 on success. */
-extern int GC_expand_hp(/* number_of_bytes */);
+extern int GC_expand_hp GC_PROTO((size_t number_of_bytes));
+
+/* Limit the heap size to n bytes. Useful when you're debugging, */
+/* especially on systems that don't handle running out of memory well. */
+/* n == 0 ==> unbounded. This is the default. */
+extern void GC_set_max_heap_size GC_PROTO((GC_word n));
-/* Clear the set of root segments */
-extern void GC_clear_roots();
+/* Clear the set of root segments. Wizards only. */
+extern void GC_clear_roots GC_PROTO((void));
-/* Add a root segment */
-extern void GC_add_roots(/* low_address, high_address_plus_1 */);
+/* Add a root segment. Wizards only. */
+extern void GC_add_roots GC_PROTO((char * low_address,
+ char * high_address_plus_1));
/* Add a displacement to the set of those considered valid by the */
/* collector. GC_register_displacement(n) means that if p was returned */
@@ -182,15 +189,33 @@ extern void GC_add_roots(/* low_address, high_address_plus_1 */);
/* retention. */
/* This is a no-op if the collector was compiled with recognition of */
/* arbitrary interior pointers enabled, which is now the default. */
-void GC_register_displacement(/* n */);
-
-/* Explicitly trigger a collection. */
-void GC_gcollect();
+void GC_register_displacement GC_PROTO((GC_word n));
+
+/* The following version should be used if any debugging allocation is */
+/* being done. */
+void GC_debug_register_displacement GC_PROTO((GC_word n));
+
+/* Explicitly trigger a full, world-stop collection. */
+void GC_gcollect GC_PROTO((void));
+
+/* Trigger a full world-stopped collection. Abort the collection if */
+/* and when stop_func returns a nonzero value. Stop_func will be */
+/* called frequently, and should be reasonably fast. This works even */
+/* if virtual dirty bits, and hence incremental collection, are not	*/
+/* available for this architecture. Collections can be aborted faster */
+/* than normal pause times for incremental collection. However, */
+/* aborted collections do no useful work; the next collection needs */
+/* to start from the beginning. */
+typedef int (* GC_stop_func) GC_PROTO((void));
+int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
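A small illustration of the stop-function hook: the deadline check below is a hypothetical client-side policy, not part of the library.

    #include <time.h>
    #include "gc.h"

    static time_t collect_deadline;

    /* GC_stop_func: a nonzero return value aborts the collection. */
    extern "C" int past_deadline(void)
    {
        return time(0) > collect_deadline;
    }

    void collect_with_budget(void)
    {
        collect_deadline = time(0) + 1;     /* roughly one second of work */
        GC_try_to_collect(past_deadline);   /* aborted work is simply discarded */
    }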
/* Return the number of bytes in the heap. Excludes collector private */
/* data structures. Includes empty blocks and fragmentation loss. */
/* Includes some pages that were allocated but never written. */
-size_t GC_get_heap_size();
+size_t GC_get_heap_size GC_PROTO((void));
+
+/* Return the number of bytes allocated since the last collection. */
+size_t GC_get_bytes_since_gc GC_PROTO((void));
/* Enable incremental/generational collection. */
/* Not advisable unless dirty bits are */
@@ -198,7 +223,16 @@ size_t GC_get_heap_size();
/* pointerfree(atomic) or immutable. */
/* Don't use in leak finding mode. */
/* Ignored if GC_dont_gc is true. */
-void GC_enable_incremental();
+void GC_enable_incremental GC_PROTO((void));
+
+/* Perform some garbage collection work, if appropriate. */
+/* Return 0 if there is no more work to be done. */
+/* Typically performs an amount of work corresponding roughly */
+/* to marking from one page. May do more work if further */
+/* progress requires it, e.g. if incremental collection is */
+/* disabled. It is reasonable to call this in a wait loop */
+/* until it returns 0. */
+int GC_collect_a_little GC_PROTO((void));
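The wait-loop usage suggested above, as a sketch: do incremental marking while the application is otherwise idle.

    #include "gc.h"

    /* Call during an idle period; returns once the collector reports no
     * more pending work (or sooner, if the loop is broken off early). */
    void gc_idle_work(void)
    {
        while (GC_collect_a_little()) {
            /* each iteration does roughly one page's worth of marking;
             * a real application would also poll for input here */
        }
    }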
/* Allocate an object of size lb bytes. The client guarantees that */
/* as long as the object is live, it will be referenced by a pointer */
@@ -214,41 +248,26 @@ void GC_enable_incremental();
/* for arrays likely to be larger than 100K or so. For other systems, */
/* or if the collector is not configured to recognize all interior */
/* pointers, the threshold is normally much higher. */
-# if defined(__STDC__) || defined(__cplusplus)
- void * GC_malloc_ignore_off_page(size_t lb);
-# else
- char * GC_malloc_ignore_off_page(/* size_t lb */);
-# endif
+extern GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
+extern GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
-# if defined(__STDC__) || defined(__cplusplus)
- extern void * GC_debug_malloc(size_t size_in_bytes,
- char * descr_string, int descr_int);
- extern void * GC_debug_malloc_atomic(size_t size_in_bytes,
- char * descr_string, int descr_int);
- extern void * GC_debug_malloc_uncollectable(size_t size_in_bytes,
- char * descr_string, int descr_int);
- extern void * GC_debug_malloc_stubborn(size_t size_in_bytes,
- char * descr_string, int descr_int);
- extern void GC_debug_free(void * object_addr);
- extern void * GC_debug_realloc(void * old_object,
- size_t new_size_in_bytes,
- char * descr_string, int descr_int);
-# else
- extern char * GC_debug_malloc(/* size_in_bytes, descr_string, descr_int */);
- extern char * GC_debug_malloc_atomic(/* size_in_bytes, descr_string,
- descr_int */);
- extern char * GC_debug_malloc_uncollectable(/* size_in_bytes, descr_string,
- descr_int */);
- extern char * GC_debug_malloc_stubborn(/* size_in_bytes, descr_string,
- descr_int */);
- extern void GC_debug_free(/* object_addr */);
- extern char * GC_debug_realloc(/* old_object, new_size_in_bytes,
- descr_string, descr_int */);
-# endif
-void GC_debug_change_stubborn(/* p */);
-void GC_debug_end_stubborn_change(/* p */);
+extern GC_PTR GC_debug_malloc
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern GC_PTR GC_debug_malloc_atomic
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern GC_PTR GC_debug_malloc_uncollectable
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern GC_PTR GC_debug_malloc_stubborn
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern void GC_debug_free GC_PROTO((GC_PTR object_addr));
+extern GC_PTR GC_debug_realloc
+ GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
+ char * descr_string, int descr_int));
+
+void GC_debug_change_stubborn GC_PROTO((GC_PTR));
+void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
# ifdef GC_DEBUG
# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
@@ -260,10 +279,17 @@ void GC_debug_end_stubborn_change(/* p */);
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_register_finalizer(GC_base(p), GC_debug_invoke_finalizer, \
GC_make_closure(f,d), of, od)
+# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
+ GC_register_finalizer_ignore_self( \
+ GC_base(p), GC_debug_invoke_finalizer, \
+ GC_make_closure(f,d), of, od)
# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
__LINE__)
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
+# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
+ GC_general_register_disappearing_link(link, GC_base(obj))
+# define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
# else
# define GC_MALLOC(sz) GC_malloc(sz)
# define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
@@ -272,9 +298,14 @@ void GC_debug_end_stubborn_change(/* p */);
# define GC_FREE(p) GC_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_register_finalizer(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
+ GC_register_finalizer_ignore_self(p, f, d, of, od)
# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
+# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
+ GC_general_register_disappearing_link(link, obj)
+# define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
# endif
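The intended usage of these macros, as a sketch: allocate through the upper-case names so that compiling the client with -DGC_DEBUG transparently switches to the checking allocators and to the finalization wrappers that compensate for the debug header. The helper name below is illustrative.

    /* #define GC_DEBUG before including gc.h, or pass -DGC_DEBUG. */
    #include <string.h>
    #include "gc.h"

    char *gc_strdup(const char *s)
    {
        /* Under GC_DEBUG this expands to
         * GC_debug_malloc_atomic(len, __FILE__, __LINE__). */
        char *result = (char *)GC_MALLOC_ATOMIC(strlen(s) + 1);
        if (result != 0) strcpy(result, s);
        return result;
    }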
/* The following are included because they are often convenient, and */
/* reduce the chance for a misspecified size argument.  But calls may	*/
@@ -292,15 +323,12 @@ void GC_debug_end_stubborn_change(/* p */);
/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
-# if defined(__STDC__) || defined(__cplusplus)
- typedef void (*GC_finalization_proc)(void * obj, void * client_data);
-# else
- typedef void (*GC_finalization_proc)(/* void * obj, void * client_data */);
-# endif
-
-void GC_register_finalizer(/* void * obj,
- GC_finalization_proc fn, void * cd,
- GC_finalization_proc *ofn, void ** ocd */);
+typedef void (*GC_finalization_proc)
+ GC_PROTO((GC_PTR obj, GC_PTR client_data));
+
+extern void GC_register_finalizer
+ GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+ GC_finalization_proc *ofn, GC_PTR *ocd));
/* When obj is no longer accessible, invoke */
/* (*fn)(obj, cd). If a and b are inaccessible, and */
/* a points to b (after disappearing links have been */
@@ -329,7 +357,20 @@ void GC_register_finalizer(/* void * obj,
/* finalization, even if neither the old nor new */
/* finalizer were NULL. */
/* Obj should be the nonNULL starting address of an */
- /* object allocated by GC_malloc or friends. */
+ /* object allocated by GC_malloc or friends. */
+ /* Note that any garbage collectable object referenced */
+ /* by cd will be considered accessible until the */
+ /* finalizer is invoked. */
+
+/* Another version of the above follows.  It ignores		*/
+/* self-cycles, i.e. pointers from a finalizable object to */
+/* itself. There is a stylistic argument that this is wrong, */
+/* but it's unavoidable for C++, since the compiler may */
+/* silently introduce these. It's also benign in that specific */
+/* case. */
+extern void GC_register_finalizer_ignore_self
+ GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+ GC_finalization_proc *ofn, GC_PTR *ocd));
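A sketch of when the ignore-self variant matters: an object that refers to itself (directly, or through compiler-introduced pointers) would otherwise be kept "reachable" for ordering purposes and trigger the finalization-cycle warning instead of being finalized. The struct and callback names are illustrative.

    #include <stdio.h>
    #include "gc.h"

    struct selfref { struct selfref *me; char payload[32]; };

    extern "C" void report(GC_PTR obj, GC_PTR client_data)
    {
        printf("finalizing %p (%s)\n", obj, (char *)client_data);
    }

    void make_finalizable(void)
    {
        struct selfref *a = (struct selfref *)GC_MALLOC(sizeof(struct selfref));
        a->me = a;   /* self-cycle */
        /* The ordered GC_register_finalizer would report a finalization
         * cycle here; ignoring the self-pointer lets a be finalized. */
        GC_register_finalizer_ignore_self((GC_PTR)a, report, (GC_PTR)"a", 0, 0);
    }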
/* The following routine may be used to break cycles between */
/* finalizable objects, thus causing cyclic finalizable */
@@ -337,8 +378,8 @@ void GC_register_finalizer(/* void * obj,
/* use involves calling GC_register_disappearing_link(&p), */
/* where p is a pointer that is not followed by finalization */
/* code, and should not be considered in determining */
-/* finalization order. */
-int GC_register_disappearing_link(/* void ** link */);
+/* finalization order. */
+extern int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
/* Link should point to a field of a heap allocated */
/* object obj. *link will be cleared when obj is */
/* found to be inaccessible. This happens BEFORE any */
@@ -357,13 +398,17 @@ int GC_register_disappearing_link(/* void ** link */);
/* Returns 1 if link was already registered, 0 */
/* otherwise. */
/* Only exists for backward compatibility. See below: */
-int GC_general_register_disappearing_link(/* void ** link, void * obj */);
+
+extern int GC_general_register_disappearing_link
+ GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
/* A slight generalization of the above. *link is */
/* cleared when obj first becomes inaccessible. This */
/* can be used to implement weak pointers easily and */
/* safely. Typically link will point to a location */
- /* holding a disguised pointer to obj. In this way */
- /* soft pointers are broken before any object */
+ /* holding a disguised pointer to obj. (A pointer */
+ /* inside an "atomic" object is effectively */
+ /* disguised.) In this way soft */
+ /* pointers are broken before any object */
/* reachable from them are finalized. Each link */
/* May be registered only once, i.e. with one obj */
/* value. This was added after a long email discussion */
@@ -373,47 +418,118 @@ int GC_general_register_disappearing_link(/* void ** link, void * obj */);
/* the object containing link. Explicitly deallocating */
/* obj may or may not cause link to eventually be */
/* cleared. */
-int GC_unregister_disappearing_link(/* void ** link */);
+extern int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
/* Returns 0 if link was not actually registered. */
/* Undoes a registration by either of the above two */
/* routines. */
/* Auxiliary fns to make finalization work correctly with displaced */
/* pointers introduced by the debugging allocators. */
-# if defined(__STDC__) || defined(__cplusplus)
- void * GC_make_closure(GC_finalization_proc fn, void * data);
- void GC_debug_invoke_finalizer(void * obj, void * data);
-# else
- char * GC_make_closure(/* GC_finalization_proc fn, char * data */);
- void GC_debug_invoke_finalizer(/* void * obj, void * data */);
-# endif
+extern GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
+extern void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
+/* GC_set_warn_proc can be used to redirect or filter warning messages. */
+typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
+extern GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
+ /* Returns old warning procedure. */
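For example, warnings such as the finalization-cycle message added in finalize.c above can be rerouted to a custom handler. A sketch; the handler name is arbitrary, and the assumption that msg is a printf-style format with one numeric argument follows the WARN calls in this patch:

    #include <stdio.h>
    #include "gc.h"

    extern "C" void log_gc_warning(char *msg, GC_word arg)
    {
        fprintf(stderr, "GC: ");
        fprintf(stderr, msg, (unsigned long)arg);
    }

    void install_gc_warning_handler(void)
    {
        GC_warn_proc previous = GC_set_warn_proc(log_gc_warning);
        (void)previous;   /* could be saved and chained to, if desired */
    }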
/* The following is intended to be used by a higher level */
/* (e.g. cedar-like) finalization facility. It is expected */
/* that finalization code will arrange for hidden pointers to */
/* disappear. Otherwise objects can be accessed after they */
/* have been collected. */
-# ifdef I_HIDE_POINTERS
-# if defined(__STDC__) || defined(__cplusplus)
-# define HIDE_POINTER(p) (~(size_t)(p))
-# define REVEAL_POINTER(p) ((void *)(HIDE_POINTER(p)))
-# else
-# define HIDE_POINTER(p) (~(unsigned long)(p))
-# define REVEAL_POINTER(p) ((char *)(HIDE_POINTER(p)))
-# endif
+/* Note that putting pointers in atomic objects or in */
+/* nonpointer slots of "typed" objects is equivalent to */
+/* disguising them in this way, and may have other advantages. */
+# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
+ typedef GC_word GC_hidden_pointer;
+# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
+# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
/* Converting a hidden pointer to a real pointer requires verifying */
/* that the object still exists. This involves acquiring the */
/* allocator lock to avoid a race with the collector. */
-
-# if defined(__STDC__) || defined(__cplusplus)
- typedef void * (*GC_fn_type)();
- void * GC_call_with_alloc_lock(GC_fn_type fn, void * client_data);
+# endif /* I_HIDE_POINTERS */
+
+typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
+extern GC_PTR GC_call_with_alloc_lock
+ GC_PROTO((GC_fn_type fn, GC_PTR client_data));
+
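Putting the pieces together — disappearing links, hidden pointers, and GC_call_with_alloc_lock — gives a small weak-reference sketch. Names are illustrative, and I_HIDE_POINTERS must be defined before gc.h is included so that HIDE_POINTER/REVEAL_POINTER are available:

    #define I_HIDE_POINTERS
    #include "gc.h"

    typedef struct { GC_hidden_pointer hidden; } weak_ref;

    weak_ref *weak_ref_create(GC_PTR obj)
    {
        /* Atomic allocation: the disguised pointer must not be scanned. */
        weak_ref *w = (weak_ref *)GC_MALLOC_ATOMIC(sizeof(weak_ref));
        if (w == 0) return 0;
        w->hidden = HIDE_POINTER(obj);
        GC_general_register_disappearing_link((GC_PTR *)&(w->hidden), obj);
        return w;
    }

    /* Runs with the allocation lock held, so obj cannot be collected
     * between the liveness check and the reveal. */
    extern "C" GC_PTR reveal(GC_PTR client_data)
    {
        GC_hidden_pointer h = *(GC_hidden_pointer *)client_data;
        return (h == 0) ? 0 : REVEAL_POINTER(h);
    }

    GC_PTR weak_ref_get(weak_ref *w)
    {
        return GC_call_with_alloc_lock(reveal, (GC_PTR)&(w->hidden));
    }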
+/* Check that p and q point to the same object. */
+/* Fail conspicuously if they don't. */
+/* Returns the first argument. */
+/* Succeeds if neither p nor q points to the heap. */
+/* May succeed if both p and q point between heap objects.	*/
+extern GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));
+
+/* Checked pointer pre- and post- increment operations. Note that */
+/* the second argument is in units of bytes, not multiples of the */
+/* object size. This should either be invoked from a macro, or the */
+/* call should be automatically generated. */
+extern GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
+extern GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));
+
+/* Check that p is visible */
+/* to the collector as a possibly pointer containing location. */
+/* If it isn't fail conspicuously. */
+/* Returns the argument in all cases. May erroneously succeed */
+/* in hard cases. (This is intended for debugging use with */
+/* untyped allocations. The idea is that it should be possible, though */
+/* slow, to add such a call to all indirect pointer stores.) */
+/* Currently useless for multithreaded worlds. */
+extern GC_PTR GC_is_visible GC_PROTO((GC_PTR p));
+
+/* Check that if p is a pointer to a heap page, then it points to */
+/* a valid displacement within a heap object. */
+/* Fail conspicuously if this property does not hold. */
+/* Uninteresting with ALL_INTERIOR_POINTERS. */
+/* Always returns its argument. */
+extern GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));
+
+/* Safer, but slow, pointer addition. Probably useful mainly with */
+/* a preprocessor. Useful only for heap pointers. */
+#ifdef GC_DEBUG
+# define GC_PTR_ADD3(x, n, type_of_result) \
+ ((type_of_result)GC_same_obj((x)+(n), (x)))
+# define GC_PRE_INCR3(x, n, type_of_result) \
+	((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)))
+# define GC_POST_INCR2(x, type_of_result) \
+	((type_of_result)GC_post_incr(&(x), sizeof(*x)))
+# ifdef __GNUC__
+# define GC_PTR_ADD(x, n) \
+ GC_PTR_ADD3(x, n, typeof(x))
+# define GC_PRE_INCR(x, n) \
+ GC_PRE_INCR3(x, n, typeof(x))
+# define GC_POST_INCR(x, n) \
+	GC_POST_INCR2(x, typeof(x))
# else
- typedef char * (*GC_fn_type)();
- char * GC_call_with_alloc_lock(/* GC_fn_type fn, char * client_data */);
+ /* We can't do this right without typeof, which ANSI */
+ /* decided was not sufficiently useful. Repeatedly */
+ /* mentioning the arguments seems too dangerous to be */
+ /* useful. So does not casting the result. */
+# define GC_PTR_ADD(x, n) ((x)+(n))
# endif
+#else /* !GC_DEBUG */
+# define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
+# define GC_PTR_ADD(x, n) ((x)+(n))
+# define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
+# define GC_PRE_INCR(x, n) ((x) += (n))
+# define GC_POST_INCR2(x, type_of_result) ((x)++)
+# define GC_POST_INCR(x, n) ((x)++)
+#endif
+
+/* Safer assignment of a pointer to a nonstack location. */
+#ifdef GC_DEBUG
+# ifdef __STDC__
+# define GC_PTR_STORE(p, q) \
+ (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
+# else
+# define GC_PTR_STORE(p, q) \
+ (*(char **)GC_is_visible(p) = GC_is_valid_displacement(q))
# endif
+#else /* !GC_DEBUG */
+# define GC_PTR_STORE(p, q) (*(p) = (q))
+#endif
+
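How a client is expected to use the checked macros above, as a sketch: the same source compiles to plain pointer operations normally, and to checked operations under -DGC_DEBUG. The struct and function names are illustrative.

    #include "gc.h"

    struct node { struct node *next; int value; };

    void set_next(struct node **slot, struct node *n)
    {
        /* Debug build: verifies slot is visible to the collector and that
         * n is a valid object pointer before performing the store. */
        GC_PTR_STORE(slot, n);
    }

    int third(int *arr)
    {
        /* Debug build (gcc): verifies arr and arr + 2 lie in the same object. */
        return *GC_PTR_ADD(arr, 2);
    }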
#ifdef SOLARIS_THREADS
/* We need to intercept calls to many of the threads primitives, so */
@@ -423,6 +539,7 @@ int GC_unregister_disappearing_link(/* void ** link */);
/* uncollectable objects, which are deallocated using the destructor */
/* facility in thr_keycreate. */
# include <thread.h>
+# include <signal.h>
int GC_thr_create(void *stack_base, size_t stack_size,
void *(*start_routine)(void *), void *arg, long flags,
thread_t *new_thread);
@@ -440,10 +557,27 @@ int GC_unregister_disappearing_link(/* void ** link */);
/* This returns a list of objects, linked through their first */
/* word. Its use can greatly reduce lock contention problems, since */
/* the allocation lock can be acquired and released many fewer times. */
-void * GC_malloc_many(size_t lb);
-#define GC_NEXT(p) (*(void **)(p)) /* Retrieve the next element */
+GC_PTR GC_malloc_many(size_t lb);
+#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
/* in returned list. */
#endif /* SOLARIS_THREADS */
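A sketch of the intended GC_malloc_many pattern under Solaris threads: refill a free list with one locked call instead of taking the allocation lock once per object. In real code the list would be per-thread; a single global is shown only to keep the sketch short.

    #include "gc.h"

    #define OBJ_BYTES 32

    static GC_PTR free_objs = 0;   /* per-thread in real code */

    GC_PTR fast_alloc(void)
    {
        GC_PTR result;
        if (free_objs == 0) {
            free_objs = GC_malloc_many(OBJ_BYTES);
            if (free_objs == 0) return 0;   /* heap exhausted */
        }
        result = free_objs;
        free_objs = GC_NEXT(result);   /* unlink the first element */
        GC_NEXT(result) = 0;           /* don't retain the rest of the list */
        return result;
    }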
+/*
+ * If you are planning on putting
+ * the collector in a SunOS 5 dynamic library, you need to call GC_INIT()
+ * from the statically loaded program section.
+ * This circumvents a Solaris 2.X (X<=4) linker bug.
+ */
+#if defined(sparc) || defined(__sparc)
+# define GC_INIT() { extern end, etext; \
+ GC_noop(&end, &etext); }
+#else
+# define GC_INIT()
+#endif
+
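Typical call site, as a sketch: on non-SPARC configurations the macro expands to nothing, so the call is always safe.

    #include "gc.h"

    int main(void)
    {
        GC_INIT();   /* needed when the collector lives in a SunOS 5 .so */
        char *buffer = (char *)GC_MALLOC(1024);
        buffer[0] = '\0';
        return 0;
    }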
+#ifdef __cplusplus
+ } /* end of extern "C" */
+#endif
+
#endif /* _GC_H */
diff --git a/gc_c++.h b/gc_c++.h
deleted file mode 100644
index 26019076..00000000
--- a/gc_c++.h
+++ /dev/null
@@ -1,161 +0,0 @@
-
-/****************************************************************************
-
-Copyright (c) 1994 by Xerox Corporation. All rights reserved.
-
-THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
-OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
-
-Permission is hereby granted to use or copy this program
-for any purpose, provided the above notices are retained on all copies.
-Permission to modify the code and to distribute modified code is granted,
-provided the above notices are retained, and a notice that the code was
-modified is included with the above copyright notice.
-
-C++ Interface to the Boehm Collector
-
- Jesse Hull and John Ellis
- Last modified on Tue Feb 15 14:43:02 PST 1994 by ellis
-
-This interface provides access to the Boehm collector (versions 3.6
-and later). It is intended to provide facilities similar to those
-described in the Ellis-Detlefs proposal for C++ garbage collection.
-
-To make a class collectable, derive it from the base class "gc":
-
- class MyClass: gc {...}
-
-Then, "new MyClass" will allocate intances that will be automatically
-garbage collected.
-
-Collected objects can be explicitly deleted with "delete", e.g.
-
- MyClass* m = ...;
- delete m;
-
-This will free the object's storage immediately.
-
-Collected instances of non-class types can be allocated using
-placement syntax with the argument "GC":
-
- typedef int A[ 10 ];
- A* a = new (GC) A;
-
-The built-in "operator new" continues to allocate non-collectible
-objects that the programmer must explicitly delete. Collected object
-may freely point at non-collected objects, and vice versa.
-
-Object clean-up (finalization) can be specified using class
-"gc_cleanup". When an object derived from "gc_cleanup" is discovered
-to be inaccessible by the collector, or when it is explicitly deleted,
-its destructors will be invoked first.
-
-Clean-up functions for non-class types can be specified as additional
-placement arguments:
-
- A* a = new (GC, MyCleanup) A;
-
-An object is considered "accessible" by the collector if it can be
-reached by a path of pointers from static variables, automatic
-variables of active functions, or from another object with clean-up
-enabled. This implies that if object A and B both have clean-up
-enabled, and A points at B, B will be considered accessible, and A's
-clean-up will be be invoked before B's. If A points at B and B points
-back to A, forming a cycle, that's considered a storage leak, and
-neither will ever become inaccessible. See the C interface gc.h for
-low-level facilities for handling such cycles of objects with cleanup.
-
-****************************************************************************/
-
-#ifndef GC_CPP_H
-#define GC_CPP_H
-
-extern "C" {
-#include "gc.h"
-}
-
-enum GCPlacement {GC, NoGC};
-
-class gc {
-public:
- void* operator new( size_t size );
- void* operator new( size_t size, GCPlacement gcp );
- void operator delete( void* obj ); };
- /*
- Intances of classes derived from "gc" will be allocated in the
- collected heap by default, unless an explicit NoGC placement is
- specified. */
-
-class gc_cleanup: public gc {
-public:
- gc_cleanup();
- virtual ~gc_cleanup();
-private:
- static void cleanup( void* obj, void* clientData ); };
- /*
- Instances of classes derived from "gc_cleanup" will be allocated
- in the collected heap by default. Further, when the collector
- discovers an instance is inaccessible (see above) or when the
- instance is explicitly deleted, its destructors will be invoked.
- NOTE: Only one instance of "gc_cleanup" should occur in the
- inheritance heirarchy -- i.e. it should always be a virtual
- base. */
-
-void* operator new(
- size_t size,
- GCPlacement gcp,
- void (*cleanup)( void*, void* ) = 0,
- void* clientData = 0 );
- /*
- If "gcp = GC", then this "operator new" allocates in the collected
- heap, otherwise in the non-collected heap. When the allocated
- object "obj" becomes inaccessible, the collector will invoke the
- function "cleanup( obj, clientData )". It is an error to specify
- a non-null "cleanup" when "gcp = NoGC". */
-
-/****************************************************************************
-
-Inline implementation
-
-****************************************************************************/
-
-inline void* gc::operator new( size_t size ) {
- return GC_MALLOC( size ); };
-
-inline void* gc::operator new( size_t size, GCPlacement gcp ) {
- if (gcp == GC)
- return GC_MALLOC( size );
- else
- return GC_MALLOC_UNCOLLECTABLE( size ); }
-
-inline void gc::operator delete( void* obj ) {
- GC_FREE( obj ); };
-
-inline gc_cleanup::gc_cleanup() {
- GC_REGISTER_FINALIZER( GC_base( this ), cleanup, this, 0, 0 ); }
-
-inline void gc_cleanup::cleanup( void* obj, void* realThis ) {
- ((gc_cleanup*) realThis)->~gc_cleanup(); }
-
-inline gc_cleanup::~gc_cleanup() {
- GC_REGISTER_FINALIZER( this, 0, 0, 0, 0 ); }
-
-inline void* operator new(
- size_t size,
- GCPlacement gcp,
- void (*cleanup)( void*, void* ) = 0,
- void* clientData = 0 )
-{
- void* obj;
-
- if (gcp == GC) {
- obj = GC_MALLOC( size );
- if (cleanup != 0)
- GC_REGISTER_FINALIZER( obj, cleanup, clientData, 0, 0 ); }
- else {
- obj = GC_MALLOC_UNCOLLECTABLE( size ); };
- return obj; }
-
-
-#endif
-
diff --git a/gc_c++.cc b/gc_cpp.cc
index 6654241a..a766a01a 100644
--- a/gc_c++.cc
+++ b/gc_cpp.cc
@@ -1,11 +1,12 @@
/*************************************************************************
-
-
Copyright (c) 1994 by Xerox Corporation. All rights reserved.
THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ Last modified on Sat Nov 19 19:31:14 PST 1994 by ellis
+ on Sat Jun 8 15:10:00 PST 1994 by boehm
+
Permission is hereby granted to copy this code for any purpose,
provided the above notices are retained on all copies.
@@ -18,16 +19,28 @@ You should ensure (using implementation-dependent techniques) that the
linker finds this module before the library that defines the default
built-in "new" and "delete".
+Authors: John R. Ellis and Jesse Hull
**************************************************************************/
+/* Boehm, December 20, 1994 7:26 pm PST */
-#include "gc_c++.h"
+#include "gc_cpp.h"
void* operator new( size_t size ) {
- return GC_MALLOC_UNCOLLECTABLE( size ); }
+ return GC_MALLOC_UNCOLLECTABLE( size );}
void operator delete( void* obj ) {
- return GC_FREE( obj ); }
+ GC_FREE( obj );}
+#ifdef OPERATOR_NEW_ARRAY
+
+void* operator new[]( size_t size ) {
+ return GC_MALLOC_UNCOLLECTABLE( size );}
+
+void operator delete[]( void* obj ) {
+ GC_FREE( obj );}
+
+#endif /* OPERATOR_NEW_ARRAY */
+
diff --git a/gc_cpp.h b/gc_cpp.h
new file mode 100644
index 00000000..812bb653
--- /dev/null
+++ b/gc_cpp.h
@@ -0,0 +1,285 @@
+#ifndef GC_CPP_H
+#define GC_CPP_H
+/****************************************************************************
+Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+
+THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+Permission is hereby granted to use or copy this program for any
+purpose, provided the above notices are retained on all copies.
+Permission to modify the code and to distribute modified code is
+granted, provided the above notices are retained, and a notice that
+the code was modified is included with the above copyright notice.
+****************************************************************************
+
+C++ Interface to the Boehm Collector
+
+ John R. Ellis and Jesse Hull
+ Last modified on Mon Jul 24 15:43:42 PDT 1995 by ellis
+
+This interface provides access to the Boehm collector. It provides
+basic facilities similar to those described in "Safe, Efficient
+Garbage Collection for C++", by John R. Ellis and David L. Detlefs
+(ftp.parc.xerox.com:/pub/ellis/gc).
+
+All heap-allocated objects are either "collectable" or
+"uncollectable". Programs must explicitly delete uncollectable
+objects, whereas the garbage collector will automatically delete
+collectable objects when it discovers them to be inaccessible.
+Collectable objects may freely point at uncollectable objects and vice
+versa.
+
+Objects allocated with the built-in "::operator new" are uncollectable.
+
+Objects derived from class "gc" are collectable. For example:
+
+ class A: public gc {...};
+ A* a = new A; // a is collectable.
+
+Collectable instances of non-class types can be allocated using the GC
+placement:
+
+ typedef int A[ 10 ];
+ A* a = new (GC) A;
+
+Uncollectable instances of classes derived from "gc" can be allocated
+using the NoGC placement:
+
+ class A: public gc {...};
+ A* a = new (NoGC) A; // a is uncollectable.
+
+Both uncollectable and collectable objects can be explicitly deleted
+with "delete", which invokes an object's destructors and frees its
+storage immediately.
+
+A collectable object may have a clean-up function, which will be
+invoked when the collector discovers the object to be inaccessible.
+An object derived from "gc_cleanup" or containing a member derived
+from "gc_cleanup" has a default clean-up function that invokes the
+object's destructors. Explicit clean-up functions may be specified as
+an additional placement argument:
+
+ A* a = ::new (GC, MyCleanup) A;
+
+An object is considered "accessible" by the collector if it can be
+reached by a path of pointers from static variables, automatic
+variables of active functions, or from some object with clean-up
+enabled; pointers from an object to itself are ignored.
+
+Thus, if objects A and B both have clean-up functions, and A points at
+B, B is considered accessible. After A's clean-up is invoked and its
+storage released, B will then become inaccessible and will have its
+clean-up invoked. If A points at B and B points to A, forming a
+cycle, then that's considered a storage leak, and neither will be
+collectable. See the interface gc.h for low-level facilities for
+handling such cycles of objects with clean-up.
+
+The collector cannot guarantee that it will find all inaccessible
+objects. In practice, it finds almost all of them.
+
+
+Cautions:
+
+1. Be sure the collector has been augmented with "make c++".
+
+2. If your compiler supports the new "operator new[]" syntax, then
+add -DOPERATOR_NEW_ARRAY to the Makefile.
+
+If your compiler doesn't support "operator new[]", beware that an
+array of type T, where T is derived from "gc", may or may not be
+allocated as a collectable object (it depends on the compiler). Use
+the explicit GC placement to make the array collectable. For example:
+
+ class A: public gc {...};
+ A* a1 = new A[ 10 ]; // collectable or uncollectable?
+ A* a2 = new (GC) A[ 10 ]; // collectable
+
+3. The destructors of collectable arrays of objects derived from
+"gc_cleanup" will not be invoked properly. For example:
+
+ class A: public gc_cleanup {...};
+ A* a = new (GC) A[ 10 ]; // destructors not invoked correctly
+
+Typically, only the destructor for the first element of the array will
+be invoked when the array is garbage-collected. To get all the
+destructors of any array executed, you must supply an explicit
+clean-up function:
+
+ A* a = new (GC, MyCleanUp) A[ 10 ];
+
+(Implementing clean-up of arrays correctly, portably, and in a way
+that preserves the correct exception semantics requires a language
+extension, e.g. the "gc" keyword.)
+
+4. Compiler bugs:
+
+* Solaris 2's CC (SC3.0) doesn't implement t->~T() correctly, so the
+destructors of classes derived from gc_cleanup won't be invoked.
+You'll have to explicitly register a clean-up function with
+new-placement syntax.
+
+* Evidently cfront 3.0 does not allow destructors to be explicitly
+invoked using the ANSI-conforming syntax t->~T(). If you're using
+cfront 3.0, you'll have to comment out the class gc_cleanup, which
+uses explicit invocation.
+
+****************************************************************************/
+
+#include "gc.h"
+
+#ifndef THINK_CPLUS
+#define _cdecl
+#endif
+
+#if ! defined( OPERATOR_NEW_ARRAY ) \
+ && (__BORLANDC__ >= 0x450 || (__GNUC__ >= 2 && __GNUC_MINOR__ >= 6))
+# define OPERATOR_NEW_ARRAY
+#endif
+
+enum GCPlacement {GC, NoGC};
+
+class gc {public:
+ inline void* operator new( size_t size );
+ inline void* operator new( size_t size, GCPlacement gcp );
+ inline void operator delete( void* obj );
+
+#ifdef OPERATOR_NEW_ARRAY
+ inline void* operator new[]( size_t size );
+ inline void* operator new[]( size_t size, GCPlacement gcp );
+ inline void operator delete[]( void* obj );
+#endif /* OPERATOR_NEW_ARRAY */
+ };
+ /*
+ Instances of classes derived from "gc" will be allocated in the
+ collected heap by default, unless an explicit NoGC placement is
+ specified. */
+
+class gc_cleanup: virtual public gc {public:
+ inline gc_cleanup();
+ inline virtual ~gc_cleanup();
+private:
+ inline static void _cdecl cleanup( void* obj, void* clientData );};
+ /*
+ Instances of classes derived from "gc_cleanup" will be allocated
+ in the collected heap by default. When the collector discovers an
+ inaccessible object derived from "gc_cleanup" or containing a
+ member derived from "gc_cleanup", its destructors will be
+ invoked. */
+
+extern "C" {typedef void (*GCCleanUpFunc)( void* obj, void* clientData );}
+
+inline void* operator new(
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup = 0,
+ void* clientData = 0 );
+ /*
+ Allocates a collectable or uncollected object, according to the
+ value of "gcp".
+
+ For collectable objects, if "cleanup" is non-null, then when the
+ allocated object "obj" becomes inaccessible, the collector will
+ invoke the function "cleanup( obj, clientData )" but will not
+ invoke the object's destructors. It is an error to explicitly
+ delete an object allocated with a non-null "cleanup".
+
+ It is an error to specify a non-null "cleanup" with NoGC or for
+ classes derived from "gc_cleanup" or containing members derived
+ from "gc_cleanup". */
+
+#ifdef OPERATOR_NEW_ARRAY
+
+inline void* operator new[](
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup = 0,
+ void* clientData = 0 );
+ /*
+ The operator new for arrays, identical to the above. */
+
+#endif /* OPERATOR_NEW_ARRAY */
+
+/****************************************************************************
+
+Inline implementation
+
+****************************************************************************/
+
+inline void* gc::operator new( size_t size ) {
+ return GC_MALLOC( size );}
+
+inline void* gc::operator new( size_t size, GCPlacement gcp ) {
+ if (gcp == GC)
+ return GC_MALLOC( size );
+ else
+ return GC_MALLOC_UNCOLLECTABLE( size );}
+
+inline void gc::operator delete( void* obj ) {
+ GC_FREE( obj );}
+
+
+#ifdef OPERATOR_NEW_ARRAY
+
+inline void* gc::operator new[]( size_t size ) {
+ return gc::operator new( size );}
+
+inline void* gc::operator new[]( size_t size, GCPlacement gcp ) {
+ return gc::operator new( size, gcp );}
+
+inline void gc::operator delete[]( void* obj ) {
+ gc::operator delete( obj );}
+
+#endif /* OPERATOR_NEW_ARRAY */
+
+
+inline gc_cleanup::~gc_cleanup() {
+ GC_REGISTER_FINALIZER_IGNORE_SELF( this, 0, 0, 0, 0 );}
+
+inline void gc_cleanup::cleanup( void* obj, void* displ ) {
+ ((gc_cleanup*) ((char*) obj + (ptrdiff_t) displ))->~gc_cleanup();}
+
+inline gc_cleanup::gc_cleanup() {
+ GC_finalization_proc oldProc;
+ void* oldData;
+ void* base = GC_base( (void *) this );
+ if (0 == base) return;
+ GC_REGISTER_FINALIZER_IGNORE_SELF(
+ base, cleanup, (void*) ((char*) this - (char*) base),
+ &oldProc, &oldData );
+ if (0 != oldProc) {
+ GC_REGISTER_FINALIZER_IGNORE_SELF( base, oldProc, oldData, 0, 0 );}}
+
+inline void* operator new(
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup,
+ void* clientData )
+{
+ void* obj;
+
+ if (gcp == GC) {
+ obj = GC_MALLOC( size );
+ if (cleanup != 0)
+ GC_REGISTER_FINALIZER_IGNORE_SELF(
+ obj, cleanup, clientData, 0, 0 );}
+ else {
+ obj = GC_MALLOC_UNCOLLECTABLE( size );};
+ return obj;}
+
+
+#ifdef OPERATOR_NEW_ARRAY
+
+inline void* operator new[](
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup,
+ void* clientData )
+{
+ return ::operator new( size, gcp, cleanup, clientData );}
+
+#endif /* OPERATOR_NEW_ARRAY */
+
+
+#endif /* GC_CPP_H */
+
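A short client sketch for the interface above, assuming the collector was built with "make c++" so gc_cpp.cc is linked in; the class and function names are illustrative:

    #include <stdio.h>
    #include "gc_cpp.h"

    class Point: public gc {            // collectable, no clean-up
    public:
        int x, y;
        Point(int x0, int y0): x(x0), y(y0) {}
    };

    class Window: public gc_cleanup {   // destructor runs as clean-up
    public:
        ~Window() { printf("window reclaimed\n"); }
    };

    extern "C" void log_reclaim(void* obj, void* client_data)
    {
        printf("object %p reclaimed\n", obj);
    }

    void demo()
    {
        Point* p = new Point(1, 2);                      // collectable
        Point* q = new (NoGC) Point(3, 4);               // uncollectable: delete it yourself
        Point* r = ::new (GC, log_reclaim) Point(5, 6);  // explicit clean-up function
        Window* w = new Window;                          // ~Window runs when inaccessible
        delete q;                                        // immediate, explicit reclamation
        // p, r and w are reclaimed (and cleaned up) when they become inaccessible.
    }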
diff --git a/gc_hdrs.h b/gc_hdrs.h
index c4fd5577..2f2d1bf9 100644
--- a/gc_hdrs.h
+++ b/gc_hdrs.h
@@ -11,7 +11,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:16 pm PDT */
+/* Boehm, July 11, 1995 11:54 am PDT */
# ifndef GC_HEADERS_H
# define GC_HEADERS_H
typedef struct hblkhdr hdr;
@@ -80,11 +80,12 @@ typedef struct bi {
# define MAX_JUMP (HBLKSIZE - 1)
+# define HDR_FROM_BI(bi, p) \
+ ((bi)->index[((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
# ifndef HASH_TL
# define BI(p) (GC_top_index \
[(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
-# define HDR_INNER(p) (BI(p)->index \
- [((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
+# define HDR_INNER(p) HDR_FROM_BI(BI(p),p)
# ifdef SMALL_CONFIG
# define HDR(p) GC_find_header((ptr_t)(p))
# else
@@ -104,7 +105,7 @@ typedef struct bi {
(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
\
- while (_bi -> key != hi && _bi != &GC_all_nils) \
+ while (_bi -> key != hi && _bi != GC_all_nils) \
_bi = _bi -> hash_link; \
(bottom_indx) = _bi; \
}
@@ -113,8 +114,7 @@ typedef struct bi {
register bottom_index * bi; \
\
GET_BI(p, bi); \
- (ha) = &(bi->index[((unsigned long)(p)>>LOG_HBLKSIZE) \
- & (BOTTOM_SZ - 1)]); \
+ (ha) = &(HDR_FROM_BI(bi, p)); \
}
# define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
(hhdr) = *_ha; }
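To make the two-level lookup explicit, here is a hedged sketch of what HDR(p) amounts to on the non-SMALL_CONFIG, non-HASH_TL path, written as a function for readability (the function name is hypothetical; hdr, bottom_index, word and the size constants are the collector's own, so this only makes sense inside the collector's sources):

    /* Equivalent of HDR(p) == HDR_INNER(p): the high address bits pick  */
    /* a bottom_index out of GC_top_index, and the next LOG_BOTTOM_SZ    */
    /* bits pick the block header within it (HDR_FROM_BI).               */
    static hdr * header_of(void *p)
    {
        word addr = (word)p;
        bottom_index *bi =
            GC_top_index[addr >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)];

        return bi->index[(addr >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)];
    }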
diff --git a/gc_mark.h b/gc_mark.h
index b1a7c37e..e01cf46a 100644
--- a/gc_mark.h
+++ b/gc_mark.h
@@ -11,7 +11,7 @@
* modified is included with the above copyright notice.
*
*/
-/* Boehm, May 19, 1994 2:15 pm PDT */
+/* Boehm, November 7, 1994 4:56 pm PST */
/*
* Declarations of mark stack. Needed by marker and client supplied mark
@@ -21,7 +21,6 @@
# define GC_MARK_H
/* A client supplied mark procedure. Returns new mark stack pointer. */
-/* Not currently used for predefined object kinds. */
/* Primary effect should be to push new entries on the mark stack. */
/* Mark stack pointer values are passed and returned explicitly. */
/* Global variables describing mark stack are not necessarily valid.  */
@@ -35,7 +34,9 @@
/* overflows. */
/* This procedure is always called with at least one empty entry on the */
/* mark stack. */
-/* Boehm, March 15, 1994 2:38 pm PST */
+/* Currently we require that mark procedures look for pointers in a */
+/* subset of the places the conservative marker would. It must be safe */
+/* to invoke the normal mark procedure instead. */
# define PROC_BYTES 100
typedef struct ms_entry * (*mark_proc)(/* word * addr, mark_stack_ptr,
mark_stack_limit, env */);
@@ -160,6 +161,36 @@ mse * GC_signal_mark_stack_overflow();
mark_stack_top, mark_stack_limit) \
}
+/*
+ * Push a single value onto mark stack. Mark from the object pointed to by p.
+ * GC_push_one is normally called by GC_push_regs, and thus must be defined.
+ * P is considered valid even if it is an interior pointer.
+ * Previously marked objects are not pushed. Hence we make progress even
+ * if the mark stack overflows.
+ */
+# define GC_PUSH_ONE_STACK(p) \
+ if ((ptr_t)(p) >= GC_least_plausible_heap_addr \
+ && (ptr_t)(p) < GC_greatest_plausible_heap_addr) { \
+ GC_push_one_checked(p,TRUE); \
+ }
+
+/*
+ * As above, but interior pointer recognition as for
+ * normal for heap pointers.
+ */
+# ifdef ALL_INTERIOR_POINTERS
+# define AIP TRUE
+# else
+# define AIP FALSE
+# endif
+# define GC_PUSH_ONE_HEAP(p) \
+ if ((ptr_t)(p) >= GC_least_plausible_heap_addr \
+ && (ptr_t)(p) < GC_greatest_plausible_heap_addr) { \
+ GC_push_one_checked(p,AIP); \
+ }
+
+
+
extern bool GC_mark_stack_too_small;
/* We need a larger mark stack. May be */
/* set by client supplied mark routines.*/
diff --git a/gc_priv.h b/gc_priv.h
index 501e6f3b..357a390d 100644
--- a/gc_priv.h
+++ b/gc_priv.h
@@ -11,12 +11,29 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:17 pm PDT */
+/* Boehm, August 9, 1995 5:49 pm PDT */
# ifndef GC_PRIVATE_H
# define GC_PRIVATE_H
+#if defined(mips) && defined(SYSTYPE_BSD) && defined(sony_news)
+ /* sony RISC NEWS, NEWSOS 4 */
+# define BSD_TIME
+ typedef long ptrdiff_t;
+#endif
+
+#if defined(mips) && defined(SYSTYPE_BSD43)
+ /* MIPS RISCOS 4 */
+# define BSD_TIME
+#endif
+
+#ifdef BSD_TIME
+# include <sys/types.h>
+# include <sys/time.h>
+# include <sys/resource.h>
+#endif /* BSD_TIME */
+
# ifndef GC_H
# include "gc.h"
# endif
@@ -39,8 +56,8 @@ typedef GC_signed_word signed_word;
# define FALSE 0
typedef char * ptr_t; /* A generic pointer to which we can add */
- /* byte displacments. */
- /* Prefereably identical to caddr_t, if it */
+ /* byte displacements. */
+ /* Preferably identical to caddr_t, if it */
/* exists. */
#if defined(__STDC__)
@@ -48,13 +65,11 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# if !(defined( sony_news ) )
# include <stddef.h>
# endif
- typedef void * extern_ptr_t;
# define VOLATILE volatile
#else
# ifdef MSWIN32
# include <stdlib.h>
# endif
- typedef char * extern_ptr_t;
# define VOLATILE
#endif
@@ -151,7 +166,7 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# define THREADS
# endif
-#ifdef SPARC
+#if defined(SPARC)
# define ALIGN_DOUBLE /* Align objects of size > 1 word on 2 word */
/* boundaries. Wasteful of memory, but */
/* apparently required by SPARC architecture. */
@@ -159,6 +174,10 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
/* include assembly code to do it well. */
#endif
+#ifdef HP_PA
+# define ALIGN_DOUBLE
+#endif
+
#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
/* free lists are actually maintained. This applies */
/* only to the top level routines in misc.c, not to */
@@ -180,39 +199,102 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
#endif
-# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
-# define MAXHINCR 512 /* Maximum heap increment, in blocks */
+# ifndef LARGE_CONFIG
+# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
+# define MAXHINCR 512 /* Maximum heap increment, in blocks */
+# else
+# define MINHINCR 64
+# define MAXHINCR 4096
+# endif
# define TIME_LIMIT 50 /* We try to keep pause times from exceeding */
/* this by much. In milliseconds. */
+# define BL_LIMIT GC_black_list_spacing
+ /* If we need a block of N bytes, and we have */
+ /* a block of N + BL_LIMIT bytes available, */
+ /* and N > BL_LIMIT, */
+ /* but all possible positions in it are */
+ /* blacklisted, we just use it anyway (and */
+ /* print a warning, if warnings are enabled). */
+ /* This risks subsequently leaking the block */
+ /* due to a false reference. But not using */
+ /* the block risks unreasonable immediate */
+ /* heap growth. */
+
/*********************************/
/* */
-/* OS interface routines */
+/* Stack saving for debugging */
/* */
/*********************************/
-#include <time.h>
-#if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
- clock_t clock(); /* Not in time.h, where it belongs */
+/*
+ * Number of frames and arguments to save in objects allocated by
+ * debugging allocator.
+ */
+#   define NFRAMES 6    /* Number of frames to save.  Even, for        */
+                        /* alignment reasons.                          */
+#   define NARGS 2      /* Number of arguments to save for each call.  */
+
+
+#ifdef SAVE_CALL_CHAIN
+ struct callinfo {
+ word ci_pc;
+ word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
+ };
+
+/* Fill in the pc and argument information for up to NFRAMES of my */
+/* callers.  Ignore my frame and my caller's frame. */
+void GC_save_callers (/* struct callinfo info[NFRAMES] */);
+
+void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+
#endif
-#if !defined(CLOCKS_PER_SEC)
-# define CLOCKS_PER_SEC 1000000
+
+
+/*********************************/
+/* */
+/* OS interface routines */
+/* */
+/*********************************/
+
+#ifdef BSD_TIME
+# undef CLOCK_TYPE
+# undef GET_TIME
+# undef MS_TIME_DIFF
+# define CLOCK_TYPE struct timeval
+# define GET_TIME(x) { struct rusage rusage; \
+ getrusage (RUSAGE_SELF, &rusage); \
+ x = rusage.ru_utime; }
+# define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
+ + (double) (a.tv_usec - b.tv_usec) / 1000.0)
+#else /* !BSD_TIME */
+# include <time.h>
+# if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
+ clock_t clock(); /* Not in time.h, where it belongs */
+# endif
+# if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
+# include <machine/limits.h>
+# define CLOCKS_PER_SEC CLK_TCK
+# endif
+# if !defined(CLOCKS_PER_SEC)
+# define CLOCKS_PER_SEC 1000000
/*
* This is technically a bug in the implementation. ANSI requires that
* CLOCKS_PER_SEC be defined. But at least under SunOS4.1.1, it isn't.
* Also note that the combination of ANSI C and POSIX is incredibly gross
* here. The type clock_t is used by both clock() and times(). But on
- * some machines thes use different notions of a clock tick, CLOCKS_PER_SEC
+ * some machines these use different notions of a clock tick, CLOCKS_PER_SEC
* seems to apply only to clock. Hence we use it here. On many machines,
* including SunOS, clock actually uses units of microseconds (which are
* not really clock ticks).
*/
-#endif
-#define CLOCK_TYPE clock_t
-#define GET_TIME(x) x = clock()
-#define MS_TIME_DIFF(a,b) ((unsigned long) \
+# endif
+# define CLOCK_TYPE clock_t
+# define GET_TIME(x) x = clock()
+# define MS_TIME_DIFF(a,b) ((unsigned long) \
(1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
+#endif /* !BSD_TIME */
/* We use bzero and bcopy internally. They may not be available. */
# if defined(SPARC) && defined(SUNOS4)
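For orientation, a hedged sketch of how these timing macros are used (this mirrors the pattern in the collector's own code; the function name is hypothetical, and GC_printf1 is assumed to be the non-allocating printf wrapper defined later in this header):

    /* Time a hypothetical collection phase and report milliseconds.     */
    /* GET_TIME resolves to either the getrusage (BSD_TIME) or clock()   */
    /* variant selected above; MS_TIME_DIFF converts the difference.     */
    void timed_phase_example(void)
    {
        CLOCK_TYPE start_time;
        CLOCK_TYPE done_time;

        GET_TIME(start_time);
        /* ... marking or other collection work goes here ... */
        GET_TIME(done_time);
        GC_printf1("Phase took %lu msecs\n",
                   (unsigned long) MS_TIME_DIFF(done_time, start_time));
    }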
@@ -254,15 +336,28 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
+ HBLKSIZE-1)
# else
# if defined(AMIGA) || defined(NEXT)
-# define GET_MEM(bytes) HBLKPTR(calloc(1, (size_t)bytes + HBLKSIZE) \
- + HBLKSIZE-1)
+# define GET_MEM(bytes) HBLKPTR((size_t) \
+ calloc(1, (size_t)bytes + HBLKSIZE) \
+ + HBLKSIZE-1)
# else
# ifdef MSWIN32
extern ptr_t GC_win32_get_mem();
# define GET_MEM(bytes) (struct hblk *)GC_win32_get_mem(bytes)
# else
- extern ptr_t GC_unix_get_mem();
-# define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
+# ifdef MACOS
+# if defined(USE_TEMPORARY_MEMORY)
+ extern Ptr GC_MacTemporaryNewPtr(size_t size,
+ Boolean clearMemory);
+# define GET_MEM(bytes) HBLKPTR( \
+ GC_MacTemporaryNewPtr(bytes + HBLKSIZE, true) + HBLKSIZE-1)
+# else
+# define GET_MEM(bytes) HBLKPTR( \
+ NewPtrClear(bytes + HBLKSIZE) + HBLKSIZE-1)
+# endif
+# else
+ extern ptr_t GC_unix_get_mem();
+# define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
+# endif
# endif
# endif
# endif
@@ -302,8 +397,8 @@ typedef char * ptr_t; /* A generic pointer to which we can add */
# include <base/PCR_Base.h>
# include <th/PCR_Th.h>
extern PCR_Th_ML GC_allocate_ml;
-# define DCL_LOCK_STATE PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mas
-k
+# define DCL_LOCK_STATE \
+ PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
# define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
@@ -349,9 +444,10 @@ k
# define ENABLE_SIGNALS() \
PCR_Th_SetSigMask(&GC_old_sig_mask, NIL)
# else
-# if defined(SRC_M3) || defined(AMIGA) || defined(SOLARIS_THREADS) || defined(MSWIN32)
- /* Also useful for debugging, and unusually */
- /* correct client code. */
+# if defined(SRC_M3) || defined(AMIGA) || defined(SOLARIS_THREADS) \
+ || defined(MSWIN32) || defined(MACOS) || defined(DJGPP) \
+ || defined(NO_SIGNALS)
+ /* Also useful for debugging. */
/* Should probably use thr_sigsetmask for SOLARIS_THREADS. */
# define DISABLE_SIGNALS()
# define ENABLE_SIGNALS()
@@ -388,7 +484,6 @@ k
/* Abandon ship */
# ifdef PCR
- void PCR_Base_Panic(const char *fmt, ...);
# define ABORT(s) PCR_Base_Panic(s)
# else
# ifdef SMALL_CONFIG
@@ -401,14 +496,14 @@ k
/* Exit abnormally, but without making a mess (e.g. out of memory) */
# ifdef PCR
- void PCR_Base_Exit(int status);
-# define EXIT() PCR_Base_Exit(1)
+# define EXIT() PCR_Base_Exit(1,PCR_waitForever)
# else
# define EXIT() (void)exit(1)
# endif
/* Print warning message, e.g. almost out of memory. */
-# define WARN(s) GC_printf0(s)
+# define WARN(msg,arg) (*GC_current_warn_proc)(msg, (GC_word)(arg))
+extern GC_warn_proc GC_current_warn_proc;
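A hedged sketch of a replacement warning hook (the GC_warn_proc signature, a printf-style message plus one word-sized argument, is what the WARN expansion above passes; the procedure and installer below are hypothetical):

    #include <stdio.h>
    #include "gc_priv.h"   /* for GC_warn_proc and GC_current_warn_proc */

    /* Hypothetical hook: route collector warnings to stderr with a tag. */
    /* WARN(msg, arg) passes a printf-style message and one GC_word, so  */
    /* both are forwarded to fprintf.                                     */
    static void my_warn_proc( char * msg, GC_word arg )
    {
        fprintf( stderr, "[gc warning] " );
        fprintf( stderr, msg, (unsigned long) arg );
    }

    static void install_warn_proc( void )
    {
        GC_current_warn_proc = my_warn_proc;   /* replace the default hook */
    }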
/*********************************/
/* */
@@ -420,14 +515,20 @@ k
# define WORDS_TO_BYTES(x) ((x)<<2)
# define BYTES_TO_WORDS(x) ((x)>>2)
# define LOGWL ((word)5) /* log[2] of CPP_WORDSZ */
-# define modWORDSZ(n) ((n) & 0x1f) /* n mod size of word */
+# define modWORDSZ(n) ((n) & 0x1f) /* n mod size of word */
+# if ALIGNMENT != 4
+# define UNALIGNED
+# endif
#endif
#if CPP_WORDSZ == 64
# define WORDS_TO_BYTES(x) ((x)<<3)
# define BYTES_TO_WORDS(x) ((x)>>3)
# define LOGWL ((word)6) /* log[2] of CPP_WORDSZ */
-# define modWORDSZ(n) ((n) & 0x3f) /* n mod size of word */
+# define modWORDSZ(n) ((n) & 0x3f) /* n mod size of word */
+# if ALIGNMENT != 8
+# define UNALIGNED
+# endif
#endif
#define WORDSZ ((word)CPP_WORDSZ)
@@ -483,10 +584,21 @@ k
/* Round up byte allocation requests to integral number of words, etc. */
# ifdef ADD_BYTE_AT_END
# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1))
+# ifdef ALIGN_DOUBLE
+# define ALIGNED_WORDS(n) (BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2)) & ~1)
+# else
+# define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n)
+# endif
# define SMALL_OBJ(bytes) ((bytes) < WORDS_TO_BYTES(MAXOBJSZ))
# define ADD_SLOP(bytes) ((bytes)+1)
# else
# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1))
+# ifdef ALIGN_DOUBLE
+# define ALIGNED_WORDS(n) \
+ (BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2) - 1) & ~1)
+# else
+# define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n)
+# endif
# define SMALL_OBJ(bytes) ((bytes) <= WORDS_TO_BYTES(MAXOBJSZ))
# define ADD_SLOP(bytes) (bytes)
# endif
@@ -498,9 +610,13 @@ k
* Used by black-listing code, and perhaps by dirty bit maintenance code.
*/
-# define LOG_PHT_ENTRIES 14 /* Collisions are likely if heap grows */
+# ifdef LARGE_CONFIG
+# define LOG_PHT_ENTRIES 17
+# else
+# define LOG_PHT_ENTRIES 14 /* Collisions are likely if heap grows */
/* to more than 16K hblks = 64MB. */
/* Each hash table occupies 2K bytes. */
+# endif
# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
typedef word page_hash_table[PHT_SIZE];
@@ -609,27 +725,29 @@ struct hblk {
/* single load of a base register will do. */
/* Scalars that could easily appear to */
/* be pointers are also put here. */
+/* The main fields should precede any */
+/* conditionally included fields, so that */
+/* gc_inl.h will work even if a different set */
+/* of macros is defined when the client is */
+/* compiled. */
struct _GC_arrays {
word _heapsize;
+ word _max_heapsize;
ptr_t _last_heap_addr;
ptr_t _prev_heap_addr;
word _words_allocd_before_gc;
/* Number of words allocated before this */
/* collection cycle. */
-# ifdef GATHERSTATS
- word _composite_in_use;
- /* Number of words in accessible composite */
- /* objects. */
- word _atomic_in_use;
- /* Number of words in accessible atomic */
- /* objects. */
-# endif
word _words_allocd;
/* Number of words allocated during this collection cycle */
word _words_wasted;
/* Number of words wasted due to internal fragmentation */
/* in large objects allocated since last gc. Approximate.*/
+ word _words_finalized;
+ /* Approximate number of words in objects (and headers) */
+                       /* that became ready for finalization in the last */
+ /* collection. */
word _non_gc_bytes_at_gc;
/* Number of explicitly managed bytes of storage */
/* at last collection. */
@@ -639,24 +757,33 @@ struct _GC_arrays {
ptr_t _objfreelist[MAXOBJSZ+1];
/* free list for objects */
-# ifdef MERGE_SIZES
- unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
- /* Number of words to allocate for a given allocation request in */
- /* bytes. */
-# endif
ptr_t _aobjfreelist[MAXOBJSZ+1];
/* free list for atomic objs */
ptr_t _uobjfreelist[MAXOBJSZ+1];
/* uncollectable but traced objs */
+# ifdef GATHERSTATS
+ word _composite_in_use;
+ /* Number of words in accessible composite */
+ /* objects. */
+ word _atomic_in_use;
+ /* Number of words in accessible atomic */
+ /* objects. */
+# endif
+# ifdef MERGE_SIZES
+ unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
+ /* Number of words to allocate for a given allocation request in */
+ /* bytes. */
+# endif
+
# ifdef STUBBORN_ALLOC
ptr_t _sobjfreelist[MAXOBJSZ+1];
# endif
/* free list for immutable objects */
ptr_t _obj_map[MAXOBJSZ+1];
/* If not NIL, then a pointer to a map of valid */
- /* object addresses. hbh_map[sz][i] is j if the */
+ /* object addresses. _obj_map[sz][i] is j if the */
/* address block_start+i is a valid pointer */
/* to an object at */
/* block_start+i&~3 - WORDS_TO_BYTES(j). */
@@ -668,7 +795,7 @@ struct _GC_arrays {
/* It is OBJ_INVALID if */
/* block_start+WORDS_TO_BYTES(i) is not */
/* valid as a pointer to an object. */
- /* We assume that all values of j <= OBJ_INVALID */
+ /* We assume all values of j <= OBJ_INVALID. */
/* The zeroth entry corresponds to large objects.*/
# ifdef ALL_INTERIOR_POINTERS
# define map_entry_type short
@@ -701,11 +828,7 @@ struct _GC_arrays {
/* GC_valid_offsets[i] ==> */
/* GC_modws_valid_offsets[i%sizeof(word)] */
# endif
- struct hblk * _reclaim_list[MAXOBJSZ+1];
- struct hblk * _areclaim_list[MAXOBJSZ+1];
- struct hblk * _ureclaim_list[MAXOBJSZ+1];
# ifdef STUBBORN_ALLOC
- struct hblk * _sreclaim_list[MAXOBJSZ+1];
page_hash_table _changed_pages;
        /* Stubborn object pages that were changed since last call to  */
/* GC_read_changed. */
@@ -717,7 +840,15 @@ struct _GC_arrays {
page_hash_table _grungy_pages; /* Pages that were dirty at last */
/* GC_read_dirty. */
# endif
-# define MAX_HEAP_SECTS 256 /* Separately added heap sections. */
+# ifdef LARGE_CONFIG
+# if CPP_WORDSZ > 32
+# define MAX_HEAP_SECTS 4096 /* overflows at roughly 64 GB */
+# else
+# define MAX_HEAP_SECTS 768 /* Separately added heap sections. */
+# endif
+# else
+# define MAX_HEAP_SECTS 256
+# endif
struct HeapSect {
ptr_t hs_start; word hs_bytes;
} _heap_sects[MAX_HEAP_SECTS];
@@ -726,8 +857,16 @@ struct _GC_arrays {
/* Start address of memory regions obtained from kernel. */
# endif
/* Block header index; see gc_headers.h */
- bottom_index _all_nils;
+ bottom_index * _all_nils;
bottom_index * _top_index [TOP_SZ];
+#ifdef SAVE_CALL_CHAIN
+ struct callinfo _last_stack[NFRAMES]; /* Stack at last garbage collection.*/
+ /* Useful for debugging mysterious */
+ /* object disappearances. */
+ /* In the multithreaded case, we */
+ /* currently only save the calling */
+ /* stack. */
+#endif
};
extern GC_FAR struct _GC_arrays GC_arrays;
@@ -738,11 +877,7 @@ extern GC_FAR struct _GC_arrays GC_arrays;
# define GC_sobjfreelist GC_arrays._sobjfreelist
# define GC_valid_offsets GC_arrays._valid_offsets
# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
-# define GC_reclaim_list GC_arrays._reclaim_list
-# define GC_areclaim_list GC_arrays._areclaim_list
-# define GC_ureclaim_list GC_arrays._ureclaim_list
# ifdef STUBBORN_ALLOC
-# define GC_sreclaim_list GC_arrays._sreclaim_list
# define GC_changed_pages GC_arrays._changed_pages
# define GC_prev_changed_pages GC_arrays._prev_changed_pages
# endif
@@ -751,11 +886,14 @@ extern GC_FAR struct _GC_arrays GC_arrays;
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_allocd GC_arrays._words_allocd
# define GC_words_wasted GC_arrays._words_wasted
+# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
# define GC_heapsize GC_arrays._heapsize
+# define GC_max_heapsize GC_arrays._max_heapsize
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
+# define GC_last_stack GC_arrays._last_stack
# ifdef MSWIN32
# define GC_heap_bases GC_arrays._heap_bases
# endif
@@ -809,6 +947,13 @@ extern word GC_n_heap_sects; /* Number of separately added heap */
extern word GC_n_heap_bases; /* See GC_heap_bases. */
# endif
+extern word GC_total_black_listed;
+ /* Number of bytes on stack blacklist. */
+
+extern word GC_black_list_spacing;
+ /* Average number of bytes between blacklisted */
+ /* blocks. Approximate. */
+
extern char * GC_invalid_map;
/* Pointer to the nowhere valid hblk map */
/* Blocks pointing to this map are free. */
@@ -881,6 +1026,11 @@ void GC_apply_to_all_blocks(/*fn, client_data*/);
struct hblk * GC_next_block(/* struct hblk * h */);
void GC_mark_init();
void GC_clear_marks(); /* Clear mark bits for all heap objects. */
+void GC_invalidate_mark_state(); /* Tell the marker that marked */
+ /* objects may point to unmarked */
+ /* ones, and roots may point to */
+ /* unmarked objects. */
+ /* Reset mark stack. */
void GC_mark_from_mark_stack(); /* Mark from everything on the mark stack. */
                        /* Return after about one page's worth of      */
/* work. */
@@ -937,6 +1087,9 @@ bool GC_stopped_mark(); /* Stop world and mark from all roots */
/* and rescuers. */
void GC_clear_hdr_marks(/* hhdr */); /* Clear the mark bits in a header */
void GC_add_roots_inner();
+bool GC_is_static_root(/* ptr_t p */);
+ /* Is the address p in one of the registered static */
+ /* root sections? */
void GC_register_dynamic_libraries();
/* Add dynamic library data sections to the root set. */
@@ -964,6 +1117,14 @@ struct hblk * GC_is_black_listed(/* h, len */);
/* these false references. */
void GC_promote_black_lists();
/* Declare an end to a black listing phase. */
+void GC_unpromote_black_lists();
+ /* Approximately undo the effect of the above. */
+ /* This actually loses some information, but */
+ /* only in a reasonably safe way. */
+word GC_number_stack_black_listed(/*struct hblk *start, struct hblk *endp1 */);
+ /* Return the number of (stack) blacklisted */
+ /* blocks in the range for statistical */
+ /* purposes. */
ptr_t GC_scratch_alloc(/*bytes*/);
/* GC internal memory allocation for */
@@ -1016,21 +1177,29 @@ void GC_reclaim_or_delete_all();
/* Arrange for all reclaim lists to be */
/* empty. Judiciously choose between */
/* sweeping and discarding each page. */
+bool GC_reclaim_all(/* GC_stop_func f*/);
+ /* Reclaim all blocks. Abort (in a */
+ /* consistent state) if f returns TRUE. */
bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
-void GC_gcollect_inner();
+bool GC_never_stop_func(); /* Returns FALSE. */
+bool GC_try_to_collect_inner(/* GC_stop_func f */);
/* Collect; caller must have acquired */
/* lock and disabled signals. */
- /* FALSE return indicates nothing was */
- /* done due to insufficient allocation. */
+ /* Collection is aborted if f returns */
+ /* TRUE. Returns TRUE if it completes */
+ /* successfully. */
+# define GC_gcollect_inner() \
+ (void) GC_try_to_collect_inner(GC_never_stop_func)
void GC_finish_collection(); /* Finish collection. Mark bits are */
/* consistent and lock is still held. */
bool GC_collect_or_expand(/* needed_blocks */);
/* Collect or expand heap in an attempt */
/* make the indicated number of free */
/* blocks available. Should be called */
+ /* until the blocks are available or */
/* until it fails by returning FALSE. */
void GC_init(); /* Initialize collector. */
-void GC_collect_a_little(/* n */);
+void GC_collect_a_little_inner(/* int n */);
/* Do n units worth of garbage */
/* collection work, if appropriate. */
/* A unit is an amount appropriate for */
@@ -1050,7 +1219,7 @@ ptr_t GC_generic_malloc_words_small(/*words, kind*/);
/* As above, but size in units of words */
/* Bypasses MERGE_SIZES. Assumes */
/* words <= MAXOBJSZ. */
-ptr_t GC_malloc_ignore_off_page_inner(/* bytes */);
+ptr_t GC_generic_malloc_inner_ignore_off_page(/* bytes, kind */);
/* Allocate an object, where */
/* the client guarantees that there */
/* will always be a pointer to the */
@@ -1123,12 +1292,15 @@ void GC_stubborn_init();
/* Debugging print routines: */
void GC_print_block_list();
void GC_print_hblkfreelist();
+void GC_print_heap_sects();
+void GC_print_static_roots();
+void GC_dump();
/* Make arguments appear live to compiler */
void GC_noop();
/* Logging and diagnostic output: */
-void GC_printf(/* format, a, b, c, d, e, f */);
+void GC_printf GC_PROTO((char * format, long, long, long, long, long, long));
/* A version of printf that doesn't allocate, */
/* is restricted to long arguments, and */
/* (unfortunately) doesn't use varargs for */
diff --git a/gcc_support.c b/gcc_support.c
new file mode 100644
index 00000000..e8a7b820
--- /dev/null
+++ b/gcc_support.c
@@ -0,0 +1,516 @@
+/***************************************************************************
+
+Interface between g++ and Boehm GC
+
+ Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
+
+ THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+ Permission is hereby granted to copy this code for any purpose,
+ provided the above notices are retained on all copies.
+
+ Last modified on Sun Jul 16 23:21:14 PDT 1995 by ellis
+
+This module provides runtime support for implementing the
+Ellis/Detlefs GC proposal, "Safe, Efficient Garbage Collection for
+C++", within g++, using its -fgc-keyword extension. It defines
+versions of __builtin_new, __builtin_new_gc, __builtin_vec_new,
+__builtin_vec_new_gc, __builtin_delete, and __builtin_vec_delete that
+invoke the Boehm GC. It also implements the WeakPointer.h interface.
+
+This module assumes the following configuration options of the Boehm GC:
+
+ -DALL_INTERIOR_POINTERS
+ -DDONT_ADD_BYTE_AT_END
+
+This module adds its own required padding to the end of objects to
+support C/C++ "one-past-the-object" pointer semantics.
+
+****************************************************************************/
+
+#include <stddef.h>
+#include "gc.h"
+
+#if defined(__STDC__)
+# define PROTO( args ) args
+#else
+# define PROTO( args ) ()
+# endif
+
+#define BITSPERBYTE 8
+ /* What's the portable way to do this? */
+
+
+typedef void (*vfp) PROTO(( void ));
+extern vfp __new_handler;
+extern void __default_new_handler PROTO(( void ));
+
+
+/* A destructor_proc is the compiler generated procedure representing a
+C++ destructor. The "flag" argument is a hidden argument following some
+compiler convention. */
+
+typedef void (*destructor_proc) PROTO(( void* this, int flag ));
+
+
+/***************************************************************************
+
+A BI_header is the header the compiler adds to the front of
+new-allocated arrays of objects with destructors. The header is
+padded out to a double, because that's what the compiler does to
+ensure proper alignment of array elements on some architectures.
+
+int NUM_ARRAY_ELEMENTS (void* o)
+ returns the number of array elements for array object o.
+
+char* FIRST_ELEMENT_P (void* o)
+ returns the address of the first element of array object o.
+
+***************************************************************************/
+
+typedef struct BI_header {
+ int nelts;
+ char padding [sizeof( double ) - sizeof( int )];
+ /* Better way to do this? */
+} BI_header;
+
+#define NUM_ARRAY_ELEMENTS( o ) \
+ (((BI_header*) o)->nelts)
+
+#define FIRST_ELEMENT_P( o ) \
+ ((char*) o + sizeof( BI_header ))
+
+
+/***************************************************************************
+
+The __builtin_new routines add a descriptor word to the end of each
+object. The descriptor serves two purposes.
+
+First, the descriptor acts as padding, implementing C/C++ pointer
+semantics. C and C++ allow a valid array pointer to be incremented
+one past the end of an object. The extra padding ensures that the
+collector will recognize that such a pointer points to the object and
+not the next object in memory.
+
+Second, the descriptor stores three extra pieces of information,
+whether an object has a registered finalizer (destructor), whether it
+may have any weak pointers referencing it, and for collectible arrays,
+the element size of the array. The element size is required for the
+array's finalizer to iterate through the elements of the array. (An
+alternative design would have the compiler generate a finalizer
+procedure for each different array type. But given the overhead of
+finalization, there isn't any efficiency to be gained by that.)
+
+The descriptor must be added to non-collectible as well as collectible
+objects, since the Ellis/Detlefs proposal allows "pointer to gc T" to
+be assigned to a "pointer to T", which could then be deleted. Thus,
+__builtin_delete must determine at runtime whether an object is
+collectible, whether it has weak pointers referencing it, and whether
+it may have a finalizer that needs unregistering. Though
+GC_REGISTER_FINALIZER doesn't care if you ask it to unregister a
+finalizer for an object that doesn't have one, it is a non-trivial
+procedure that does a hash look-up, etc. The descriptor trades a
+little extra space for a significant increase in time on the fast path
+through delete. (A similar argument applies to
+GC_UNREGISTER_DISAPPEARING_LINK).
+
+For non-array types, the space for the descriptor could be shrunk to a
+single byte for storing the "has finalizer" flag. But this would save
+space only on arrays of char (whose size is not a multiple of the word
+size) and structs whose largest member is less than a word in size
+(very infrequent). And it would require that programmers actually
+remember to call "delete[]" instead of "delete" (which they should,
+but there are probably lots of buggy programs out there). For the
+moment, the space savings seems not worthwhile, especially considering
+that the Boehm GC is already quite space competitive with other
+mallocs.
+
+
+Given a pointer o to the base of an object:
+
+Descriptor* DESCRIPTOR (void* o)
+ returns a pointer to the descriptor for o.
+
+The implementation of descriptors relies on the fact that the GC
+implementation allocates objects in units of the machine's natural
+word size (e.g. 32 bits on a SPARC, 64 bits on an Alpha).
+
+**************************************************************************/
+
+typedef struct Descriptor {
+ unsigned has_weak_pointers: 1;
+ unsigned has_finalizer: 1;
+ unsigned element_size: BITSPERBYTE * sizeof( unsigned ) - 2;
+} Descriptor;
+
+#define DESCRIPTOR( o ) \
+ ((Descriptor*) ((char*)(o) + GC_size( o ) - sizeof( Descriptor )))
+
+
+/**************************************************************************
+
+Implementations of global operator new() and operator delete()
+
+***************************************************************************/
+
+
+void* __builtin_new( size )
+ size_t size;
+ /*
+ For non-gc non-array types, the compiler generates calls to
+ __builtin_new, which allocates non-collected storage via
+ GC_MALLOC_UNCOLLECTABLE. This ensures that the non-collected
+ storage will be part of the collector's root set, required by the
+ Ellis/Detlefs semantics. */
+{
+ vfp handler = __new_handler ? __new_handler : __default_new_handler;
+
+ while (1) {
+ void* o = GC_MALLOC_UNCOLLECTABLE( size + sizeof( Descriptor ) );
+ if (o != 0) return o;
+ (*handler) ();}}
+
+
+void* __builtin_vec_new( size )
+ size_t size;
+ /*
+ For non-gc array types, the compiler generates calls to
+ __builtin_vec_new. */
+{
+ return __builtin_new( size );}
+
+
+void* __builtin_new_gc( size )
+ size_t size;
+ /*
+ For gc non-array types, the compiler generates calls to
+ __builtin_new_gc, which allocates collected storage via
+ GC_MALLOC. */
+{
+ vfp handler = __new_handler ? __new_handler : __default_new_handler;
+
+ while (1) {
+ void* o = GC_MALLOC( size + sizeof( Descriptor ) );
+ if (o != 0) return o;
+ (*handler) ();}}
+
+
+void* __builtin_new_gc_a( size )
+ size_t size;
+ /*
+ For non-pointer-containing gc non-array types, the compiler
+ generates calls to __builtin_new_gc_a, which allocates collected
+ storage via GC_MALLOC_ATOMIC. */
+{
+ vfp handler = __new_handler ? __new_handler : __default_new_handler;
+
+ while (1) {
+ void* o = GC_MALLOC_ATOMIC( size + sizeof( Descriptor ) );
+ if (o != 0) return o;
+ (*handler) ();}}
+
+
+void* __builtin_vec_new_gc( size )
+ size_t size;
+ /*
+ For gc array types, the compiler generates calls to
+ __builtin_vec_new_gc. */
+{
+ return __builtin_new_gc( size );}
+
+
+void* __builtin_vec_new_gc_a( size )
+ size_t size;
+ /*
+ For non-pointer-containing gc array types, the compiler generates
+ calls to __builtin_vec_new_gc_a. */
+{
+ return __builtin_new_gc_a( size );}
+
+
+static void call_destructor( o, data )
+ void* o;
+ void* data;
+ /*
+ call_destructor is the GC finalizer proc registered for non-array
+ gc objects with destructors. Its client data is the destructor
+ proc, which it calls with the magic integer 2, a special flag
+ obeying the compiler convention for destructors. */
+{
+ ((destructor_proc) data)( o, 2 );}
+
+
+void* __builtin_new_gc_dtor( o, d )
+ void* o;
+ destructor_proc d;
+ /*
+ The compiler generates a call to __builtin_new_gc_dtor to register
+ the destructor "d" of a non-array gc object "o" as a GC finalizer.
+ The destructor is registered via
+ GC_REGISTER_FINALIZER_IGNORE_SELF, which causes the collector to
+ ignore pointers from the object to itself when determining when
+ the object can be finalized. This is necessary due to the self
+ pointers used in the internal representation of multiply-inherited
+ objects. */
+{
+ Descriptor* desc = DESCRIPTOR( o );
+
+ GC_REGISTER_FINALIZER_IGNORE_SELF( o, call_destructor, d, 0, 0 );
+ desc->has_finalizer = 1;}
+
+
+static void call_array_destructor( o, data )
+ void* o;
+ void* data;
+ /*
+ call_array_destructor is the GC finalizer proc registered for gc
+ array objects whose elements have destructors. Its client data is
+ the destructor proc. It iterates through the elements of the
+ array in reverse order, calling the destructor on each. */
+{
+ int num = NUM_ARRAY_ELEMENTS( o );
+ Descriptor* desc = DESCRIPTOR( o );
+ size_t size = desc->element_size;
+ char* first_p = FIRST_ELEMENT_P( o );
+ char* p = first_p + (num - 1) * size;
+
+ if (num > 0) {
+ while (1) {
+ ((destructor_proc) data)( p, 2 );
+ if (p == first_p) break;
+ p -= size;}}}
+
+
+void* __builtin_vec_new_gc_dtor( first_elem, d, element_size )
+ void* first_elem;
+ destructor_proc d;
+ size_t element_size;
+ /*
+ The compiler generates a call to __builtin_vec_new_gc_dtor to
+ register the destructor "d" of a gc array object as a GC
+ finalizer. "first_elem" points to the first element of the array,
+ *not* the beginning of the object (this makes the generated call
+ to this function smaller). The elements of the array are of size
+ "element_size". The destructor is registered as in
+    __builtin_new_gc_dtor. */
+{
+ void* o = (char*) first_elem - sizeof( BI_header );
+ Descriptor* desc = DESCRIPTOR( o );
+
+ GC_REGISTER_FINALIZER_IGNORE_SELF( o, call_array_destructor, d, 0, 0 );
+ desc->element_size = element_size;
+ desc->has_finalizer = 1;}
+
+
+void __builtin_delete( o )
+ void* o;
+ /*
+ The compiler generates calls to __builtin_delete for operator
+ delete(). The GC currently requires that any registered
+ finalizers be unregistered before explicitly freeing an object.
+ If the object has any weak pointers referencing it, we can't
+ actually free it now. */
+{
+ if (o != 0) {
+ Descriptor* desc = DESCRIPTOR( o );
+ if (desc->has_finalizer) GC_REGISTER_FINALIZER( o, 0, 0, 0, 0 );
+ if (! desc->has_weak_pointers) GC_FREE( o );}}
+
+
+void __builtin_vec_delete( o )
+ void* o;
+ /*
+    The compiler generates calls to __builtin_vec_delete for operator
+ delete[](). */
+{
+ __builtin_delete( o );}
+
+
+/**************************************************************************
+
+Implementations of the template class WeakPointer from WeakPointer.h
+
+***************************************************************************/
+
+typedef struct WeakPointer {
+ void* pointer;
+} WeakPointer;
+
+
+void* _WeakPointer_New( t )
+ void* t;
+{
+ if (t == 0) {
+ return 0;}
+ else {
+ void* base = GC_base( t );
+ WeakPointer* wp =
+ (WeakPointer*) GC_MALLOC_ATOMIC( sizeof( WeakPointer ) );
+ Descriptor* desc = DESCRIPTOR( base );
+
+ wp->pointer = t;
+ desc->has_weak_pointers = 1;
+ GC_general_register_disappearing_link( &wp->pointer, base );
+ return wp;}}
+
+
+static void* PointerWithLock( wp )
+ WeakPointer* wp;
+{
+ if (wp == 0 || wp->pointer == 0) {
+ return 0;}
+ else {
+ return (void*) wp->pointer;}}
+
+
+void* _WeakPointer_Pointer( wp )
+ WeakPointer* wp;
+{
+ return (void*) GC_call_with_alloc_lock( PointerWithLock, wp );}
+
+
+typedef struct EqualClosure {
+ WeakPointer* wp1;
+ WeakPointer* wp2;
+} EqualClosure;
+
+
+static void* EqualWithLock( ec )
+ EqualClosure* ec;
+{
+ if (ec->wp1 == 0 || ec->wp2 == 0) {
+ return (void*) (ec->wp1 == ec->wp2);}
+ else {
+ return (void*) (ec->wp1->pointer == ec->wp2->pointer);}}
+
+
+int _WeakPointer_Equal( wp1, wp2 )
+ WeakPointer* wp1;
+ WeakPointer* wp2;
+{
+ EqualClosure ec;
+
+ ec.wp1 = wp1;
+ ec.wp2 = wp2;
+ return (int) GC_call_with_alloc_lock( EqualWithLock, &ec );}
+
+
+int _WeakPointer_Hash( wp )
+ WeakPointer* wp;
+{
+ return (int) _WeakPointer_Pointer( wp );}
+
+
+/**************************************************************************
+
+Implementations of the template class CleanUp from WeakPointer.h
+
+***************************************************************************/
+
+typedef struct Closure {
+ void (*c) PROTO(( void* d, void* t ));
+ ptrdiff_t t_offset;
+ void* d;
+} Closure;
+
+
+static void _CleanUp_CallClosure( obj, data )
+ void* obj;
+ void* data;
+{
+ Closure* closure = (Closure*) data;
+ closure->c( closure->d, (char*) obj + closure->t_offset );}
+
+
+void _CleanUp_Set( t, c, d )
+ void* t;
+ void (*c) PROTO(( void* d, void* t ));
+ void* d;
+{
+ void* base = GC_base( t );
+ Descriptor* desc = DESCRIPTOR( t );
+
+ if (c == 0) {
+ GC_REGISTER_FINALIZER_IGNORE_SELF( base, 0, 0, 0, 0 );
+ desc->has_finalizer = 0;}
+ else {
+ Closure* closure = (Closure*) GC_MALLOC( sizeof( Closure ) );
+ closure->c = c;
+ closure->t_offset = (char*) t - (char*) base;
+ closure->d = d;
+ GC_REGISTER_FINALIZER_IGNORE_SELF( base, _CleanUp_CallClosure,
+ closure, 0, 0 );
+ desc->has_finalizer = 1;}}
+
+
+void _CleanUp_Call( t )
+ void* t;
+{
+ /* ? Aren't we supposed to deactivate weak pointers to t too?
+ Why? */
+ void* base = GC_base( t );
+ void* d;
+ GC_finalization_proc f;
+
+ GC_REGISTER_FINALIZER( base, 0, 0, &f, &d );
+ f( base, d );}
+
+
+typedef struct QueueElem {
+ void* o;
+ GC_finalization_proc f;
+ void* d;
+ struct QueueElem* next;
+} QueueElem;
+
+
+void* _CleanUp_Queue_NewHead()
+{
+ return GC_MALLOC( sizeof( QueueElem ) );}
+
+
+static void _CleanUp_Queue_Enqueue( obj, data )
+ void* obj;
+ void* data;
+{
+ QueueElem* q = (QueueElem*) data;
+ QueueElem* head = q->next;
+
+ q->o = obj;
+ q->next = head->next;
+ head->next = q;}
+
+
+void _CleanUp_Queue_Set( h, t )
+ void* h;
+ void* t;
+{
+ QueueElem* head = (QueueElem*) h;
+ void* base = GC_base( t );
+ void* d;
+ GC_finalization_proc f;
+ QueueElem* q = (QueueElem*) GC_MALLOC( sizeof( QueueElem ) );
+
+ GC_REGISTER_FINALIZER( base, _CleanUp_Queue_Enqueue, q, &f, &d );
+ q->f = f;
+ q->d = d;
+ q->next = head;}
+
+
+int _CleanUp_Queue_Call( h )
+ void* h;
+{
+ QueueElem* head = (QueueElem*) h;
+ QueueElem* q = head->next;
+
+ if (q == 0) {
+ return 0;}
+ else {
+ head->next = q->next;
+ q->next = 0;
+ if (q->f != 0) q->f( q->o, q->d );
+ return 1;}}
+
+
+
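As a rough illustration of the weak-pointer primitives implemented above, a hedged sketch driven directly from C rather than through the WeakPointer template in weakpointer.h (the demo function and extern declarations are hypothetical; normally the compiler and the template generate these calls):

    #include <stddef.h>
    #include "gc.h"

    /* These externs mirror definitions in this file. */
    extern void* __builtin_new_gc( size_t size );
    extern void* _WeakPointer_New( void* t );
    extern void* _WeakPointer_Pointer( void* wp );

    void weak_pointer_demo( void )
    {
        void* obj = __builtin_new_gc( 64 );   /* collected, carries a Descriptor */
        void* wp  = _WeakPointer_New( obj );  /* sets has_weak_pointers, links   */

        /* While obj is strongly referenced, _WeakPointer_Pointer( wp ) == obj. */

        obj = 0;                              /* drop the strong reference       */
        GC_gcollect();
        /* Once the collector clears the registered disappearing link,          */
        /* _WeakPointer_Pointer( wp ) returns 0.                                 */
    }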
diff --git a/headers.c b/headers.c
index 2efa27a8..9fac0bf0 100644
--- a/headers.c
+++ b/headers.c
@@ -11,7 +11,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:08 pm PDT */
+/* Boehm, October 7, 1994 9:54 pm PDT */
/*
* This implements:
@@ -102,9 +102,11 @@ hdr * hhdr;
void GC_init_headers()
{
register int i;
-
+
+ GC_all_nils = (bottom_index *)GC_scratch_alloc((word)sizeof(bottom_index));
+ BZERO(GC_all_nils, sizeof(bottom_index));
for (i = 0; i < TOP_SZ; i++) {
- GC_top_index[i] = &GC_all_nils;
+ GC_top_index[i] = GC_all_nils;
}
}
@@ -123,7 +125,7 @@ register word addr;
register bottom_index * old;
old = p = GC_top_index[i];
- while(p != &GC_all_nils) {
+ while(p != GC_all_nils) {
if (p -> key == hi) return(TRUE);
p = p -> hash_link;
}
@@ -133,7 +135,7 @@ register word addr;
r -> hash_link = old;
GC_top_index[i] = r;
# else
- if (GC_top_index[hi] != &GC_all_nils) return(TRUE);
+ if (GC_top_index[hi] != GC_all_nils) return(TRUE);
r = (bottom_index*)GC_scratch_alloc((word)(sizeof (bottom_index)));
if (r == 0) return(FALSE);
GC_top_index[hi] = r;
@@ -227,7 +229,7 @@ word client_data;
} else if (index_p->index[j] == 0) {
j--;
} else {
- j -= (int)(index_p->index[j]);
+ j -= (word)(index_p->index[j]);
}
}
}
@@ -242,7 +244,7 @@ struct hblk * h;
register word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
GET_BI(h, bi);
- if (bi == &GC_all_nils) {
+ if (bi == GC_all_nils) {
register word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
bi = GC_all_bottom_indices;
while (bi != 0 && bi -> key < hi) bi = bi -> asc_link;
diff --git a/if_mach.c b/if_mach.c
index 7359a4f7..f339ab3e 100644
--- a/if_mach.c
+++ b/if_mach.c
@@ -1,4 +1,5 @@
/* Conditionally execute a command based on machine and OS from config.h */
+/* Boehm, November 21, 1994 1:40 pm PST */
# include "config.h"
# include <stdio.h>
@@ -12,6 +13,7 @@ char ** envp;
if (strcmp(OS_TYPE, "") != 0 && strcmp(argv[2], "") != 0
&& strcmp(OS_TYPE, argv[2]) != 0) return(0);
execvp(argv[3], argv+3);
+ perror("Couldn't execute");
Usage:
fprintf(stderr, "Usage: %s mach_type os_type command\n", argv[0]);
diff --git a/include/cord.h b/include/cord.h
new file mode 100644
index 00000000..df21056c
--- /dev/null
+++ b/include/cord.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Author: Hans-J. Boehm (boehm@parc.xerox.com)
+ */
+/* Boehm, October 4, 1994 5:34 pm PDT */
+
+/*
+ * Cords are immutable character strings. A number of operations
+ * on long cords are much more efficient than their strings.h counterparts.
+ * In particular, concatenation takes constant time independent of the length
+ * of the arguments. (Cords are represented as trees, with internal
+ * nodes representing concatenation and leaves consisting of either C
+ * strings or a functional description of the string.)
+ *
+ * The following are reasonable applications of cords. They would perform
+ * unacceptably if C strings were used:
+ * - A compiler that produces assembly language output by repeatedly
+ * concatenating instructions onto a cord representing the output file.
+ * - A text editor that converts the input file to a cord, and then
+ * performs editing operations by producing a new cord representing
+ *        the file after each character change (and keeping the old ones in an
+ * edit history)
+ *
+ * For optimal performance, cords should be built by
+ * concatenating short sections.
+ * This interface is designed for maximum compatibility with C strings.
+ * ASCII NUL characters may be embedded in cords using CORD_from_fn.
+ * This is handled correctly, but CORD_to_char_star will produce a string
+ * with embedded NULs when given such a cord.
+ *
+ * This interface is fairly big, largely for performance reasons.
+ * The most basic constants and functions:
+ *
+ *      CORD - the type of a cord;
+ * CORD_EMPTY - empty cord;
+ * CORD_len(cord) - length of a cord;
+ * CORD_cat(cord1,cord2) - concatenation of two cords;
+ * CORD_substr(cord, start, len) - substring (or subcord);
+ * CORD_pos i; CORD_FOR(i, cord) { ... CORD_pos_fetch(i) ... } -
+ * examine each character in a cord. CORD_pos_fetch(i) is the char.
+ * CORD_fetch(int i) - Retrieve i'th character (slowly).
+ * CORD_cmp(cord1, cord2) - compare two cords.
+ * CORD_from_file(FILE * f) - turn a read-only file into a cord.
+ * CORD_to_char_star(cord) - convert to C string.
+ * (Non-NULL C constant strings are cords.)
+ * CORD_printf (etc.) - cord version of printf. Use %r for cords.
+ */
+# ifndef CORD_H
+
+# define CORD_H
+# include <stddef.h>
+# include <stdio.h>
+/* Cords have type const char *. This is cheating quite a bit, and not */
+/* 100% portable. But it means that nonempty character string */
+/* constants may be used as cords directly, provided the string is */
+/* never modified in place. The empty cord is represented by, and */
+/* can be written as, 0. */
+
+typedef const char * CORD;
+
+/* An empty cord is always represented as nil */
+# define CORD_EMPTY 0
+
+/* Is a nonempty cord represented as a C string? */
+#define CORD_IS_STRING(s) (*(s) != '\0')
+
+/* Concatenate two cords. If the arguments are C strings, they may */
+/* not be subsequently altered. */
+CORD CORD_cat(CORD x, CORD y);
+
+/* Concatenate a cord and a C string with known length. Except for the */
+/* empty string case, this is a special case of CORD_cat. Since the */
+/* length is known, it can be faster. */
+/* The string y is shared with the resulting CORD. Hence it should */
+/* not be altered by the caller. */
+CORD CORD_cat_char_star(CORD x, const char * y, size_t leny);
+
+/* Compute the length of a cord */
+size_t CORD_len(CORD x);
+
+/* Cords may be represented by functions defining the ith character */
+typedef char (* CORD_fn)(size_t i, void * client_data);
+
+/* Turn a functional description into a cord. */
+CORD CORD_from_fn(CORD_fn fn, void * client_data, size_t len);
+
+/* Return the substring (subcord really) of x with length at most n, */
+/* starting at position i. (The initial character has position 0.) */
+CORD CORD_substr(CORD x, size_t i, size_t n);
+
+/* Return the argument, but rebalanced to allow more efficient */
+/* character retrieval, substring operations, and comparisons. */
+/* This is useful only for cords that were built using repeated */
+/* concatenation. Guarantees log time access to the result, unless */
+/* x was obtained through a large number of repeated substring ops */
+/* or the embedded functional descriptions take longer to evaluate. */
+/* May reallocate significant parts of the cord. The argument is not */
+/* modified; only the result is balanced. */
+CORD CORD_balance(CORD x);
+
+/* The following traverse a cord by applying a function to each */
+/* character. This is occasionally appropriate, especially where */
+/* speed is crucial. But, since C doesn't have nested functions, */
+/* clients of this sort of traversal are clumsy to write. Consider */
+/* the functions that operate on cord positions instead. */
+
+/* Function to iteratively apply to individual characters in cord. */
+typedef int (* CORD_iter_fn)(char c, void * client_data);
+
+/* Function to apply to substrings of a cord. Each substring is a */
+/* C character string, not a general cord.                             */
+typedef int (* CORD_batched_iter_fn)(const char * s, void * client_data);
+# define CORD_NO_FN ((CORD_batched_iter_fn)0)
+
+/* Apply f1 to each character in the cord, in ascending order, */
+/* starting at position i. If */
+/* f2 is not CORD_NO_FN, then multiple calls to f1 may be replaced by */
+/* a single call to f2. The parameter f2 is provided only to allow */
+/* some optimization by the client. This terminates when the right */
+/* end of this string is reached, or when f1 or f2 return != 0. In the */
+/* latter case CORD_iter returns != 0. Otherwise it returns 0. */
+/* The specified value of i must be < CORD_len(x). */
+int CORD_iter5(CORD x, size_t i, CORD_iter_fn f1,
+ CORD_batched_iter_fn f2, void * client_data);
+
+/* A simpler version that starts at 0, and without f2: */
+int CORD_iter(CORD x, CORD_iter_fn f1, void * client_data);
+# define CORD_iter(x, f1, cd) CORD_iter5(x, 0, f1, CORD_NO_FN, cd)
+
+/* Similar to CORD_iter5, but end-to-beginning. No provisions for */
+/* CORD_batched_iter_fn. */
+int CORD_riter4(CORD x, size_t i, CORD_iter_fn f1, void * client_data);
+
+/* A simpler version that starts at the end: */
+int CORD_riter(CORD x, CORD_iter_fn f1, void * client_data);
+
+/* Functions that operate on cord positions. The easy way to traverse */
+/* cords. A cord position is logically a pair consisting of a cord */
+/* and an index into that cord. But it is much faster to retrieve a */
+/* character based on a position than on an index.  Unfortunately,     */
+/* positions are big (order of a few 100 bytes), so allocate them with */
+/* caution. */
+/* Things in cord_pos.h should be treated as opaque, except as */
+/* described below. Also note that */
+/* CORD_pos_fetch, CORD_next and CORD_prev have both macro and function */
+/* definitions. The former may evaluate their argument more than once. */
+# include "private/cord_pos.h"
+
+/*
+ Visible definitions from above:
+
+ typedef <OPAQUE but fairly big> CORD_pos[1];
+
+ * Extract the cord from a position:
+ CORD CORD_pos_to_cord(CORD_pos p);
+
+ * Extract the current index from a position:
+ size_t CORD_pos_to_index(CORD_pos p);
+
+ * Fetch the character located at the given position:
+ char CORD_pos_fetch(CORD_pos p);
+
+ * Initialize the position to refer to the given cord and index.
+ * Note that this is the most expensive function on positions:
+ void CORD_set_pos(CORD_pos p, CORD x, size_t i);
+
+ * Advance the position to the next character.
+ * P must be initialized and valid.
+ * Invalidates p if past end:
+ void CORD_next(CORD_pos p);
+
+ * Move the position to the preceding character.
+ * P must be initialized and valid.
+ * Invalidates p if past beginning:
+ void CORD_prev(CORD_pos p);
+
+ * Is the position valid, i.e. inside the cord?
+ int CORD_pos_valid(CORD_pos p);
+*/
+# define CORD_FOR(pos, cord) \
+ for (CORD_set_pos(pos, cord, 0); CORD_pos_valid(pos); CORD_next(pos))
+
+
+/* An out of memory handler to call. May be supplied by client. */
+/* Must not return. */
+extern void (* CORD_oom_fn)(void);
+
+/* Dump the representation of x to stdout in an implementation defined */
+/* manner. Intended for debugging only. */
+void CORD_dump(CORD x);
+
+/* The following could easily be implemented by the client. They are */
+/* provided in cordxtra.c for convenience.                             */
+
+/* Concatenate a character to the end of a cord. */
+CORD CORD_cat_char(CORD x, char c);
+
+/* Concatenate n cords. */
+CORD CORD_catn(int n, /* CORD */ ...);
+
+/* Return the character in CORD_substr(x, i, 1) */
+char CORD_fetch(CORD x, size_t i);
+
+/* Return < 0, 0, or > 0, depending on whether x < y, x = y, x > y */
+int CORD_cmp(CORD x, CORD y);
+
+/* A generalization that takes both starting positions for the */
+/* comparison, and a limit on the number of characters to be compared. */
+int CORD_ncmp(CORD x, size_t x_start, CORD y, size_t y_start, size_t len);
+
+/* Find the first occurrence of s in x at position start or later. */
+/* Return the position of the first character of s in x, or */
+/* CORD_NOT_FOUND if there is none. */
+size_t CORD_str(CORD x, size_t start, CORD s);
+
+/* Return a cord consisting of i copies of (possibly NUL) c. Dangerous */
+/* in conjunction with CORD_to_char_star. */
+/* The resulting representation takes constant space, independent of i. */
+CORD CORD_chars(char c, size_t i);
+# define CORD_nul(i) CORD_chars('\0', (i))
+
+/* Turn a file into a cord.  The file must be seekable.  Its contents  */
+/* must remain constant. The file may be accessed as an immediate */
+/* result of this call and/or as a result of subsequent accesses to */
+/* the cord. Short files are likely to be immediately read, but */
+/* long files are likely to be read on demand, possibly relying on */
+/* stdio for buffering. */
+/* We must have exclusive access to the descriptor f, i.e. we may */
+/* read it at any time, and expect the file pointer to be */
+/* where we left it. Normally this should be invoked as */
+/* CORD_from_file(fopen(...)) */
+/* CORD_from_file arranges to close the file descriptor when it is no */
+/* longer needed (e.g. when the result becomes inaccessible). */
+/* The file f must be such that ftell reflects the actual character */
+/* position in the file, i.e. the number of characters that can be */
+/* or were read with fread. On UNIX systems this is always true. On */
+/* MS Windows systems, f must be opened in binary mode. */
+CORD CORD_from_file(FILE * f);
+
+/* Equivalent to the above, except that the entire file will be read */
+/* and the file pointer will be closed immediately. */
+/* The binary mode restriction from above does not apply. */
+CORD CORD_from_file_eager(FILE * f);
+
+/* Equivalent to the above, except that the file will be read on demand.*/
+/* The binary mode restriction applies. */
+CORD CORD_from_file_lazy(FILE * f);
+
+/* Turn a cord into a C string. The result shares no structure with */
+/* x, and is thus modifiable. */
+char * CORD_to_char_star(CORD x);
+
+/* Identical to the above, but the result may share structure with */
+/* the argument and is thus not modifiable. */
+const char * CORD_to_const_char_star(CORD x);
+
+/* Write a cord to a file, starting at the current position. No */
+/* trailing NULs or newlines are added.                                */
+/* Returns EOF if a write error occurs, 1 otherwise. */
+int CORD_put(CORD x, FILE * f);
+
+/* "Not found" result for the following two functions. */
+# define CORD_NOT_FOUND ((size_t)(-1))
+
+/* A vague analog of strchr. Returns the position (an integer, not */
+/* a pointer) of the first occurrence of (char) c inside x at position */
+/* i or later. The value i must be < CORD_len(x). */
+size_t CORD_chr(CORD x, size_t i, int c);
+
+/* A vague analog of strrchr. Returns index of the last occurrence */
+/* of (char) c inside x at position i or earlier. The value i */
+/* must be < CORD_len(x). */
+size_t CORD_rchr(CORD x, size_t i, int c);
+
+
+/* The following are also not primitive, but are implemented in */
+/* cordprnt.c. They provide functionality similar to the ANSI C */
+/* functions with corresponding names, but with the following */
+/* additions and changes: */
+/* 1. A %r conversion specification specifies a CORD argument. Field */
+/* width, precision, etc. have the same semantics as for %s. */
+/* (Note that %c,%C, and %S were already taken.) */
+/* 2. The format string is represented as a CORD. */
+/* 3. CORD_sprintf and CORD_vsprintf assign the result through the 1st */
+/*    argument.  Unlike their ANSI C versions, there is no need to guess */
+/* the correct buffer size. */
+/* 4. Most of the conversions are implemented through the native       */
+/*    vsprintf.  Hence they are usually no faster, and                  */
+/*    idiosyncrasies of the native printf are preserved.  However,      */
+/* CORD arguments to CORD_sprintf and CORD_vsprintf are NOT copied; */
+/* the result shares the original structure. This may make them */
+/* very efficient in some unusual applications. */
+/* The format string is copied. */
+/* All functions return the number of characters generated or -1 on */
+/* error. This complies with the ANSI standard, but is inconsistent */
+/* with some older implementations of sprintf. */
+
+/* The implementation of these is probably less portable than the rest */
+/* of this package. */
+
+#ifndef CORD_NO_IO
+
+#include <stdarg.h>
+
+int CORD_sprintf(CORD * out, CORD format, ...);
+int CORD_vsprintf(CORD * out, CORD format, va_list args);
+int CORD_fprintf(FILE * f, CORD format, ...);
+int CORD_vfprintf(FILE * f, CORD format, va_list args);
+int CORD_printf(CORD format, ...);
+int CORD_vprintf(CORD format, va_list args);
+
+#endif /* CORD_NO_IO */
+
+# endif /* CORD_H */
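For orientation, a hedged usage sketch of the basic operations declared above (the demo function is hypothetical; CORD_printf is available only when cordprnt.c is linked in):

    #include <stdio.h>
    #include "cord.h"

    void cord_demo( void )
    {
        CORD x = CORD_cat( "Hello, ", "cord world" );  /* O(1) concatenation */
        CORD y = CORD_substr( x, 7, 4 );               /* the subcord "cord" */
        CORD_pos p;

        CORD_printf( "%r has %d characters\n", x, (int) CORD_len( x ) );
        /* Walk y one character at a time using a cord position. */
        CORD_FOR( p, y ) {
            putchar( CORD_pos_fetch( p ) );
        }
        putchar( '\n' );
    }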
diff --git a/include/ec.h b/include/ec.h
new file mode 100644
index 00000000..c829b83a
--- /dev/null
+++ b/include/ec.h
@@ -0,0 +1,70 @@
+# ifndef EC_H
+# define EC_H
+
+# ifndef CORD_H
+# include "cord.h"
+# endif
+
+/* Extensible cords are strings that may be destructively appended to. */
+/* They allow fast construction of cords from characters that are */
+/* being read from a stream. */
+/*
+ * A client might look like:
+ *
+ * {
+ * CORD_ec x;
+ * CORD result;
+ * char c;
+ * FILE *f;
+ *
+ * ...
+ * CORD_ec_init(x);
+ * while(...) {
+ * c = getc(f);
+ * ...
+ * CORD_ec_append(x, c);
+ * }
+ *	    result = CORD_balance(CORD_ec_to_cord(x));
+ *	}
+ *
+ * If a C string is desired as the final result, the call to CORD_balance
+ * may be replaced by a call to CORD_to_char_star.
+ */
+
+# ifndef CORD_BUFSZ
+# define CORD_BUFSZ 128
+# endif
+
+typedef struct CORD_ec_struct {
+ CORD ec_cord;
+ char * ec_bufptr;
+ char ec_buf[CORD_BUFSZ+1];
+} CORD_ec[1];
+
+/* This structure represents the concatenation of ec_cord with */
+/* ec_buf[0 ... (ec_bufptr-ec_buf-1)] */
+
+/* Flush the buffer part of the extensible cord into ec_cord.          */
+/* Note that this is almost the only real function, and it is */
+/* implemented in 6 lines in cordxtra.c */
+void CORD_ec_flush_buf(CORD_ec x);
+
+/* Convert an extensible cord to a cord. */
+# define CORD_ec_to_cord(x) (CORD_ec_flush_buf(x), (x)[0].ec_cord)
+
+/* Initialize an extensible cord. */
+# define CORD_ec_init(x) ((x)[0].ec_cord = 0, (x)[0].ec_bufptr = (x)[0].ec_buf)
+
+/* Append a character to an extensible cord. */
+# define CORD_ec_append(x, c) \
+ { \
+ if ((x)[0].ec_bufptr == (x)[0].ec_buf + CORD_BUFSZ) { \
+ CORD_ec_flush_buf(x); \
+ } \
+ *((x)[0].ec_bufptr)++ = (c); \
+ }
+
+/* Append a cord to an extensible cord. Structure remains shared with */
+/* original. */
+void CORD_ec_append_cord(CORD_ec x, CORD s);
+
+# endif /* EC_H */
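For completeness, the client sketch from the comment above, filled out into a compilable form (the function name is hypothetical; int is used for the character so that EOF is detected correctly):

    #include <stdio.h>
    #include "cord.h"
    #include "ec.h"

    /* Read an entire stdio stream into a balanced cord, one character   */
    /* at a time, using an extensible cord as the accumulation buffer.   */
    CORD cord_from_stream( FILE * f )
    {
        CORD_ec x;
        int c;

        CORD_ec_init( x );
        while ((c = getc( f )) != EOF) {
            CORD_ec_append( x, (char) c );
        }
        return CORD_balance( CORD_ec_to_cord( x ) );
    }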
diff --git a/include/gc.h b/include/gc.h
index 8c3560dd..ab7944ef 100644
--- a/include/gc.h
+++ b/include/gc.h
@@ -1,17 +1,45 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
- * Permission is hereby granted to copy this garbage collector for any purpose,
- * provided the above notices are retained on all copies.
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, October 9, 1995 1:14 pm PDT */
+
+/*
+ * Note that this defines a large number of tuning hooks, which can
+ * safely be ignored in nearly all cases. For normal use it suffices
+ * to call only GC_MALLOC and perhaps GC_REALLOC.
+ * For better performance, also look at GC_MALLOC_ATOMIC, and
+ * GC_enable_incremental. If you need an action to be performed
+ * immediately before an object is collected, look at GC_register_finalizer.
+ * If you are using Solaris threads, look at the end of this file.
+ * Everything else is best ignored unless you encounter performance
+ * problems.
*/
-#ifndef GC_H
+#ifndef _GC_H
+
+# define _GC_H
+
+# if defined(__STDC__) || defined(__cplusplus)
+# define GC_PROTO(args) args
+ typedef void * GC_PTR;
+# else
+# define GC_PROTO(args) ()
+ typedef char * GC_PTR;
+# endif
-# define GC_H
+# ifdef __cplusplus
+ extern "C" {
+# endif
# include <stddef.h>
@@ -28,13 +56,9 @@ typedef long GC_signed_word;
/* Public read-only variables */
-extern GC_word GC_heapsize; /* Heap size in bytes */
-
extern GC_word GC_gc_no;/* Counter incremented per collection. */
/* Includes empty GCs at startup. */
-extern int GC_incremental; /* Using incremental/generational collection. */
-
/* Public R/W variables */
@@ -67,10 +91,11 @@ extern GC_word GC_free_space_divisor;
/* Increasing its value will use less space */
/* but more collection time. Decreasing it */
/* will appreciably decrease collection time */
- /* at the expens of space. */
+ /* at the expense of space. */
/* GC_free_space_divisor = 1 will effectively */
/* disable collections. */
+
/* Public procedures */
/*
* general purpose allocation routines, with roughly malloc calling conv.
@@ -83,28 +108,18 @@ extern GC_word GC_free_space_divisor;
* collectable. GC_malloc_uncollectable and GC_free called on the resulting
* object implicitly update GC_non_gc_bytes appropriately.
*/
-#if defined(__STDC__) || defined(__cplusplus)
- extern void * GC_malloc(size_t size_in_bytes);
- extern void * GC_malloc_atomic(size_t size_in_bytes);
- extern void * GC_malloc_uncollectable(size_t size_in_bytes);
- extern void * GC_malloc_stubborn(size_t size_in_bytes);
-# else
- extern char * GC_malloc(/* size_in_bytes */);
- extern char * GC_malloc_atomic(/* size_in_bytes */);
- extern char * GC_malloc_uncollectable(/* size_in_bytes */);
- extern char * GC_malloc_stubborn(/* size_in_bytes */);
-# endif
+extern GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes));
+extern GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes));
+extern GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes));
+extern GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes));
/* Explicitly deallocate an object. Dangerous if used incorrectly. */
/* Requires a pointer to the base of an object. */
/* If the argument is stubborn, it should not be changeable when freed. */
 /* An object should not be enabled for finalization when it is */
/* explicitly deallocated. */
-#if defined(__STDC__) || defined(__cplusplus)
- extern void GC_free(void * object_addr);
-# else
- extern void GC_free(/* object_addr */);
-# endif
+/* GC_free(0) is a no-op, as required by ANSI C for free. */
+extern void GC_free GC_PROTO((GC_PTR object_addr));
/*
* Stubborn objects may be changed only if the collector is explicitly informed.
@@ -121,27 +136,19 @@ extern GC_word GC_free_space_divisor;
* do so. The same applies to dropping stubborn objects that are still
* changeable.
*/
-void GC_change_stubborn(/* p */);
-void GC_end_stubborn_change(/* p */);
+extern void GC_change_stubborn GC_PROTO((GC_PTR));
+extern void GC_end_stubborn_change GC_PROTO((GC_PTR));
/* Return a pointer to the base (lowest address) of an object given */
/* a pointer to a location within the object. */
/* Return 0 if displaced_pointer doesn't point to within a valid */
/* object. */
-# if defined(__STDC__) || defined(__cplusplus)
- void * GC_base(void * displaced_pointer);
-# else
- char * GC_base(/* char * displaced_pointer */);
-# endif
+extern GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer));
/* Given a pointer to the base of an object, return its size in bytes. */
/* The returned size may be slightly larger than what was originally */
/* requested. */
-# if defined(__STDC__) || defined(__cplusplus)
- size_t GC_size(void * object_addr);
-# else
- size_t GC_size(/* char * object_addr */);
-# endif
+extern size_t GC_size GC_PROTO((GC_PTR object_addr));
/* For compatibility with C library. This is occasionally faster than */
/* a malloc followed by a bcopy. But if you rely on that, either here */
@@ -150,22 +157,25 @@ void GC_end_stubborn_change(/* p */);
/* The resulting object has the same kind as the original. */
/* If the argument is stubborn, the result will have changes enabled. */
/* It is an error to have changes enabled for the original object. */
-# if defined(__STDC__) || defined(__cplusplus)
- extern void * GC_realloc(void * old_object, size_t new_size_in_bytes);
-# else
- extern char * GC_realloc(/* old_object, new_size_in_bytes */);
-# endif
-
-
+/* Follows ANSI conventions for NULL old_object. */
+extern GC_PTR GC_realloc GC_PROTO((GC_PTR old_object,
+ size_t new_size_in_bytes));
+
/* Explicitly increase the heap size. */
/* Returns 0 on failure, 1 on success. */
-extern int GC_expand_hp(/* number_of_4K_blocks */);
+extern int GC_expand_hp GC_PROTO((size_t number_of_bytes));
+
+/* Limit the heap size to n bytes. Useful when you're debugging, */
+/* especially on systems that don't handle running out of memory well. */
+/* n == 0 ==> unbounded. This is the default. */
+extern void GC_set_max_heap_size GC_PROTO((GC_word n));
-/* Clear the set of root segments */
-extern void GC_clear_roots();
+/* Clear the set of root segments. Wizards only. */
+extern void GC_clear_roots GC_PROTO((void));
-/* Add a root segment */
-extern void GC_add_roots(/* low_address, high_address_plus_1 */);
+/* Add a root segment. Wizards only. */
+extern void GC_add_roots GC_PROTO((char * low_address,
+ char * high_address_plus_1));
/* Add a displacement to the set of those considered valid by the */
/* collector. GC_register_displacement(n) means that if p was returned */
@@ -177,47 +187,87 @@ extern void GC_add_roots(/* low_address, high_address_plus_1 */);
/* Preferably, this should be called before any other GC procedures. */
/* Calling it later adds to the probability of excess memory */
/* retention. */
-void GC_register_displacement(/* n */);
-
-/* Explicitly trigger a collection. */
-void GC_gcollect();
+/* This is a no-op if the collector was compiled with recognition of */
+/* arbitrary interior pointers enabled, which is now the default. */
+void GC_register_displacement GC_PROTO((GC_word n));
+
+/* The following version should be used if any debugging allocation is */
+/* being done. */
+void GC_debug_register_displacement GC_PROTO((GC_word n));
+
+/* Explicitly trigger a full, world-stop collection. */
+void GC_gcollect GC_PROTO((void));
+
+/* Trigger a full world-stopped collection. Abort the collection if */
+/* and when stop_func returns a nonzero value. Stop_func will be */
+/* called frequently, and should be reasonably fast. This works even */
+/* if virtual dirty bits, and hence incremental collection is not */
+/* available for this architecture. Collections can be aborted faster */
+/* than normal pause times for incremental collection. However, */
+/* aborted collections do no useful work; the next collection needs */
+/* to start from the beginning. */
+typedef int (* GC_stop_func) GC_PROTO((void));
+int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
+
+/* Return the number of bytes in the heap. Excludes collector private */
+/* data structures. Includes empty blocks and fragmentation loss. */
+/* Includes some pages that were allocated but never written. */
+size_t GC_get_heap_size GC_PROTO((void));
+
+/* Return the number of bytes allocated since the last collection. */
+size_t GC_get_bytes_since_gc GC_PROTO((void));
/* Enable incremental/generational collection. */
/* Not advisable unless dirty bits are */
/* available or most heap objects are */
/* pointerfree(atomic) or immutable. */
/* Don't use in leak finding mode. */
-void GC_enable_incremental();
+/* Ignored if GC_dont_gc is true. */
+void GC_enable_incremental GC_PROTO((void));
+
+/* Perform some garbage collection work, if appropriate. */
+/* Return 0 if there is no more work to be done. */
+/* Typically performs an amount of work corresponding roughly */
+/* to marking from one page. May do more work if further */
+/* progress requires it, e.g. if incremental collection is */
+/* disabled. It is reasonable to call this in a wait loop */
+/* until it returns 0. */
+int GC_collect_a_little GC_PROTO((void));
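
A sketch of the wait-loop idiom described above. input_ready() is a
hypothetical, nonblocking poll supplied by the application; only
GC_collect_a_little itself comes from this interface.

    #include "gc.h"

    extern int input_ready(void);      /* application-supplied poll */

    static void wait_and_collect(void)
    {
        while (!input_ready()) {
            if (GC_collect_a_little() == 0)
                break;                 /* no GC work currently pending */
        }
    }
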
+
+/* Allocate an object of size lb bytes. The client guarantees that */
+/* as long as the object is live, it will be referenced by a pointer */
+/* that points to somewhere within the first 256 bytes of the object. */
+/* (This should normally be declared volatile to prevent the compiler */
+/* from invalidating this assertion.) This routine is only useful */
+/* if a large array is being allocated. It reduces the chance of */
+/* accidentally retaining such an array as a result of scanning an */
+/* integer that happens to be an address inside the array. (Actually, */
+/* it reduces the chance of the allocator not finding space for such */
+/* an array, since it will try hard to avoid introducing such a false */
+/* reference.) On a SunOS 4.X or MS Windows system this is recommended */
+/* for arrays likely to be larger than 100K or so. For other systems, */
+/* or if the collector is not configured to recognize all interior */
+/* pointers, the threshold is normally much higher. */
+extern GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
+extern GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
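
A sketch of the intended calling convention for GC_malloc_ignore_off_page:
the base pointer is kept in a volatile variable, per the comment above, so
that the compiler does not optimize it away while only interior pointers
remain in use. make_big_buffer is an illustrative name.

    #include "gc.h"

    char *make_big_buffer(size_t n)    /* n expected to be large, e.g. > 100K */
    {
        /* Keep a pointer to the start of the object demonstrably live. */
        char * volatile base = (char *)GC_malloc_ignore_off_page(n);

        return (char *)base;
    }
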
/* Debugging (annotated) allocation. GC_gcollect will check */
/* objects allocated in this way for overwrites, etc. */
-# if defined(__STDC__) || defined(__cplusplus)
- extern void * GC_debug_malloc(size_t size_in_bytes,
- char * descr_string, int descr_int);
- extern void * GC_debug_malloc_atomic(size_t size_in_bytes,
- char * descr_string, int descr_int);
- extern void * GC_debug_malloc_uncollectable(size_t size_in_bytes,
- char * descr_string, int descr_int);
- extern void * GC_debug_malloc_stubborn(size_t size_in_bytes,
- char * descr_string, int descr_int);
- extern void GC_debug_free(void * object_addr);
- extern void * GC_debug_realloc(void * old_object,
- size_t new_size_in_bytes,
- char * descr_string, int descr_int);
-# else
- extern char * GC_debug_malloc(/* size_in_bytes, descr_string, descr_int */);
- extern char * GC_debug_malloc_atomic(/* size_in_bytes, descr_string,
- descr_int */);
- extern char * GC_debug_malloc_uncollectable(/* size_in_bytes, descr_string,
- descr_int */);
- extern char * GC_debug_malloc_stubborn(/* size_in_bytes, descr_string,
- descr_int */);
- extern void GC_debug_free(/* object_addr */);
- extern char * GC_debug_realloc(/* old_object, new_size_in_bytes,
- descr_string, descr_int */);
-# endif
-void GC_debug_change_stubborn(/* p */);
-void GC_debug_end_stubborn_change(/* p */);
+extern GC_PTR GC_debug_malloc
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern GC_PTR GC_debug_malloc_atomic
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern GC_PTR GC_debug_malloc_uncollectable
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern GC_PTR GC_debug_malloc_stubborn
+ GC_PROTO((size_t size_in_bytes, char * descr_string, int descr_int));
+extern void GC_debug_free GC_PROTO((GC_PTR object_addr));
+extern GC_PTR GC_debug_realloc
+ GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
+ char * descr_string, int descr_int));
+
+void GC_debug_change_stubborn GC_PROTO((GC_PTR));
+void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
# ifdef GC_DEBUG
# define GC_MALLOC(sz) GC_debug_malloc(sz, __FILE__, __LINE__)
# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, __FILE__, __LINE__)
@@ -229,10 +279,17 @@ void GC_debug_end_stubborn_change(/* p */);
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_register_finalizer(GC_base(p), GC_debug_invoke_finalizer, \
GC_make_closure(f,d), of, od)
+# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
+ GC_register_finalizer_ignore_self( \
+ GC_base(p), GC_debug_invoke_finalizer, \
+ GC_make_closure(f,d), of, od)
# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, __FILE__, \
__LINE__)
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
+# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
+ GC_general_register_disappearing_link(link, GC_base(obj))
+# define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
# else
# define GC_MALLOC(sz) GC_malloc(sz)
# define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
@@ -241,9 +298,14 @@ void GC_debug_end_stubborn_change(/* p */);
# define GC_FREE(p) GC_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
GC_register_finalizer(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
+ GC_register_finalizer_ignore_self(p, f, d, of, od)
# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
+# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
+ GC_general_register_disappearing_link(link, obj)
+# define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
# endif
/* The following are included because they are often convenient, and */
 /* reduce the chance for a misspecified size argument. But calls may */
@@ -252,7 +314,7 @@ void GC_debug_end_stubborn_change(/* p */);
# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
-# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_NEW_UNCOLLECTABLE(sizeof (t))
+# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
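
Typical use of the convenience macros with a hypothetical list node type;
compiling with -DGC_DEBUG switches all of these to the checked, annotated
allocators defined above.

    #include "gc.h"

    struct node {
        struct node *next;
        double *weights;               /* points only to pointer-free data */
    };

    struct node *push(struct node *head, size_t nweights)
    {
        struct node *n = GC_NEW(struct node);

        /* Atomic allocation: the collector will not scan this block for */
        /* pointers. Its contents are uninitialized.                     */
        n->weights = (double *)GC_MALLOC_ATOMIC(nweights * sizeof(double));
        n->next = head;
        return n;                      /* no explicit deallocation needed */
    }
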
/* Finalization. Some of these primitives are grossly unsafe. */
/* The idea is to make them both cheap, and sufficient to build */
@@ -261,15 +323,12 @@ void GC_debug_end_stubborn_change(/* p */);
/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
-# if defined(__STDC__) || defined(__cplusplus)
- typedef void (*GC_finalization_proc)(void * obj, void * client_data);
-# else
- typedef void (*GC_finalization_proc)(/* void * obj, void * client_data */);
-# endif
-
-void GC_register_finalizer(/* void * obj,
- GC_finalization_proc fn, void * cd,
- GC_finalization_proc *ofn, void ** ocd */);
+typedef void (*GC_finalization_proc)
+ GC_PROTO((GC_PTR obj, GC_PTR client_data));
+
+extern void GC_register_finalizer
+ GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+ GC_finalization_proc *ofn, GC_PTR *ocd));
/* When obj is no longer accessible, invoke */
/* (*fn)(obj, cd). If a and b are inaccessible, and */
/* a points to b (after disappearing links have been */
@@ -281,10 +340,7 @@ void GC_register_finalizer(/* void * obj,
/* pointers will not be finalized (or collected). */
/* Thus cycles involving finalizable objects should */
/* be avoided, or broken by disappearing links. */
- /* fn is invoked with the allocation lock held. It may */
- /* not allocate. (Any storage it might need */
- /* should be preallocated and passed as part of cd.) */
- /* fn should terminate as quickly as possible, and */
+ /* Fn should terminate as quickly as possible, and */
/* defer extended computation. */
/* All but the last finalizer registered for an object */
/* is ignored. */
@@ -296,6 +352,25 @@ void GC_register_finalizer(/* void * obj,
/* pointers only if the allocation lock is held, and */
/* such conversions are not performed by finalization */
/* routines. */
+ /* If GC_register_finalizer is aborted as a result of */
+ /* a signal, the object may be left with no */
+ /* finalization, even if neither the old nor new */
+ /* finalizer were NULL. */
+ /* Obj should be the nonNULL starting address of an */
+ /* object allocated by GC_malloc or friends. */
+ /* Note that any garbage collectable object referenced */
+ /* by cd will be considered accessible until the */
+ /* finalizer is invoked. */
+
+/* Another version of the above follows. It ignores */
+/* self-cycles, i.e. pointers from a finalizable object to */
+/* itself. There is a stylistic argument that this is wrong, */
+/* but it's unavoidable for C++, since the compiler may */
+/* silently introduce these. It's also benign in that specific */
+/* case. */
+extern void GC_register_finalizer_ignore_self
+ GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
+ GC_finalization_proc *ofn, GC_PTR *ocd));
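
A sketch of finalizer registration. note_death and make_buffer are
illustrative names; in a debugging build the GC_REGISTER_FINALIZER macro
defined earlier should be used instead of the raw call so that the
displaced debug header is handled correctly.

    #include <stdio.h>
    #include "gc.h"

    static void note_death(GC_PTR obj, GC_PTR client_data)
    {
        fprintf(stderr, "object %p (%s) finalized\n", obj, (char *)client_data);
    }

    void make_buffer(void)
    {
        GC_PTR buf = GC_malloc(1024);
        GC_finalization_proc old_fn;
        GC_PTR old_cd;

        /* Once buf becomes unreachable, a later collection invokes */
        /* note_death(buf, "demo buffer").                          */
        GC_register_finalizer(buf, note_death, (GC_PTR)"demo buffer",
                              &old_fn, &old_cd);
    }
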
/* The following routine may be used to break cycles between */
/* finalizable objects, thus causing cyclic finalizable */
@@ -303,8 +378,8 @@ void GC_register_finalizer(/* void * obj,
/* use involves calling GC_register_disappearing_link(&p), */
/* where p is a pointer that is not followed by finalization */
/* code, and should not be considered in determining */
-/* finalization order. */
-int GC_register_disappearing_link(/* void ** link */);
+/* finalization order. */
+extern int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
/* Link should point to a field of a heap allocated */
/* object obj. *link will be cleared when obj is */
/* found to be inaccessible. This happens BEFORE any */
@@ -323,57 +398,186 @@ int GC_register_disappearing_link(/* void ** link */);
/* Returns 1 if link was already registered, 0 */
/* otherwise. */
/* Only exists for backward compatibility. See below: */
-int GC_general_register_disappearing_link(/* void ** link, void * obj */);
+
+extern int GC_general_register_disappearing_link
+ GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
/* A slight generalization of the above. *link is */
/* cleared when obj first becomes inaccessible. This */
/* can be used to implement weak pointers easily and */
/* safely. Typically link will point to a location */
- /* holding a disguised pointer to obj. In this way */
- /* soft pointers are broken before any object */
+ /* holding a disguised pointer to obj. (A pointer */
+ /* inside an "atomic" object is effectively */
+ /* disguised.) In this way soft */
+ /* pointers are broken before any object */
  	/* reachable from them is finalized. Each link */
  	/* may be registered only once, i.e. with one obj */
/* value. This was added after a long email discussion */
/* with John Ellis. */
-int GC_unregister_disappearing_link(/* void ** link */);
+ /* Obj must be a pointer to the first word of an object */
+ /* we allocated. It is unsafe to explicitly deallocate */
+ /* the object containing link. Explicitly deallocating */
+ /* obj may or may not cause link to eventually be */
+ /* cleared. */
+extern int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
/* Returns 0 if link was not actually registered. */
/* Undoes a registration by either of the above two */
/* routines. */
/* Auxiliary fns to make finalization work correctly with displaced */
/* pointers introduced by the debugging allocators. */
-# if defined(__STDC__) || defined(__cplusplus)
- void * GC_make_closure(GC_finalization_proc fn, void * data);
- void GC_debug_invoke_finalizer(void * obj, void * data);
-# else
- char * GC_make_closure(/* GC_finalization_proc fn, char * data */);
- void GC_debug_invoke_finalizer(/* void * obj, void * data */);
-# endif
+extern GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
+extern void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
+/* GC_set_warn_proc can be used to redirect or filter warning messages. */
+typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
+extern GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
+ /* Returns old warning procedure. */
/* The following is intended to be used by a higher level */
/* (e.g. cedar-like) finalization facility. It is expected */
/* that finalization code will arrange for hidden pointers to */
/* disappear. Otherwise objects can be accessed after they */
/* have been collected. */
-# ifdef I_HIDE_POINTERS
-# if defined(__STDC__) || defined(__cplusplus)
-# define HIDE_POINTER(p) (~(size_t)(p))
-# define REVEAL_POINTER(p) ((void *)(HIDE_POINTER(p)))
-# else
-# define HIDE_POINTER(p) (~(unsigned long)(p))
-# define REVEAL_POINTER(p) ((char *)(HIDE_POINTER(p)))
-# endif
+/* Note that putting pointers in atomic objects or in */
+/* nonpointer slots of "typed" objects is equivalent to */
+/* disguising them in this way, and may have other advantages. */
+# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
+ typedef GC_word GC_hidden_pointer;
+# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
+# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
/* Converting a hidden pointer to a real pointer requires verifying */
/* that the object still exists. This involves acquiring the */
/* allocator lock to avoid a race with the collector. */
-
-# if defined(__STDC__) || defined(__cplusplus)
- typedef void * (*GC_fn_type)();
- void * GC_call_with_alloc_lock(GC_fn_type fn, void * client_data);
+# endif /* I_HIDE_POINTERS */
+
+typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
+extern GC_PTR GC_call_with_alloc_lock
+ GC_PROTO((GC_fn_type fn, GC_PTR client_data));
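
A sketch of a weak reference assembled from the pieces above: a hidden
pointer stored in an atomic heap cell, registered as a disappearing link,
and revealed only under the allocation lock. weak_ref, weak_ref_make and
weak_ref_get are illustrative names; obj must be the base address of an
object allocated by GC_malloc or friends.

    #define I_HIDE_POINTERS            /* must precede the gc.h include */
    #include "gc.h"

    typedef struct { GC_hidden_pointer hidden; } weak_ref;

    weak_ref *weak_ref_make(GC_PTR obj)
    {
        /* Atomic allocation doubles as a disguise: the cell is never */
        /* scanned, so it cannot keep obj alive by itself.            */
        weak_ref *w = (weak_ref *)GC_malloc_atomic(sizeof(weak_ref));

        w->hidden = HIDE_POINTER(obj);
        GC_general_register_disappearing_link((GC_PTR *)&w->hidden, obj);
        return w;
    }

    static GC_PTR reveal(GC_PTR cell)
    {
        weak_ref *w = (weak_ref *)cell;

        /* The collector clears the registered link to 0 once the */
        /* referent becomes inaccessible.                          */
        if (w->hidden == 0) return 0;
        return REVEAL_POINTER(w->hidden);
    }

    /* Returns the referent, or 0 if it has already been reclaimed. */
    GC_PTR weak_ref_get(weak_ref *w)
    {
        return GC_call_with_alloc_lock(reveal, (GC_PTR)w);
    }
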
+
+/* Check that p and q point to the same object. */
+/* Fail conspicuously if they don't. */
+/* Returns the first argument. */
+/* Succeeds if neither p nor q points to the heap. */
+/* May succeed if both p and q point to locations between heap objects. */
+extern GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));
+
+/* Checked pointer pre- and post- increment operations. Note that */
+/* the second argument is in units of bytes, not multiples of the */
+/* object size. This should either be invoked from a macro, or the */
+/* call should be automatically generated. */
+extern GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
+extern GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));
+
+/* Check that p is visible to the collector as a location that may */
+/* contain a pointer. If it isn't, fail conspicuously. */
+/* Returns the argument in all cases. May erroneously succeed */
+/* in hard cases. (This is intended for debugging use with */
+/* untyped allocations. The idea is that it should be possible, though */
+/* slow, to add such a call to all indirect pointer stores.) */
+/* Currently useless for multithreaded worlds. */
+extern GC_PTR GC_is_visible GC_PROTO((GC_PTR p));
+
+/* Check that if p is a pointer to a heap page, then it points to */
+/* a valid displacement within a heap object. */
+/* Fail conspicuously if this property does not hold. */
+/* Uninteresting with ALL_INTERIOR_POINTERS. */
+/* Always returns its argument. */
+extern GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));
+
+/* Safer, but slow, pointer addition. Probably useful mainly with */
+/* a preprocessor. Useful only for heap pointers. */
+#ifdef GC_DEBUG
+# define GC_PTR_ADD3(x, n, type_of_result) \
+ ((type_of_result)GC_same_obj((x)+(n), (x)))
+# define GC_PRE_INCR3(x, n, type_of_result) \
+ ((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x)))
+# define GC_POST_INCR2(x, type_of_result) \
+ ((type_of_result)GC_post_incr(&(x), sizeof(*x)))
+# ifdef __GNUC__
+# define GC_PTR_ADD(x, n) \
+ GC_PTR_ADD3(x, n, typeof(x))
+# define GC_PRE_INCR(x, n) \
+ GC_PRE_INCR3(x, n, typeof(x))
+# define GC_POST_INCR(x, n) \
+ GC_POST_INCR2(x, typeof(x))
# else
- typedef char * (*GC_fn_type)();
- char * GC_call_with_alloc_lock(/* GC_fn_type fn, char * client_data */);
+ /* We can't do this right without typeof, which ANSI */
+ /* decided was not sufficiently useful. Repeatedly */
+ /* mentioning the arguments seems too dangerous to be */
+ /* useful. So does not casting the result. */
+# define GC_PTR_ADD(x, n) ((x)+(n))
# endif
+#else /* !GC_DEBUG */
+# define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
+# define GC_PTR_ADD(x, n) ((x)+(n))
+# define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
+# define GC_PRE_INCR(x, n) ((x) += (n))
+# define GC_POST_INCR2(x, type_of_result) ((x)++)
+# define GC_POST_INCR(x, n) ((x)++)
+#endif
+
+/* Safer assignment of a pointer to a nonstack location. */
+#ifdef GC_DEBUG
+# ifdef __STDC__
+# define GC_PTR_STORE(p, q) \
+ (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
+# else
+# define GC_PTR_STORE(p, q) \
+ (*(char **)GC_is_visible(p) = GC_is_valid_displacement(q))
# endif
+#else /* !GC_DEBUG */
+# define GC_PTR_STORE(p, q) (*(p) = (q))
+#endif
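
A sketch of how the checked operations compose, using a hypothetical
GC-allocated vector type. With GC_DEBUG defined, GC_PTR_STORE verifies that
&v->data is visible to the collector and that base is a valid heap
displacement, and GC_PTR_ADD checks that the arithmetic stays within the
allocated block.

    #include "gc.h"

    struct vec { int *data; int len; };

    struct vec *vec_make(int len)
    {
        struct vec *v = GC_NEW(struct vec);
        int *base = (int *)GC_MALLOC(len * sizeof(int));
        int i;

        GC_PTR_STORE(&v->data, base);          /* checked pointer store */
        v->len = len;
        for (i = 0; i < len; i++) {
            int *elem = GC_PTR_ADD(base, i);   /* checked pointer addition */
            *elem = 0;
        }
        return v;
    }
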
+
+#ifdef SOLARIS_THREADS
+/* We need to intercept calls to many of the threads primitives, so */
+/* that we can locate thread stacks and stop the world. */
+/* Note also that the collector cannot see thread specific data. */
+/* Thread specific data should generally consist of pointers to */
+/* uncollectable objects, which are deallocated using the destructor */
+/* facility in thr_keycreate. */
+# include <thread.h>
+# include <signal.h>
+ int GC_thr_create(void *stack_base, size_t stack_size,
+ void *(*start_routine)(void *), void *arg, long flags,
+ thread_t *new_thread);
+ int GC_thr_join(thread_t wait_for, thread_t *departed, void **status);
+ int GC_thr_suspend(thread_t target_thread);
+ int GC_thr_continue(thread_t target_thread);
+ void * GC_dlopen(const char *path, int mode);
+
+# define thr_create GC_thr_create
+# define thr_join GC_thr_join
+# define thr_suspend GC_thr_suspend
+# define thr_continue GC_thr_continue
+# define dlopen GC_dlopen
+
+/* This returns a list of objects, linked through their first */
+/* word. Its use can greatly reduce lock contention problems, since */
+/* the allocation lock can be acquired and released many fewer times. */
+GC_PTR GC_malloc_many(size_t lb);
+#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
+ /* in returned list. */
+
+#endif /* SOLARIS_THREADS */
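
A sketch of batch allocation with GC_malloc_many (Solaris threads build
only; the collector itself must have been compiled with -DSOLARIS_THREADS).
One call acquires the allocation lock once and hands back a whole list of
equally sized objects linked through their first word. use_batch and
consume are illustrative names.

    #define SOLARIS_THREADS
    #include "gc.h"

    #define OBJ_BYTES 24

    void use_batch(void (*consume)(GC_PTR))
    {
        GC_PTR batch = GC_malloc_many(OBJ_BYTES);

        while (batch != 0) {
            GC_PTR obj = batch;
            batch = GC_NEXT(batch);    /* follow the link in the first word */
            GC_NEXT(obj) = 0;          /* clear the link before handing out */
            consume(obj);
        }
    }
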
+
+/*
+ * If you are planning on putting
+ * the collector in a SunOS 5 dynamic library, you need to call GC_INIT()
+ * from the statically loaded program section.
+ * This circumvents a Solaris 2.X (X<=4) linker bug.
+ */
+#if defined(sparc) || defined(__sparc)
+# define GC_INIT() { extern end, etext; \
+ GC_noop(&end, &etext); }
+#else
+# define GC_INIT()
#endif
+
+#ifdef __cplusplus
+ } /* end of extern "C" */
+#endif
+
+#endif /* _GC_H */
diff --git a/include/gc_cpp.h b/include/gc_cpp.h
new file mode 100644
index 00000000..812bb653
--- /dev/null
+++ b/include/gc_cpp.h
@@ -0,0 +1,285 @@
+#ifndef GC_CPP_H
+#define GC_CPP_H
+/****************************************************************************
+Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+
+THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+Permission is hereby granted to use or copy this program for any
+purpose, provided the above notices are retained on all copies.
+Permission to modify the code and to distribute modified code is
+granted, provided the above notices are retained, and a notice that
+the code was modified is included with the above copyright notice.
+****************************************************************************
+
+C++ Interface to the Boehm Collector
+
+ John R. Ellis and Jesse Hull
+ Last modified on Mon Jul 24 15:43:42 PDT 1995 by ellis
+
+This interface provides access to the Boehm collector. It provides
+basic facilities similar to those described in "Safe, Efficient
+Garbage Collection for C++", by John R. Ellis and David L. Detlefs
+(ftp.parc.xerox.com:/pub/ellis/gc).
+
+All heap-allocated objects are either "collectable" or
+"uncollectable". Programs must explicitly delete uncollectable
+objects, whereas the garbage collector will automatically delete
+collectable objects when it discovers them to be inaccessible.
+Collectable objects may freely point at uncollectable objects and vice
+versa.
+
+Objects allocated with the built-in "::operator new" are uncollectable.
+
+Objects derived from class "gc" are collectable. For example:
+
+ class A: public gc {...};
+ A* a = new A; // a is collectable.
+
+Collectable instances of non-class types can be allocated using the GC
+placement:
+
+ typedef int A[ 10 ];
+ A* a = new (GC) A;
+
+Uncollectable instances of classes derived from "gc" can be allocated
+using the NoGC placement:
+
+ class A: public gc {...};
+ A* a = new (NoGC) A; // a is uncollectable.
+
+Both uncollectable and collectable objects can be explicitly deleted
+with "delete", which invokes an object's destructors and frees its
+storage immediately.
+
+A collectable object may have a clean-up function, which will be
+invoked when the collector discovers the object to be inaccessible.
+An object derived from "gc_cleanup" or containing a member derived
+from "gc_cleanup" has a default clean-up function that invokes the
+object's destructors. Explicit clean-up functions may be specified as
+an additional placement argument:
+
+ A* a = ::new (GC, MyCleanup) A;
+
+An object is considered "accessible" by the collector if it can be
+reached by a path of pointers from static variables, automatic
+variables of active functions, or from some object with clean-up
+enabled; pointers from an object to itself are ignored.
+
+Thus, if objects A and B both have clean-up functions, and A points at
+B, B is considered accessible. After A's clean-up is invoked and its
+storage released, B will then become inaccessible and will have its
+clean-up invoked. If A points at B and B points to A, forming a
+cycle, then that's considered a storage leak, and neither will be
+collectable. See the interface gc.h for low-level facilities for
+handling such cycles of objects with clean-up.
+
+The collector cannot guarantee that it will find all inaccessible
+objects. In practice, it finds almost all of them.
+
+
+Cautions:
+
+1. Be sure the collector has been augmented with "make c++".
+
+2. If your compiler supports the new "operator new[]" syntax, then
+add -DOPERATOR_NEW_ARRAY to the Makefile.
+
+If your compiler doesn't support "operator new[]", beware that an
+array of type T, where T is derived from "gc", may or may not be
+allocated as a collectable object (it depends on the compiler). Use
+the explicit GC placement to make the array collectable. For example:
+
+ class A: public gc {...};
+ A* a1 = new A[ 10 ]; // collectable or uncollectable?
+ A* a2 = new (GC) A[ 10 ]; // collectable
+
+3. The destructors of collectable arrays of objects derived from
+"gc_cleanup" will not be invoked properly. For example:
+
+ class A: public gc_cleanup {...};
+ A* a = new (GC) A[ 10 ]; // destructors not invoked correctly
+
+Typically, only the destructor for the first element of the array will
+be invoked when the array is garbage-collected. To get all the
+destructors of any array executed, you must supply an explicit
+clean-up function:
+
+ A* a = new (GC, MyCleanUp) A[ 10 ];
+
+(Implementing clean-up of arrays correctly, portably, and in a way
+that preserves the correct exception semantics requires a language
+extension, e.g. the "gc" keyword.)
+
+4. Compiler bugs:
+
+* Solaris 2's CC (SC3.0) doesn't implement t->~T() correctly, so the
+destructors of classes derived from gc_cleanup won't be invoked.
+You'll have to explicitly register a clean-up function with
+new-placement syntax.
+
+* Evidently cfront 3.0 does not allow destructors to be explicitly
+invoked using the ANSI-conforming syntax t->~T(). If you're using
+cfront 3.0, you'll have to comment out the class gc_cleanup, which
+uses explicit invocation.
+
+****************************************************************************/
+
+#include "gc.h"
+
+#ifndef THINK_CPLUS
+#define _cdecl
+#endif
+
+#if ! defined( OPERATOR_NEW_ARRAY ) \
+ && (__BORLANDC__ >= 0x450 || (__GNUC__ >= 2 && __GNUC_MINOR__ >= 6))
+# define OPERATOR_NEW_ARRAY
+#endif
+
+enum GCPlacement {GC, NoGC};
+
+class gc {public:
+ inline void* operator new( size_t size );
+ inline void* operator new( size_t size, GCPlacement gcp );
+ inline void operator delete( void* obj );
+
+#ifdef OPERATOR_NEW_ARRAY
+ inline void* operator new[]( size_t size );
+ inline void* operator new[]( size_t size, GCPlacement gcp );
+ inline void operator delete[]( void* obj );
+#endif /* OPERATOR_NEW_ARRAY */
+ };
+ /*
+ Instances of classes derived from "gc" will be allocated in the
+ collected heap by default, unless an explicit NoGC placement is
+ specified. */
+
+class gc_cleanup: virtual public gc {public:
+ inline gc_cleanup();
+ inline virtual ~gc_cleanup();
+private:
+ inline static void _cdecl cleanup( void* obj, void* clientData );};
+ /*
+ Instances of classes derived from "gc_cleanup" will be allocated
+ in the collected heap by default. When the collector discovers an
+ inaccessible object derived from "gc_cleanup" or containing a
+ member derived from "gc_cleanup", its destructors will be
+ invoked. */
+
+extern "C" {typedef void (*GCCleanUpFunc)( void* obj, void* clientData );}
+
+inline void* operator new(
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup = 0,
+ void* clientData = 0 );
+ /*
+ Allocates a collectable or uncollectable object, according to the
+ value of "gcp".
+
+ For collectable objects, if "cleanup" is non-null, then when the
+ allocated object "obj" becomes inaccessible, the collector will
+ invoke the function "cleanup( obj, clientData )" but will not
+ invoke the object's destructors. It is an error to explicitly
+ delete an object allocated with a non-null "cleanup".
+
+ It is an error to specify a non-null "cleanup" with NoGC or for
+ classes derived from "gc_cleanup" or containing members derived
+ from "gc_cleanup". */
+
+#ifdef OPERATOR_NEW_ARRAY
+
+inline void* operator new[](
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup = 0,
+ void* clientData = 0 );
+ /*
+ The operator new for arrays, identical to the above. */
+
+#endif /* OPERATOR_NEW_ARRAY */
+
+/****************************************************************************
+
+Inline implementation
+
+****************************************************************************/
+
+inline void* gc::operator new( size_t size ) {
+ return GC_MALLOC( size );}
+
+inline void* gc::operator new( size_t size, GCPlacement gcp ) {
+ if (gcp == GC)
+ return GC_MALLOC( size );
+ else
+ return GC_MALLOC_UNCOLLECTABLE( size );}
+
+inline void gc::operator delete( void* obj ) {
+ GC_FREE( obj );}
+
+
+#ifdef OPERATOR_NEW_ARRAY
+
+inline void* gc::operator new[]( size_t size ) {
+ return gc::operator new( size );}
+
+inline void* gc::operator new[]( size_t size, GCPlacement gcp ) {
+ return gc::operator new( size, gcp );}
+
+inline void gc::operator delete[]( void* obj ) {
+ gc::operator delete( obj );}
+
+#endif /* OPERATOR_NEW_ARRAY */
+
+
+inline gc_cleanup::~gc_cleanup() {
+ GC_REGISTER_FINALIZER_IGNORE_SELF( this, 0, 0, 0, 0 );}
+
+inline void gc_cleanup::cleanup( void* obj, void* displ ) {
+ ((gc_cleanup*) ((char*) obj + (ptrdiff_t) displ))->~gc_cleanup();}
+
+inline gc_cleanup::gc_cleanup() {
+ GC_finalization_proc oldProc;
+ void* oldData;
+ void* base = GC_base( (void *) this );
+ if (0 == base) return;
+ GC_REGISTER_FINALIZER_IGNORE_SELF(
+ base, cleanup, (void*) ((char*) this - (char*) base),
+ &oldProc, &oldData );
+ if (0 != oldProc) {
+ GC_REGISTER_FINALIZER_IGNORE_SELF( base, oldProc, oldData, 0, 0 );}}
+
+inline void* operator new(
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup,
+ void* clientData )
+{
+ void* obj;
+
+ if (gcp == GC) {
+ obj = GC_MALLOC( size );
+ if (cleanup != 0)
+ GC_REGISTER_FINALIZER_IGNORE_SELF(
+ obj, cleanup, clientData, 0, 0 );}
+ else {
+ obj = GC_MALLOC_UNCOLLECTABLE( size );};
+ return obj;}
+
+
+#ifdef OPERATOR_NEW_ARRAY
+
+inline void* operator new[](
+ size_t size,
+ GCPlacement gcp,
+ GCCleanUpFunc cleanup,
+ void* clientData )
+{
+ return ::operator new( size, gcp, cleanup, clientData );}
+
+#endif /* OPERATOR_NEW_ARRAY */
+
+
+#endif /* GC_CPP_H */
+
diff --git a/gc_inl.h b/include/gc_inl.h
index 1f9a9a0d..700843bb 100644
--- a/gc_inl.h
+++ b/include/gc_inl.h
@@ -1,6 +1,6 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -11,12 +11,20 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:12 pm PDT */
+/* Boehm, October 3, 1995 2:07 pm PDT */
# ifndef GC_PRIVATE_H
-# include "gc_priv.h"
+# include "private/gc_priv.h"
# endif
+/* USE OF THIS FILE IS NOT RECOMMENDED unless the collector has been */
+/* compiled without -DALL_INTERIOR_POINTERS or with */
+/* -DDONT_ADD_BYTE_AT_END, or the specified size includes a pointerfree */
+/* word at the end. In the standard collector configuration, */
+/* the final word of each object may not be scanned. */
+/* This is most useful for compilers that generate C. */
+/* Manual use is hereby discouraged. */
+
/* Allocate n words (NOT BYTES). X is made to point to the result. */
/* It is assumed that n < MAXOBJSZ, and */
/* that n > 0. On machines requiring double word alignment of some */
@@ -46,7 +54,7 @@
obj_link(op) = 0; \
GC_words_allocd += (n); \
FASTUNLOCK(); \
- (result) = (extern_ptr_t) op; \
+ (result) = (GC_PTR) op; \
} \
}
@@ -68,7 +76,7 @@
obj_link(op) = 0; \
GC_words_allocd += (n); \
FASTUNLOCK(); \
- (result) = (extern_ptr_t) op; \
+ (result) = (GC_PTR) op; \
} \
}
@@ -91,5 +99,5 @@
} \
((word *)op)[0] = (word)(first); \
((word *)op)[1] = (word)(second); \
- (result) = (extern_ptr_t) op; \
+ (result) = (GC_PTR) op; \
}
diff --git a/gc_inline.h b/include/gc_inline.h
index db62d1d5..db62d1d5 100644
--- a/gc_inline.h
+++ b/include/gc_inline.h
diff --git a/include/gc_typed.h b/include/gc_typed.h
index 401fd062..f7cc2f22 100644
--- a/include/gc_typed.h
+++ b/include/gc_typed.h
@@ -1,3 +1,16 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
/*
* Some simple primitives for allocation with explicit type information.
* Facilities for dynamic type inference may be added later.
@@ -6,7 +19,7 @@
* Note that this is implemented completely separately from the rest
* of the collector, and is not linked in unless referenced.
*/
-/* Boehm, March 31, 1994 4:43 pm PST */
+/* Boehm, May 19, 1994 2:13 pm PDT */
#ifndef _GC_TYPED_H
# define _GC_TYPED_H
@@ -26,9 +39,9 @@ typedef GC_word * GC_bitmap;
typedef GC_word GC_descr;
#if defined(__STDC__) || defined(__cplusplus)
- extern GC_descr GC_make_decriptor(GC_bitmap bm, size_t len);
+ extern GC_descr GC_make_descriptor(GC_bitmap bm, size_t len);
#else
- extern GC_descr GC_make_decriptor(/* GC_bitmap bm, size_t len */);
+ extern GC_descr GC_make_descriptor(/* GC_bitmap bm, size_t len */);
#endif
/* Return a type descriptor for the object whose layout */
/* is described by the argument. */
@@ -39,7 +52,12 @@ typedef GC_word GC_descr;
/* may be larger (but not smaller). Any additional */
/* words in the object are assumed not to contain */
/* pointers. */
- /* Returns (GC_descr)(-1) on failure (no memory). */
+ /* Returns a conservative approximation in the */
+ /* (unlikely) case of insufficient memory to build */
+ /* the descriptor. Calls to GC_make_descriptor */
+ /* may consume some amount of a finite resource. This */
+ /* is intended to be called once per type, not once */
+ /* per allocation. */
#if defined(__STDC__) || defined(__cplusplus)
extern void * GC_malloc_explicitly_typed(size_t size_in_bytes, GC_descr d);
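
A sketch of explicitly typed allocation, assuming the gc_typed.h convention
that bit i of the bitmap is set iff word i of the object may contain a
pointer (that convention is documented in the header but not quoted in this
hunk). struct entry and entry_alloc are illustrative names; the descriptor
is built once per type, as recommended above.

    #include "gc.h"
    #include "gc_typed.h"

    struct entry {
        GC_word key;            /* word 0: never a pointer */
        struct entry *next;     /* word 1: a pointer       */
    };

    struct entry *entry_alloc(void)
    {
        static GC_descr entry_descr = 0;

        if (entry_descr == 0) {                 /* lazy init; a sketch, not MT-safe */
            GC_word bitmap[1] = { 0x2 };        /* only word 1 holds a pointer */
            entry_descr = GC_make_descriptor(bitmap, 2);
        }
        return (struct entry *)
            GC_malloc_explicitly_typed(sizeof(struct entry), entry_descr);
    }
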
diff --git a/include/private/config.h b/include/private/config.h
new file mode 100644
index 00000000..62492c3e
--- /dev/null
+++ b/include/private/config.h
@@ -0,0 +1,687 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, October 3, 1995 6:39 pm PDT */
+
+#ifndef CONFIG_H
+
+# define CONFIG_H
+
+/* Machine dependent parameters. Some tuning parameters can be found */
+/* near the top of gc_private.h. */
+
+/* Machine specific parts contributed by various people. See README file. */
+
+/* Determine the machine type: */
+# if defined(sun) && defined(mc68000)
+# define M68K
+# define SUNOS4
+# define mach_type_known
+# endif
+# if defined(hp9000s300)
+# define M68K
+# define HP
+# define mach_type_known
+# endif
+# if defined(__NetBSD__) && defined(m68k)
+# define M68K
+# define NETBSD
+# define mach_type_known
+# endif
+# if defined(vax)
+# define VAX
+# ifdef ultrix
+# define ULTRIX
+# else
+# define BSD
+# endif
+# define mach_type_known
+# endif
+# if defined(mips) || defined(__mips)
+# define MIPS
+# if defined(ultrix) || defined(__ultrix)
+# define ULTRIX
+# else
+# if defined(_SYSTYPE_SVR4) || defined(SYSTYPE_SVR4) || defined(__SYSTYPE_SVR4__)
+# define IRIX5
+# else
+# define RISCOS /* or IRIX 4.X */
+# endif
+# endif
+# define mach_type_known
+# endif
+# if defined(sequent) && defined(i386)
+# define I386
+# define SEQUENT
+# define mach_type_known
+# endif
+# if defined(sun) && defined(i386)
+# define I386
+# define SUNOS5
+# define mach_type_known
+# endif
+# if (defined(__OS2__) || defined(__EMX__)) && defined(__32BIT__)
+# define I386
+# define OS2
+# define mach_type_known
+# endif
+# if defined(ibm032)
+# define RT
+# define mach_type_known
+# endif
+# if defined(sun) && (defined(sparc) || defined(__sparc))
+# define SPARC
+ /* Test for SunOS 5.x */
+# include <errno.h>
+# ifdef ECHRNG
+# define SUNOS5
+# else
+# define SUNOS4
+# endif
+# define mach_type_known
+# endif
+# if defined(sparc) && defined(unix) && !defined(sun)
+# define SPARC
+# define DRSNX
+# define mach_type_known
+# endif
+# if defined(_IBMR2)
+# define RS6000
+# define mach_type_known
+# endif
+# if defined(_M_XENIX) && defined(_M_SYSV) && defined(_M_I386)
+ /* The above test may need refinement */
+# define I386
+# define SCO
+# define mach_type_known
+# endif
+# if defined(_AUX_SOURCE)
+# define M68K
+# define SYSV
+# define mach_type_known
+# endif
+# if defined(_PA_RISC1_0) || defined(_PA_RISC1_1)
+# define HP_PA
+# define mach_type_known
+# endif
+# if defined(linux) && defined(i386)
+# define I386
+# define LINUX
+# define mach_type_known
+# endif
+# if defined(__alpha)
+# define ALPHA
+# define mach_type_known
+# endif
+# if defined(_AMIGA)
+# define M68K
+# define AMIGA
+# define mach_type_known
+# endif
+# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc)
+# define M68K
+# define MACOS
+# define mach_type_known
+# endif
+# if defined(__MWERKS__) && defined(__powerc)
+# define POWERPC
+# define MACOS
+# define mach_type_known
+# endif
+# if defined(NeXT) && defined(mc68000)
+# define M68K
+# define NEXT
+# define mach_type_known
+# endif
+# if defined(NeXT) && defined(i386)
+# define I386
+# define NEXT
+# define mach_type_known
+# endif
+# if defined(__FreeBSD__) && defined(i386)
+# define I386
+# define FREEBSD
+# define mach_type_known
+# endif
+# if defined(__NetBSD__) && defined(i386)
+# define I386
+# define NETBSD
+# define mach_type_known
+# endif
+# if defined(bsdi) && defined(i386)
+# define I386
+# define BSDI
+# define mach_type_known
+# endif
+# if !defined(mach_type_known) && defined(__386BSD__)
+# define I386
+# define THREE86BSD
+# define mach_type_known
+# endif
+# if defined(_CX_UX) && defined(_M88K)
+# define M88K
+# define CX_UX
+# define mach_type_known
+# endif
+# if defined(DGUX)
+# define M88K
+ /* DGUX defined */
+# define mach_type_known
+# endif
+# if defined(_MSDOS) && ((_M_IX86 == 300) || (_M_IX86 == 400))
+# define I386
+# define MSWIN32 /* or Win32s */
+# define mach_type_known
+# endif
+# if defined(GO32)
+# define I386
+# define DJGPP /* MSDOS running the DJGPP port of GCC */
+# define mach_type_known
+# endif
+# if defined(__BORLANDC__)
+# define I386
+# define MSWIN32
+# define mach_type_known
+# endif
+
+/* Feel free to add more clauses here */
+
+/* Or manually define the machine type here. A machine type is */
+/* characterized by the architecture. Some machine types are */
+/* further subdivided by OS; in that case we use macros such as */
+/* ULTRIX, RISCOS, and BSD to distinguish them. */
+/* Note that SGI IRIX is treated identically to RISCOS. */
+/* SYSV on an M68K actually means A/UX. */
+/* The distinction in these cases is usually the stack starting address */
+# ifndef mach_type_known
+ --> unknown machine type
+# endif
+ /* Mapping is: M68K ==> Motorola 680X0 */
+ /* (SUNOS4,HP,NEXT, and SYSV (A/UX), */
+ /* MACOS and AMIGA variants) */
+ /* I386 ==> Intel 386 */
+ /* (SEQUENT, OS2, SCO, LINUX, NETBSD, */
+ /* FREEBSD, THREE86BSD, MSWIN32, */
+ /* BSDI, SUNOS5, NEXT variants) */
+ /* NS32K ==> Encore Multimax */
+ /* MIPS ==> R2000 or R3000 */
+ /* (RISCOS, ULTRIX variants) */
+ /* VAX ==> DEC VAX */
+ /* (BSD, ULTRIX variants) */
+ /* RS6000 ==> IBM RS/6000 AIX3.X */
+ /* RT ==> IBM PC/RT */
+ /* HP_PA ==> HP9000/700 & /800 */
+ /* HP/UX */
+ /* SPARC ==> SPARC under SunOS */
+ /* (SUNOS4, SUNOS5, */
+ /* DRSNX variants) */
+ /* ALPHA ==> DEC Alpha OSF/1 */
+ /* M88K ==> Motorola 88XX0 */
+ /* (CX_UX and DGUX) */
+
+
+/*
+ * For each architecture and OS, the following need to be defined:
+ *
+ * CPP_WORD_SZ is a simple integer constant representing the word size
+ * in bits. We assume byte addressability, where a byte has 8 bits.
+ * We also assume CPP_WORD_SZ is either 32 or 64.
+ * (We care about the length of pointers, not hardware
+ * bus widths. Thus a 64 bit processor with a C compiler that uses
+ * 32 bit pointers should use CPP_WORD_SZ of 32, not 64. Default is 32.)
+ *
+ * MACH_TYPE is a string representation of the machine type.
+ * OS_TYPE is analogous for the OS.
+ *
+ * ALIGNMENT is the largest N such that
+ * all pointers are guaranteed to be aligned on N byte boundaries.
+ * Defining it to be 1 will always work, but may perform poorly.
+ *
+ * DATASTART is the beginning of the data segment.
+ * On UNIX systems, the collector will scan the area between DATASTART
+ * and &end for root pointers.
+ *
+ * STACKBOTTOM is the cool end of the stack, which is usually the
+ * highest address in the stack.
+ * Under PCR or OS/2, we have other ways of finding thread stacks.
+ * For each machine, the following should:
+ * 1) define STACK_GROWS_UP if the stack grows toward higher addresses, and
+ * 2) define exactly one of
+ * STACKBOTTOM (should be defined to be an expression)
+ * HEURISTIC1
+ * HEURISTIC2
+ * If either of the last two macros are defined, then STACKBOTTOM is computed
+ * during collector startup using one of the following two heuristics:
+ * HEURISTIC1: Take an address inside GC_init's frame, and round it up to
+ * the next multiple of STACK_GRAN.
+ * HEURISTIC2: Take an address inside GC_init's frame, increment it repeatedly
+ * in small steps (decrement if STACK_GROWS_UP), and read the value
+ * at each location. Remember the value when the first
+ * Segmentation violation or Bus error is signalled. Round that
+ * to the nearest plausible page boundary, and use that instead
+ * of STACKBOTTOM.
+ *
+ * If no expression for STACKBOTTOM can be found, and neither of the above
+ * heuristics are usable, the collector can still be used with all of the above
+ * undefined, provided one of the following is done:
+ * 1) GC_mark_roots can be changed to somehow mark from the correct stack(s)
+ * without reference to STACKBOTTOM. This is appropriate for use in
+ * conjunction with thread packages, since there will be multiple stacks.
+ * (Allocating thread stacks in the heap, and treating them as ordinary
+ * heap data objects is also possible as a last resort. However, this is
+ * likely to introduce significant amounts of excess storage retention
+ * unless the dead parts of the thread stacks are periodically cleared.)
+ * 2) Client code may set GC_stackbottom before calling any GC_ routines.
+ * If the author of the client code controls the main program, this is
+ * easily accomplished by introducing a new main program, setting
+ * GC_stackbottom to the address of a local variable, and then calling
+ * the original main program. The new main program would read something
+ * like:
+ *
+ * # include "gc_private.h"
+ *
+ * main(argc, argv, envp)
+ * int argc;
+ * char **argv, **envp;
+ * {
+ * int dummy;
+ *
+ * GC_stackbottom = (ptr_t)(&dummy);
+ * return(real_main(argc, argv, envp));
+ * }
+ *
+ *
+ * Each architecture may also define the style of virtual dirty bit
+ * implementation to be used:
+ * MPROTECT_VDB: Write protect the heap and catch faults.
+ * PROC_VDB: Use the SVR4 /proc primitives to read dirty bits.
+ *
+ * An architecture may define DYNAMIC_LOADING if dyn_load.c
+ * defines GC_register_dynamic_libraries() for the architecture.
+ */
+
+
+# define STACK_GRAN 0x1000000
+# ifdef M68K
+# define MACH_TYPE "M68K"
+# define ALIGNMENT 2
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# define HEURISTIC2
+ extern char etext;
+# define DATASTART ((ptr_t)(&etext))
+# endif
+# ifdef SUNOS4
+# define OS_TYPE "SUNOS4"
+ extern char etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x1ffff) & ~0x1ffff))
+# define HEURISTIC1 /* differs */
+# define DYNAMIC_LOADING
+# endif
+# ifdef HP
+# define OS_TYPE "HP"
+ extern char etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
+# define STACKBOTTOM ((ptr_t) 0xffeffffc)
+ /* empirically determined. seems to work. */
+# endif
+# ifdef SYSV
+# define OS_TYPE "SYSV"
+ extern etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x3fffff) \
+ & ~0x3fffff) \
+ +((word)&etext & 0x1fff))
+ /* This only works for shared-text binaries with magic number 0413.
+ The other sorts of SysV binaries put the data at the end of the text,
+ in which case the default of &etext would work. Unfortunately,
+ handling both would require having the magic-number available.
+ -- Parag
+ */
+# define STACKBOTTOM ((ptr_t)0xFFFFFFFE)
+ /* The stack starts at the top of memory, but */
+ /* 0x0 cannot be used as setjump_test complains */
+ /* that the stack direction is incorrect. Two */
+ /* bytes down from 0x0 should be safe enough. */
+ /* --Parag */
+# endif
+# ifdef AMIGA
+# define OS_TYPE "AMIGA"
+ /* STACKBOTTOM and DATASTART handled specially */
+ /* in os_dep.c */
+# define DATAEND /* not needed */
+# endif
+# ifdef MACOS
+# ifndef __LOWMEM__
+# include <LowMem.h>
+# endif
+# define OS_TYPE "MACOS"
+ /* see os_dep.c for details of global data segments. */
+# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
+# define DATAEND /* not needed */
+# endif
+# ifdef NEXT
+# define OS_TYPE "NEXT"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t) 0x4000000)
+# define DATAEND /* not needed */
+# endif
+# endif
+
+# ifdef POWERPC
+# define MACH_TYPE "POWERPC"
+# define ALIGNMENT 2
+# ifdef MACOS
+# ifndef __LOWMEM__
+# include <LowMem.h>
+# endif
+# define OS_TYPE "MACOS"
+ /* see os_dep.c for details of global data segments. */
+# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
+# define DATAEND /* not needed */
+# endif
+# endif
+
+# ifdef VAX
+# define MACH_TYPE "VAX"
+# define ALIGNMENT 4 /* Pointers are longword aligned by 4.2 C compiler */
+ extern char etext;
+# define DATASTART ((ptr_t)(&etext))
+# ifdef BSD
+# define OS_TYPE "BSD"
+# define HEURISTIC1
+ /* HEURISTIC2 may be OK, but it's hard to test. */
+# endif
+# ifdef ULTRIX
+# define OS_TYPE "ULTRIX"
+# define STACKBOTTOM ((ptr_t) 0x7fffc800)
+# endif
+# endif
+
+# ifdef RT
+# define MACH_TYPE "RT"
+# define ALIGNMENT 4
+# define DATASTART ((ptr_t) 0x10000000)
+# define STACKBOTTOM ((ptr_t) 0x1fffd800)
+# endif
+
+# ifdef SPARC
+# define MACH_TYPE "SPARC"
+# define ALIGNMENT 4 /* Required by hardware */
+ extern int etext;
+# ifdef SUNOS5
+# define OS_TYPE "SUNOS5"
+ extern int _etext;
+ extern int _end;
+ extern char * GC_SysVGetDataStart();
+# define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &_etext)
+# define DATAEND (&_end)
+# define PROC_VDB
+# define HEURISTIC1
+# endif
+# ifdef SUNOS4
+# define OS_TYPE "SUNOS4"
+ /* [If you have a weak stomach, don't read this.] */
+ /* We would like to use: */
+/* # define DATASTART ((ptr_t)((((word) (&etext)) + 0x1fff) & ~0x1fff)) */
+ /* This fails occasionally, due to an ancient, but very */
+ /* persistent ld bug. &etext is set 32 bytes too high. */
+ /* We instead read the text segment size from the a.out */
+ /* header, which happens to be mapped into our address space */
+ /* at the start of the text segment. The detective work here */
+ /* was done by Robert Ehrlich, Manuel Serrano, and Bernard */
+ /* Serpette of INRIA. */
+ /* This assumes ZMAGIC, i.e. demand-loadable executables. */
+# define TEXTSTART 0x2000
+# define DATASTART ((ptr_t)(*(int *)(TEXTSTART+0x4)+TEXTSTART))
+# define MPROTECT_VDB
+# define HEURISTIC1
+# endif
+# ifdef DRSNX
+# define CPP_WORDSZ 32
+# define OS_TYPE "DRSNX"
+ extern char * GC_SysVGetDataStart();
+ extern int etext;
+# define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &etext)
+# define MPROTECT_VDB
+# define STACKBOTTOM ((ptr_t) 0xdfff0000)
+# endif
+# define DYNAMIC_LOADING
+# endif
+
+# ifdef I386
+# define MACH_TYPE "I386"
+# define ALIGNMENT 4 /* Appears to hold for all "32 bit" compilers */
+ /* except Borland. The -a4 option fixes */
+ /* Borland. */
+# ifdef SEQUENT
+# define OS_TYPE "SEQUENT"
+ extern int etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
+# define STACKBOTTOM ((ptr_t) 0x3ffff000)
+# endif
+# ifdef SUNOS5
+# define OS_TYPE "SUNOS5"
+ extern int etext, _start;
+ extern char * GC_SysVGetDataStart();
+# define DATASTART GC_SysVGetDataStart(0x1000, &etext)
+# define STACKBOTTOM ((ptr_t)(&_start))
+# define PROC_VDB
+# endif
+# ifdef SCO
+# define OS_TYPE "SCO"
+ extern int etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0x3fffff) \
+ & ~0x3fffff) \
+ +((word)&etext & 0xfff))
+# define STACKBOTTOM ((ptr_t) 0x7ffffffc)
+# endif
+# ifdef LINUX
+# define OS_TYPE "LINUX"
+ extern int etext;
+# define DATASTART ((ptr_t)((((word) (&etext)) + 0xfff) & ~0xfff))
+# define STACKBOTTOM ((ptr_t)0xc0000000)
+# define MPROTECT_VDB
+# endif
+# ifdef OS2
+# define OS_TYPE "OS2"
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. OS2 actually has the right */
+ /* system call! */
+# define DATAEND /* not needed */
+# endif
+# ifdef MSWIN32
+# define OS_TYPE "MSWIN32"
+ /* STACKBOTTOM and DATASTART are handled specially in */
+ /* os_dep.c. */
+# define MPROTECT_VDB
+# define DATAEND /* not needed */
+# endif
+# ifdef DJGPP
+# define OS_TYPE "DJGPP"
+ extern int etext;
+# define DATASTART ((ptr_t)(&etext))
+# define STACKBOTTOM ((ptr_t)0x00080000)
+# endif
+# ifdef FREEBSD
+# define OS_TYPE "FREEBSD"
+# define MPROTECT_VDB
+# endif
+# ifdef NETBSD
+# define OS_TYPE "NETBSD"
+# endif
+# ifdef THREE86BSD
+# define OS_TYPE "THREE86BSD"
+# endif
+# ifdef BSDI
+# define OS_TYPE "BSDI"
+# endif
+# if defined(FREEBSD) || defined(NETBSD) \
+ || defined(THREE86BSD) || defined(BSDI)
+# define HEURISTIC2
+ extern char etext;
+# define DATASTART ((ptr_t)(&etext))
+# endif
+# ifdef NEXT
+# define OS_TYPE "NEXT"
+# define DATASTART ((ptr_t) get_etext())
+# define STACKBOTTOM ((ptr_t)0xc0000000)
+# define DATAEND /* not needed */
+# endif
+# endif
+
+# ifdef NS32K
+# define MACH_TYPE "NS32K"
+# define ALIGNMENT 4
+ extern char **environ;
+# define DATASTART ((ptr_t)(&environ))
+ /* hideous kludge: environ is the first */
+ /* word in crt0.o, and delimits the start */
+ /* of the data segment, no matter which */
+ /* ld options were passed through. */
+# define STACKBOTTOM ((ptr_t) 0xfffff000) /* for Encore */
+# endif
+
+# ifdef MIPS
+# define MACH_TYPE "MIPS"
+# define ALIGNMENT 4 /* Required by hardware */
+# define DATASTART 0x10000000
+ /* Could probably be slightly higher since */
+ /* startup code allocates lots of junk */
+# define HEURISTIC2
+# ifdef ULTRIX
+# define OS_TYPE "ULTRIX"
+# endif
+# ifdef RISCOS
+# define OS_TYPE "RISCOS"
+# endif
+# ifdef IRIX5
+# define OS_TYPE "IRIX5"
+# define MPROTECT_VDB
+ /* The above is dubious. Mprotect and signals do work, */
+ /* and dirty bits are implemented under IRIX5. But, */
+ /* at least under IRIX5.2, mprotect seems to be so */
+ /* slow relative to the hardware that incremental */
+ /* collection is likely to be rarely useful. */
+# define DYNAMIC_LOADING
+# endif
+# endif
+
+# ifdef RS6000
+# define MACH_TYPE "RS6000"
+# define ALIGNMENT 4
+# define DATASTART ((ptr_t)0x20000000)
+# define STACKBOTTOM ((ptr_t)0x2ff80000)
+# endif
+
+# ifdef HP_PA
+# define MACH_TYPE "HP_PA"
+# define ALIGNMENT 4
+ extern int __data_start;
+# define DATASTART ((ptr_t)(&__data_start))
+# define HEURISTIC2
+# define STACK_GROWS_UP
+# endif
+
+# ifdef ALPHA
+# define MACH_TYPE "ALPHA"
+# define ALIGNMENT 8
+# define DATASTART ((ptr_t) 0x140000000)
+# define HEURISTIC2
+	/* Normally HEURISTIC2 is too conservative, since	*/
+	/* the text segment immediately follows the stack.	*/
+	/* Hence we give an upper bound.			*/
+ extern __start;
+# define HEURISTIC2_LIMIT ((ptr_t)((word)(&__start) & ~(getpagesize()-1)))
+# define CPP_WORDSZ 64
+# define MPROTECT_VDB
+# define DYNAMIC_LOADING
+# endif
+
+# ifdef M88K
+# define MACH_TYPE "M88K"
+# define ALIGNMENT 4
+ extern int etext;
+# ifdef CX_UX
+# define DATASTART ((((word)&etext + 0x3fffff) & ~0x3fffff) + 0x10000)
+# endif
+# ifdef DGUX
+ extern char * GC_SysVGetDataStart();
+# define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &etext)
+# endif
+# define STACKBOTTOM ((char*)0xf0000000) /* determined empirically */
+# endif
+
+# ifndef STACK_GROWS_UP
+# define STACK_GROWS_DOWN
+# endif
+
+# ifndef CPP_WORDSZ
+# define CPP_WORDSZ 32
+# endif
+
+# ifndef OS_TYPE
+# define OS_TYPE ""
+# endif
+
+# ifndef DATAEND
+ extern int end;
+# define DATAEND (&end)
+# endif
+
+# if defined(SUNOS5) || defined(DRSNX)
+ /* OS has SVR4 generic features. Probably others also qualify. */
+# define SVR4
+# endif
+
+# if defined(SUNOS5) || defined(DRSNX)
+ /* OS has SUNOS5 style semi-undocumented interface to dynamic */
+ /* loader. */
+# define SUNOS5DL
+ /* OS has SUNOS5 style signal handlers. */
+# define SUNOS5SIGS
+# endif
+
+# if CPP_WORDSZ != 32 && CPP_WORDSZ != 64
+ -> bad word size
+# endif
+
+# ifdef PCR
+# undef DYNAMIC_LOADING
+# undef STACKBOTTOM
+# undef HEURISTIC1
+# undef HEURISTIC2
+# undef PROC_VDB
+# undef MPROTECT_VDB
+# define PCR_VDB
+# endif
+
+# ifdef SRC_M3
+/* Postponed for now. */
+# undef PROC_VDB
+# undef MPROTECT_VDB
+# endif
+
+# ifdef SMALL_CONFIG
+/* Presumably not worth the space it takes. */
+# undef PROC_VDB
+# undef MPROTECT_VDB
+# endif
+
+# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB)
+# define DEFAULT_VDB
+# endif
+
+# if defined(SPARC)
+# define SAVE_CALL_CHAIN
+# endif
+
+# endif
diff --git a/include/private/cord_pos.h b/include/private/cord_pos.h
new file mode 100644
index 00000000..a07d07f6
--- /dev/null
+++ b/include/private/cord_pos.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, May 19, 1994 2:23 pm PDT */
+# ifndef CORD_POSITION_H
+
+/* The representation of CORD_position. This is private to the */
+/* implementation, but the size is known to clients. Also */
+/* the implementation of some exported macros relies on it. */
+/* Don't use anything defined here and not in cord.h. */
+
+# define MAX_DEPTH 48
+ /* The maximum depth of a balanced cord + 1. */
+ /* We don't let cords get deeper than MAX_DEPTH. */
+
+struct CORD_pe {
+ CORD pe_cord;
+ size_t pe_start_pos;
+};
+
+/* A structure describing an entry on the path from the root */
+/* to current position. */
+typedef struct CORD_pos {
+ size_t cur_pos;
+ int path_len;
+# define CORD_POS_INVALID (0x55555555)
+ /* path_len == INVALID <==> position invalid */
+ const char *cur_leaf; /* Current leaf, if it is a string. */
+ /* If the current leaf is a function, */
+ /* then this may point to function_buf */
+ /* containing the next few characters. */
+ /* Always points to a valid string */
+ /* containing the current character */
+ /* unless cur_end is 0. */
+ size_t cur_start; /* Start position of cur_leaf */
+ size_t cur_end; /* Ending position of cur_leaf */
+ /* 0 if cur_leaf is invalid. */
+ struct CORD_pe path[MAX_DEPTH + 1];
+ /* path[path_len] is the leaf corresponding to cur_pos */
+ /* path[0].pe_cord is the cord we point to. */
+# define FUNCTION_BUF_SZ 8
+ char function_buf[FUNCTION_BUF_SZ]; /* Space for next few chars */
+ /* from function node. */
+} CORD_pos[1];
+
+/* Extract the cord from a position: */
+CORD CORD_pos_to_cord(CORD_pos p);
+
+/* Extract the current index from a position: */
+size_t CORD_pos_to_index(CORD_pos p);
+
+/* Fetch the character located at the given position: */
+char CORD_pos_fetch(CORD_pos p);
+
+/* Initialize the position to refer to the given cord and index. */
+/* Note that this is the most expensive function on positions: */
+void CORD_set_pos(CORD_pos p, CORD x, size_t i);
+
+/* Advance the position to the next character. */
+/* P must be initialized and valid. */
+/* Invalidates p if past end: */
+void CORD_next(CORD_pos p);
+
+/* Move the position to the preceding character. */
+/* P must be initialized and valid. */
+/* Invalidates p if past beginning: */
+void CORD_prev(CORD_pos p);
+
+/* Is the position valid, i.e. inside the cord? */
+int CORD_pos_valid(CORD_pos p);
+
+char CORD__pos_fetch(CORD_pos);
+void CORD__next(CORD_pos);
+void CORD__prev(CORD_pos);
+
+#define CORD_pos_fetch(p) \
+ (((p)[0].cur_end != 0)? \
+ (p)[0].cur_leaf[(p)[0].cur_pos - (p)[0].cur_start] \
+ : CORD__pos_fetch(p))
+
+#define CORD_next(p) \
+ (((p)[0].cur_pos + 1 < (p)[0].cur_end)? \
+ (p)[0].cur_pos++ \
+ : (CORD__next(p), 0))
+
+#define CORD_prev(p) \
+ (((p)[0].cur_end != 0 && (p)[0].cur_pos > (p)[0].cur_start)? \
+ (p)[0].cur_pos-- \
+ : (CORD__prev(p), 0))
+
+#define CORD_pos_to_index(p) ((p)[0].cur_pos)
+
+#define CORD_pos_to_cord(p) ((p)[0].path[0].pe_cord)
+
+#define CORD_pos_valid(p) ((p)[0].path_len != CORD_POS_INVALID)
+
+/* Some grubby stuff for performance-critical friends: */
+#define CORD_pos_chars_left(p) ((long)((p)[0].cur_end) - (long)((p)[0].cur_pos))
+ /* Number of characters in cache. <= 0 ==> none */
+
+#define CORD_pos_advance(p,n) ((p)[0].cur_pos += (n) - 1, CORD_next(p))
+ /* Advance position by n characters */
+ /* 0 < n < CORD_pos_chars_left(p) */
+
+#define CORD_pos_cur_char_addr(p) \
+ (p)[0].cur_leaf + ((p)[0].cur_pos - (p)[0].cur_start)
+ /* address of current character in cache. */
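+
+/*
+ * Illustrative sketch (editorial addition, not part of the original
+ * interface): a client scanning a cord x with the fast-path macros above
+ * might look roughly like this:
+ *
+ *     CORD_pos pos;
+ *     size_t blanks = 0;
+ *
+ *     for (CORD_set_pos(pos, x, 0); CORD_pos_valid(pos); CORD_next(pos)) {
+ *         if (CORD_pos_fetch(pos) == ' ') blanks++;
+ *     }
+ */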
+
+#endif
diff --git a/include/private/gc_hdrs.h b/include/private/gc_hdrs.h
new file mode 100644
index 00000000..2f2d1bf9
--- /dev/null
+++ b/include/private/gc_hdrs.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, July 11, 1995 11:54 am PDT */
+# ifndef GC_HEADERS_H
+# define GC_HEADERS_H
+typedef struct hblkhdr hdr;
+
+# if CPP_WORDSZ != 32 && CPP_WORDSZ < 36
+ --> Get a real machine.
+# endif
+
+/*
+ * The 2 level tree data structure that is used to find block headers.
+ * If there are more than 32 bits in a pointer, the top level is a hash
+ * table.
+ */
+
+# if CPP_WORDSZ > 32
+# define HASH_TL
+# endif
+
+/* Define appropriate out-degrees for each of the two tree levels */
+# ifdef SMALL_CONFIG
+# define LOG_BOTTOM_SZ 11
+ /* Keep top index size reasonable with smaller blocks. */
+# else
+# define LOG_BOTTOM_SZ 10
+# endif
+# ifndef HASH_TL
+# define LOG_TOP_SZ (WORDSZ - LOG_BOTTOM_SZ - LOG_HBLKSIZE)
+# else
+# define LOG_TOP_SZ 11
+# endif
+# define TOP_SZ (1 << LOG_TOP_SZ)
+# define BOTTOM_SZ (1 << LOG_BOTTOM_SZ)
+
+typedef struct bi {
+ hdr * index[BOTTOM_SZ];
+ /*
+ * The bottom level index contains one of three kinds of values:
+ * 0 means we're not responsible for this block.
+ * 1 < (long)X <= MAX_JUMP means the block starts at least
+ * X * HBLKSIZE bytes before the current address.
+ * A valid pointer points to a hdr structure. (The above can't be
+ * valid pointers due to the GET_MEM return convention.)
+ */
+ struct bi * asc_link; /* All indices are linked in */
+ /* ascending order. */
+ word key; /* high order address bits. */
+# ifdef HASH_TL
+ struct bi * hash_link; /* Hash chain link. */
+# endif
+} bottom_index;
+
+/* extern bottom_index GC_all_nils; - really part of GC_arrays */
+
+/* extern bottom_index * GC_top_index []; - really part of GC_arrays */
+ /* Each entry points to a bottom_index. */
+ /* On a 32 bit machine, it points to */
+ /* the index for a set of high order */
+ /* bits equal to the index. For longer */
+ /* addresses, we hash the high order */
+ /* bits to compute the index in */
+ /* GC_top_index, and each entry points */
+ /* to a hash chain. */
+ /* The last entry in each chain is */
+ /* GC_all_nils. */
+
+
+# define MAX_JUMP (HBLKSIZE - 1)
+
+# define HDR_FROM_BI(bi, p) \
+ ((bi)->index[((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
+# ifndef HASH_TL
+# define BI(p) (GC_top_index \
+ [(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
+# define HDR_INNER(p) HDR_FROM_BI(BI(p),p)
+# ifdef SMALL_CONFIG
+# define HDR(p) GC_find_header((ptr_t)(p))
+# else
+# define HDR(p) HDR_INNER(p)
+# endif
+# define GET_BI(p, bottom_indx) (bottom_indx) = BI(p)
+# define GET_HDR(p, hhdr) (hhdr) = HDR(p)
+# define SET_HDR(p, hhdr) HDR_INNER(p) = (hhdr)
+# define GET_HDR_ADDR(p, ha) (ha) = &(HDR_INNER(p))
+# else /* hash */
+/* Hash function for tree top level */
+# define TL_HASH(hi) ((hi) & (TOP_SZ - 1))
+/* Set bottom_indx to point to the bottom index for address p */
+# define GET_BI(p, bottom_indx) \
+ { \
+ register word hi = \
+ (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
+ register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
+ \
+ while (_bi -> key != hi && _bi != GC_all_nils) \
+ _bi = _bi -> hash_link; \
+ (bottom_indx) = _bi; \
+ }
+# define GET_HDR_ADDR(p, ha) \
+ { \
+ register bottom_index * bi; \
+ \
+ GET_BI(p, bi); \
+ (ha) = &(HDR_FROM_BI(bi, p)); \
+ }
+# define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
+ (hhdr) = *_ha; }
+# define SET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
+ *_ha = (hhdr); }
+# define HDR(p) GC_find_header((ptr_t)(p))
+# endif
+
+/* Is the result a forwarding address to someplace closer to the */
+/* beginning of the block or NIL? */
+# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((unsigned long) (hhdr) <= MAX_JUMP)
+
+/* Get an HBLKSIZE aligned address closer to the beginning of the block */
+/* h. Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr). */
+# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (unsigned long)(hhdr))
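+
+/*
+ * Illustrative sketch (editorial addition): code that resolves a candidate
+ * pointer p to the header of its heap block typically uses the macros above
+ * roughly as follows:
+ *
+ *     hdr * hhdr;
+ *
+ *     GET_HDR(p, hhdr);
+ *     if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ *         ... hhdr == 0 means p is not in a block we know about; a small
+ *         nonzero value is a forwarding count, and FORWARDED_ADDR applied
+ *         to the block address steps back toward the true block start ...
+ *     }
+ */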
+# endif /* GC_HEADERS_H */
diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
new file mode 100644
index 00000000..357a390d
--- /dev/null
+++ b/include/private/gc_priv.h
@@ -0,0 +1,1342 @@
+/*
+ * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, August 9, 1995 5:49 pm PDT */
+
+
+# ifndef GC_PRIVATE_H
+# define GC_PRIVATE_H
+
+#if defined(mips) && defined(SYSTYPE_BSD) && defined(sony_news)
+ /* sony RISC NEWS, NEWSOS 4 */
+# define BSD_TIME
+ typedef long ptrdiff_t;
+#endif
+
+#if defined(mips) && defined(SYSTYPE_BSD43)
+ /* MIPS RISCOS 4 */
+# define BSD_TIME
+#endif
+
+#ifdef BSD_TIME
+# include <sys/types.h>
+# include <sys/time.h>
+# include <sys/resource.h>
+#endif /* BSD_TIME */
+
+# ifndef GC_H
+# include "gc.h"
+# endif
+
+typedef GC_word word;
+typedef GC_signed_word signed_word;
+
+# ifndef CONFIG_H
+# include "config.h"
+# endif
+
+# ifndef HEADERS_H
+# include "gc_hdrs.h"
+# endif
+
+# ifndef bool
+ typedef int bool;
+# endif
+# define TRUE 1
+# define FALSE 0
+
+typedef char * ptr_t; /* A generic pointer to which we can add */
+ /* byte displacements. */
+ /* Preferably identical to caddr_t, if it */
+ /* exists. */
+
+#if defined(__STDC__)
+# include <stdlib.h>
+# if !(defined( sony_news ) )
+# include <stddef.h>
+# endif
+# define VOLATILE volatile
+#else
+# ifdef MSWIN32
+# include <stdlib.h>
+# endif
+# define VOLATILE
+#endif
+
+#ifdef AMIGA
+# define GC_FAR __far
+#else
+# define GC_FAR
+#endif
+
+/*********************************/
+/* */
+/* Definitions for conservative */
+/* collector */
+/* */
+/*********************************/
+
+/*********************************/
+/* */
+/* Easily changeable parameters */
+/* */
+/*********************************/
+
+#define STUBBORN_ALLOC /* Define stubborn allocation primitives */
+#if defined(SRC_M3) || defined(SMALL_CONFIG)
+# undef STUBBORN_ALLOC
+#endif
+
+
+/* #define ALL_INTERIOR_POINTERS */
+ /* Forces all pointers into the interior of an */
+ /* object to be considered valid. Also causes the */
+ /* sizes of all objects to be inflated by at least */
+ /* one byte. This should suffice to guarantee */
+ /* that in the presence of a compiler that does */
+ /* not perform garbage-collector-unsafe */
+ /* optimizations, all portable, strictly ANSI */
+ /* conforming C programs should be safely usable */
+ /* with malloc replaced by GC_malloc and free */
+ /* calls removed. There are several disadvantages: */
+ /* 1. There are probably no interesting, portable, */
+ /* strictly ANSI conforming C programs. */
+ /* 2. This option makes it hard for the collector */
+ /* to allocate space that is not ``pointed to'' */
+ /* by integers, etc. Under SunOS 4.X with a */
+ /* statically linked libc, we empirically */
+ /* observed that it would be difficult to */
+ /* allocate individual objects larger than 100K. */
+ /* Even if only smaller objects are allocated, */
+ /* more swap space is likely to be needed. */
+ /* Fortunately, much of this will never be */
+ /* touched. */
+ /* If you can easily avoid using this option, do. */
+ /* If not, try to keep individual objects small. */
+
+#define PRINTSTATS /* Print garbage collection statistics */
+ /* For less verbose output, undefine in reclaim.c */
+
+#define PRINTTIMES /* Print the amount of time consumed by each garbage */
+ /* collection. */
+
+#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
+ /* whether the objects are atomic or composite, and */
+ /* whether or not the block was found to be empty */
+ /* during the reclaim phase. Typically generates */
+ /* about one screenful per garbage collection. */
+#undef PRINTBLOCKS
+
+#define PRINTBLACKLIST /* Print black listed blocks, i.e. values that */
+ /* cause the allocator to avoid allocating certain */
+ /* blocks in order to avoid introducing "false */
+ /* hits". */
+#undef PRINTBLACKLIST
+
+#ifdef SILENT
+# ifdef PRINTSTATS
+# undef PRINTSTATS
+# endif
+# ifdef PRINTTIMES
+# undef PRINTTIMES
+# endif
+# ifdef PRINTNBLOCKS
+# undef PRINTNBLOCKS
+# endif
+#endif
+
+#if defined(PRINTSTATS) && !defined(GATHERSTATS)
+# define GATHERSTATS
+#endif
+
+# if defined(SOLARIS_THREADS) && !defined(SUNOS5)
+--> inconsistent configuration
+# endif
+# if defined(PCR) || defined(SRC_M3) || defined(SOLARIS_THREADS)
+# define THREADS
+# endif
+
+#if defined(SPARC)
+# define ALIGN_DOUBLE /* Align objects of size > 1 word on 2 word */
+ /* boundaries. Wasteful of memory, but */
+ /* apparently required by SPARC architecture. */
+# define ASM_CLEAR_CODE /* Stack clearing is crucial, and we */
+ /* include assembly code to do it well. */
+#endif
+
+#ifdef HP_PA
+# define ALIGN_DOUBLE
+#endif
+
+#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
+ /* free lists are actually maintained. This applies */
+ /* only to the top level routines in misc.c, not to */
+ /* user generated code that calls GC_allocobj and */
+ /* GC_allocaobj directly. */
+ /* Slows down average programs slightly. May however */
+ /* substantially reduce fragmentation if allocation */
+ /* request sizes are widely scattered. */
+ /* May save significant amounts of space for obj_map */
+ /* entries. */
+
+/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
+# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
+# define MERGE_SIZES
+# endif
+
+#if defined(ALL_INTERIOR_POINTERS) && !defined(DONT_ADD_BYTE_AT_END)
+# define ADD_BYTE_AT_END
+#endif
+
+
+# ifndef LARGE_CONFIG
+# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
+# define MAXHINCR 512 /* Maximum heap increment, in blocks */
+# else
+# define MINHINCR 64
+# define MAXHINCR 4096
+# endif
+
+# define TIME_LIMIT 50 /* We try to keep pause times from exceeding */
+ /* this by much. In milliseconds. */
+
+# define BL_LIMIT GC_black_list_spacing
+ /* If we need a block of N bytes, and we have */
+ /* a block of N + BL_LIMIT bytes available, */
+ /* and N > BL_LIMIT, */
+ /* but all possible positions in it are */
+ /* blacklisted, we just use it anyway (and */
+ /* print a warning, if warnings are enabled). */
+ /* This risks subsequently leaking the block */
+ /* due to a false reference. But not using */
+ /* the block risks unreasonable immediate */
+ /* heap growth. */
+
+/*********************************/
+/* */
+/* Stack saving for debugging */
+/* */
+/*********************************/
+
+/*
+ * Number of frames and arguments to save in objects allocated by
+ * debugging allocator.
+ */
+# define NFRAMES 6 /* Number of frames to save. Must be even, */
+ /* for alignment reasons. */
+# define NARGS 2 /* Number of arguments to save for each call. */
+
+
+#ifdef SAVE_CALL_CHAIN
+ struct callinfo {
+ word ci_pc;
+ word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
+ };
+
+/* Fill in the pc and argument information for up to NFRAMES of my */
+/* callers. Ignore my frame and my caller's frame. */
+void GC_save_callers (/* struct callinfo info[NFRAMES] */);
+
+void GC_print_callers (/* struct callinfo info[NFRAMES] */);
+
+#endif
+
+
+/*********************************/
+/* */
+/* OS interface routines */
+/* */
+/*********************************/
+
+#ifdef BSD_TIME
+# undef CLOCK_TYPE
+# undef GET_TIME
+# undef MS_TIME_DIFF
+# define CLOCK_TYPE struct timeval
+# define GET_TIME(x) { struct rusage rusage; \
+ getrusage (RUSAGE_SELF, &rusage); \
+ x = rusage.ru_utime; }
+# define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
+ + (double) (a.tv_usec - b.tv_usec) / 1000.0)
+#else /* !BSD_TIME */
+# include <time.h>
+# if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
+ clock_t clock(); /* Not in time.h, where it belongs */
+# endif
+# if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
+# include <machine/limits.h>
+# define CLOCKS_PER_SEC CLK_TCK
+# endif
+# if !defined(CLOCKS_PER_SEC)
+# define CLOCKS_PER_SEC 1000000
+/*
+ * This is technically a bug in the implementation. ANSI requires that
+ * CLOCKS_PER_SEC be defined. But at least under SunOS4.1.1, it isn't.
+ * Also note that the combination of ANSI C and POSIX is incredibly gross
+ * here. The type clock_t is used by both clock() and times(). But on
+ * some machines these use different notions of a clock tick; CLOCKS_PER_SEC
+ * seems to apply only to clock(). Hence we use it here. On many machines,
+ * including SunOS, clock actually uses units of microseconds (which are
+ * not really clock ticks).
+ */
+# endif
+# define CLOCK_TYPE clock_t
+# define GET_TIME(x) x = clock()
+# define MS_TIME_DIFF(a,b) ((unsigned long) \
+ (1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
+#endif /* !BSD_TIME */
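+
+/*
+ * Illustrative sketch (editorial addition), not a quote of any particular
+ * call site: a phase is timed with the macros above roughly as follows.
+ *
+ *     CLOCK_TYPE start_time, done_time;
+ *
+ *     GET_TIME(start_time);
+ *     ... do the work ...
+ *     GET_TIME(done_time);
+ *     ... report (unsigned long)MS_TIME_DIFF(done_time, start_time) msecs ...
+ */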
+
+/* We use bzero and bcopy internally. They may not be available. */
+# if defined(SPARC) && defined(SUNOS4)
+# define BCOPY_EXISTS
+# endif
+# if defined(M68K) && defined(AMIGA)
+# define BCOPY_EXISTS
+# endif
+# if defined(M68K) && defined(NEXT)
+# define BCOPY_EXISTS
+# endif
+# if defined(VAX)
+# define BCOPY_EXISTS
+# endif
+# if defined(AMIGA)
+# include <string.h>
+# define BCOPY_EXISTS
+# endif
+
+# ifndef BCOPY_EXISTS
+# include <string.h>
+# define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
+# define BZERO(x,n) memset(x, 0, (size_t)(n))
+# else
+# define BCOPY(x,y,n) bcopy((char *)(x),(char *)(y),(int)(n))
+# define BZERO(x,n) bzero((char *)(x),(int)(n))
+# endif
+
+/* HBLKSIZE aligned allocation. 0 is taken to mean failure */
+/* space is assumed to be cleared. */
+# ifdef PCR
+ char * real_malloc();
+# define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + HBLKSIZE) \
+ + HBLKSIZE-1)
+# else
+# ifdef OS2
+ void * os2_alloc(size_t bytes);
+# define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)bytes + HBLKSIZE) \
+ + HBLKSIZE-1)
+# else
+# if defined(AMIGA) || defined(NEXT)
+# define GET_MEM(bytes) HBLKPTR((size_t) \
+ calloc(1, (size_t)bytes + HBLKSIZE) \
+ + HBLKSIZE-1)
+# else
+# ifdef MSWIN32
+ extern ptr_t GC_win32_get_mem();
+# define GET_MEM(bytes) (struct hblk *)GC_win32_get_mem(bytes)
+# else
+# ifdef MACOS
+# if defined(USE_TEMPORARY_MEMORY)
+ extern Ptr GC_MacTemporaryNewPtr(size_t size,
+ Boolean clearMemory);
+# define GET_MEM(bytes) HBLKPTR( \
+ GC_MacTemporaryNewPtr(bytes + HBLKSIZE, true) + HBLKSIZE-1)
+# else
+# define GET_MEM(bytes) HBLKPTR( \
+ NewPtrClear(bytes + HBLKSIZE) + HBLKSIZE-1)
+# endif
+# else
+ extern ptr_t GC_unix_get_mem();
+# define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
+# endif
+# endif
+# endif
+# endif
+# endif
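+
+/*
+ * Illustrative sketch (editorial addition): every branch above is expected
+ * to yield an HBLKSIZE-aligned, zero-filled region, or 0 on failure, so a
+ * caller growing the heap looks roughly like this (bytes is assumed to be
+ * a multiple of HBLKSIZE):
+ *
+ *     struct hblk * space = GET_MEM(bytes);
+ *
+ *     if (space == 0) {
+ *         WARN("Failed to expand heap by %ld bytes\n", bytes);
+ *     } else {
+ *         GC_add_to_heap((ptr_t)space, bytes);
+ *     }
+ */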
+
+/*
+ * Mutual exclusion between allocator/collector routines.
+ * Needed if there is more than one allocator thread.
+ * FASTLOCK() is assumed to try to acquire the lock in a cheap and
+ * dirty way that is acceptable for a few instructions, e.g. by
+ * inhibiting preemption. This is assumed to have succeeded only
+ * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
+ * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
+ * If signals cannot be tolerated with the FASTLOCK held, then
+ * FASTLOCK should disable signals. The code executed under
+ * FASTLOCK is otherwise immune to interruption, provided it is
+ * not restarted.
+ * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
+ * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
+ * (There is currently no equivalent for FASTLOCK.)
+ */
+# ifdef THREADS
+# ifdef PCR_OBSOLETE /* Faster, but broken with multiple lwp's */
+# include "th/PCR_Th.h"
+# include "th/PCR_ThCrSec.h"
+ extern struct PCR_Th_MLRep GC_allocate_ml;
+# define DCL_LOCK_STATE PCR_sigset_t GC_old_sig_mask
+# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
+# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
+# define FASTLOCK() PCR_ThCrSec_EnterSys()
+ /* Here we cheat (a lot): */
+# define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
+ /* TRUE if nobody currently holds the lock */
+# define FASTUNLOCK() PCR_ThCrSec_ExitSys()
+# endif
+# ifdef PCR
+# include <base/PCR_Base.h>
+# include <th/PCR_Th.h>
+ extern PCR_Th_ML GC_allocate_ml;
+# define DCL_LOCK_STATE \
+ PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
+# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
+# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
+# define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
+# define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
+# define FASTUNLOCK() {\
+ if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
+# endif
+# ifdef SRC_M3
+ extern word RT0u__inCritical;
+# define LOCK() RT0u__inCritical++
+# define UNLOCK() RT0u__inCritical--
+# endif
+# ifdef SOLARIS_THREADS
+# include <thread.h>
+# include <signal.h>
+ extern mutex_t GC_allocate_ml;
+# define LOCK() mutex_lock(&GC_allocate_ml);
+# define UNLOCK() mutex_unlock(&GC_allocate_ml);
+# endif
+# else
+# define LOCK()
+# define UNLOCK()
+# endif
+
+# ifndef DCL_LOCK_STATE
+# define DCL_LOCK_STATE
+# endif
+# ifndef FASTLOCK
+# define FASTLOCK() LOCK()
+# define FASTLOCK_SUCCEEDED() TRUE
+# define FASTUNLOCK() UNLOCK()
+# endif
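+
+/*
+ * Illustrative sketch (editorial addition): the calling pattern intended by
+ * the comment above, e.g. on an allocation fast path, is roughly:
+ *
+ *     DCL_LOCK_STATE;
+ *
+ *     FASTLOCK();
+ *     if (!FASTLOCK_SUCCEEDED()) {
+ *         FASTUNLOCK();
+ *         ... fall back to the slow path, which takes LOCK()/UNLOCK() ...
+ *     } else {
+ *         ... short, non-restartable critical section ...
+ *         FASTUNLOCK();
+ *     }
+ */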
+
+/* Delay any interrupts or signals that may abort this thread. Data */
+/* structures are in a consistent state outside this pair of calls. */
+/* ANSI C allows both to be empty (though the standard isn't very */
+/* clear on that point). Standard malloc implementations are usually */
+/* neither interruptable nor thread-safe, and thus correspond to */
+/* empty definitions. */
+# ifdef PCR
+# define DISABLE_SIGNALS() \
+ PCR_Th_SetSigMask(PCR_allSigsBlocked,&GC_old_sig_mask)
+# define ENABLE_SIGNALS() \
+ PCR_Th_SetSigMask(&GC_old_sig_mask, NIL)
+# else
+# if defined(SRC_M3) || defined(AMIGA) || defined(SOLARIS_THREADS) \
+ || defined(MSWIN32) || defined(MACOS) || defined(DJGPP) \
+ || defined(NO_SIGNALS)
+ /* Also useful for debugging. */
+ /* Should probably use thr_sigsetmask for SOLARIS_THREADS. */
+# define DISABLE_SIGNALS()
+# define ENABLE_SIGNALS()
+# else
+# define DISABLE_SIGNALS() GC_disable_signals()
+ void GC_disable_signals();
+# define ENABLE_SIGNALS() GC_enable_signals()
+ void GC_enable_signals();
+# endif
+# endif
+
+/*
+ * Stop and restart mutator threads.
+ */
+# ifdef PCR
+# include "th/PCR_ThCtl.h"
+# define STOP_WORLD() \
+ PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
+ PCR_allSigsBlocked, \
+ PCR_waitForever)
+# define START_WORLD() \
+ PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
+ PCR_allSigsBlocked, \
+ PCR_waitForever);
+# else
+# ifdef SOLARIS_THREADS
+# define STOP_WORLD() GC_stop_world()
+# define START_WORLD() GC_start_world()
+# else
+# define STOP_WORLD()
+# define START_WORLD()
+# endif
+# endif
+
+/* Abandon ship */
+# ifdef PCR
+# define ABORT(s) PCR_Base_Panic(s)
+# else
+# ifdef SMALL_CONFIG
+# define ABORT(msg) abort();
+# else
+ void GC_abort();
+# define ABORT(msg) GC_abort(msg);
+# endif
+# endif
+
+/* Exit abnormally, but without making a mess (e.g. out of memory) */
+# ifdef PCR
+# define EXIT() PCR_Base_Exit(1,PCR_waitForever)
+# else
+# define EXIT() (void)exit(1)
+# endif
+
+/* Print warning message, e.g. almost out of memory. */
+# define WARN(msg,arg) (*GC_current_warn_proc)(msg, (GC_word)(arg))
+extern GC_warn_proc GC_current_warn_proc;
+
+/*********************************/
+/* */
+/* Word-size-dependent defines */
+/* */
+/*********************************/
+
+#if CPP_WORDSZ == 32
+# define WORDS_TO_BYTES(x) ((x)<<2)
+# define BYTES_TO_WORDS(x) ((x)>>2)
+# define LOGWL ((word)5) /* log[2] of CPP_WORDSZ */
+# define modWORDSZ(n) ((n) & 0x1f) /* n mod size of word */
+# if ALIGNMENT != 4
+# define UNALIGNED
+# endif
+#endif
+
+#if CPP_WORDSZ == 64
+# define WORDS_TO_BYTES(x) ((x)<<3)
+# define BYTES_TO_WORDS(x) ((x)>>3)
+# define LOGWL ((word)6) /* log[2] of CPP_WORDSZ */
+# define modWORDSZ(n) ((n) & 0x3f) /* n mod size of word */
+# if ALIGNMENT != 8
+# define UNALIGNED
+# endif
+#endif
+
+#define WORDSZ ((word)CPP_WORDSZ)
+#define SIGNB ((word)1 << (WORDSZ-1))
+#define BYTES_PER_WORD ((word)(sizeof (word)))
+#define ONES ((word)(-1))
+#define divWORDSZ(n) ((n) >> LOGWL) /* divide n by size of word */
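+
+/*
+ * Worked example (editorial addition), for the common CPP_WORDSZ == 32 case:
+ * WORDS_TO_BYTES(3) == 12, BYTES_TO_WORDS(12) == 3, and bit 37 of a bit
+ * array lives in word divWORDSZ(37) == 1 at bit position modWORDSZ(37) == 5.
+ */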
+
+/*********************/
+/* */
+/* Size Parameters */
+/* */
+/*********************/
+
+/* heap block size, bytes. Should be power of 2 */
+
+#ifdef SMALL_CONFIG
+# define CPP_LOG_HBLKSIZE 10
+#else
+# if CPP_WORDSZ == 32
+# define CPP_LOG_HBLKSIZE 12
+# else
+# define CPP_LOG_HBLKSIZE 13
+# endif
+#endif
+#define LOG_HBLKSIZE ((word)CPP_LOG_HBLKSIZE)
+#define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
+#define HBLKSIZE ((word)CPP_HBLKSIZE)
+
+
+/* max size objects supported by freelist (larger objects may be */
+/* allocated, but less efficiently) */
+
+#define CPP_MAXOBJSZ BYTES_TO_WORDS(CPP_HBLKSIZE/2)
+#define MAXOBJSZ ((word)CPP_MAXOBJSZ)
+
+# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)
+
+# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
+ /* Equivalent to subtracting 2 hblk pointers. */
+ /* We do it this way because a compiler should */
+ /* find it hard to use an integer division */
+ /* instead of a shift. The bundled SunOS 4.1 */
+ /* otherwise sometimes pessimizes the subtraction to */
+ /* involve a call to .div. */
+
+# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))
+
+# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))
+
+# define HBLKDISPL(objptr) (((word) (objptr)) & (HBLKSIZE-1))
+
+/* Round up byte allocation requests to integral number of words, etc. */
+# ifdef ADD_BYTE_AT_END
+# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1))
+# ifdef ALIGN_DOUBLE
+# define ALIGNED_WORDS(n) (BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2)) & ~1)
+# else
+# define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n)
+# endif
+# define SMALL_OBJ(bytes) ((bytes) < WORDS_TO_BYTES(MAXOBJSZ))
+# define ADD_SLOP(bytes) ((bytes)+1)
+# else
+# define ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1))
+# ifdef ALIGN_DOUBLE
+# define ALIGNED_WORDS(n) \
+ (BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2) - 1) & ~1)
+# else
+# define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n)
+# endif
+# define SMALL_OBJ(bytes) ((bytes) <= WORDS_TO_BYTES(MAXOBJSZ))
+# define ADD_SLOP(bytes) (bytes)
+# endif
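+
+/*
+ * Worked example (editorial addition), for 32-bit words without
+ * ADD_BYTE_AT_END: a 10 byte request gives ROUNDED_UP_WORDS(10) == 3 words,
+ * and ALIGNED_WORDS(10) == 4 when ALIGN_DOUBLE is defined, i.e. the object
+ * is padded to an even word count. With ADD_BYTE_AT_END, ADD_SLOP(10) == 11
+ * reflects the extra byte added to every object.
+ */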
+
+
+/*
+ * Hash table representation of sets of pages. This assumes it is
+ * OK to add spurious entries to sets.
+ * Used by black-listing code, and perhaps by dirty bit maintenance code.
+ */
+
+# ifdef LARGE_CONFIG
+# define LOG_PHT_ENTRIES 17
+# else
+# define LOG_PHT_ENTRIES 14 /* Collisions are likely if heap grows */
+ /* to more than 16K hblks = 64MB. */
+ /* Each hash table occupies 2K bytes. */
+# endif
+# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
+# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
+typedef word page_hash_table[PHT_SIZE];
+
+# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))
+
+# define get_pht_entry_from_index(bl, index) \
+ (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
+# define set_pht_entry_from_index(bl, index) \
+ (bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
+# define clear_pht_entry_from_index(bl, index) \
+ (bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
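+
+/*
+ * Illustrative sketch (editorial addition): a dirty-bit implementation that
+ * records and later tests a page h (a struct hblk *) in one of these sets,
+ * e.g. the GC_grungy_pages table defined further below, does roughly:
+ *
+ *     set_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h));
+ *     ...
+ *     if (get_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h))) {
+ *         ... treat the page as possibly dirty; spurious entries are OK ...
+ *     }
+ */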
+
+
+
+/********************************************/
+/* */
+/* H e a p B l o c k s */
+/* */
+/********************************************/
+
+/* heap block header */
+#define HBLKMASK (HBLKSIZE-1)
+
+#define BITS_PER_HBLK (HBLKSIZE * 8)
+
+#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/CPP_WORDSZ)
+ /* upper bound */
+ /* We allocate 1 bit/word. Only the first word */
+ /* in each object is actually marked. */
+
+# ifdef ALIGN_DOUBLE
+# define MARK_BITS_SZ (((MARK_BITS_PER_HBLK + 2*CPP_WORDSZ - 1) \
+ / (2*CPP_WORDSZ))*2)
+# else
+# define MARK_BITS_SZ ((MARK_BITS_PER_HBLK + CPP_WORDSZ - 1)/CPP_WORDSZ)
+# endif
+ /* Upper bound on number of mark words per heap block */
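+
+/*
+ * Worked example (editorial addition): with the default 32-bit,
+ * non-SMALL_CONFIG settings, HBLKSIZE is 4096, BITS_PER_HBLK is 32768,
+ * MARK_BITS_PER_HBLK is 1024 (one mark bit per word in the block), and
+ * MARK_BITS_SZ is 32 mark words per header; the ALIGN_DOUBLE variant of
+ * the formula merely rounds that count up to an even number.
+ */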
+
+struct hblkhdr {
+ word hb_sz; /* If in use, size in words, of objects in the block. */
+ /* if free, the size in bytes of the whole block */
+ struct hblk * hb_next; /* Link field for hblk free list */
+ /* and for lists of chunks waiting to be */
+ /* reclaimed. */
+ word hb_descr; /* object descriptor for marking. See */
+ /* mark.h. */
+ char* hb_map; /* A pointer to a pointer validity map of the block. */
+ /* See GC_obj_map. */
+ /* Valid for all blocks with headers. */
+ /* Free blocks point to GC_invalid_map. */
+ unsigned char hb_obj_kind;
+ /* Kind of objects in the block. Each kind */
+ /* identifies a mark procedure and a set of */
+ /* list headers. Sometimes called regions. */
+ unsigned char hb_flags;
+# define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
+ /* point to the first page of */
+ /* this object. */
+ unsigned short hb_last_reclaimed;
+ /* Value of GC_gc_no when block was */
+ /* last allocated or swept. May wrap. */
+ word hb_marks[MARK_BITS_SZ];
+ /* Bit i in the array refers to the */
+ /* object starting at the ith word (header */
+ /* INCLUDED) in the heap block. */
+ /* The lsb of word 0 is numbered 0. */
+};
+
+/* heap block body */
+
+# define DISCARD_WORDS 0
+ /* Number of words to be dropped at the beginning of each block */
+ /* Must be a multiple of WORDSZ. May reasonably be nonzero */
+ /* on machines that don't guarantee longword alignment of */
+ /* pointers, so that the number of false hits is minimized. */
+ /* 0 and WORDSZ are probably the only reasonable values. */
+
+# define BODY_SZ ((HBLKSIZE-WORDS_TO_BYTES(DISCARD_WORDS))/sizeof(word))
+
+struct hblk {
+# if (DISCARD_WORDS != 0)
+ word garbage[DISCARD_WORDS];
+# endif
+ word hb_body[BODY_SZ];
+};
+
+# define HDR_WORDS ((word)DISCARD_WORDS)
+# define HDR_BYTES ((word)WORDS_TO_BYTES(DISCARD_WORDS))
+
+# define OBJ_SZ_TO_BLOCKS(sz) \
+ divHBLKSZ(HDR_BYTES + WORDS_TO_BYTES(sz) + HBLKSIZE-1)
+ /* Size of block (in units of HBLKSIZE) needed to hold objects of */
+ /* given sz (in words). */
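+
+/*
+ * Worked example (editorial addition): with 4096 byte blocks, 32-bit words
+ * and DISCARD_WORDS == 0, an object of sz == 2000 words (8000 bytes) needs
+ * OBJ_SZ_TO_BLOCKS(2000) == divHBLKSZ(8000 + 4095) == 2 blocks.
+ */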
+
+/* Object free list link */
+# define obj_link(p) (*(ptr_t *)(p))
+
+/* lists of all heap blocks and free lists */
+/* These are grouped together in a struct */
+/* so that they can be easily skipped by the */
+/* GC_mark routine. */
+/* The ordering is weird to make GC_malloc */
+/* faster by keeping the important fields */
+/* sufficiently close together that a */
+/* single load of a base register will do. */
+/* Scalars that could easily appear to */
+/* be pointers are also put here. */
+/* The main fields should precede any */
+/* conditionally included fields, so that */
+/* gc_inl.h will work even if a different set */
+/* of macros is defined when the client is */
+/* compiled. */
+
+struct _GC_arrays {
+ word _heapsize;
+ word _max_heapsize;
+ ptr_t _last_heap_addr;
+ ptr_t _prev_heap_addr;
+ word _words_allocd_before_gc;
+ /* Number of words allocated before this */
+ /* collection cycle. */
+ word _words_allocd;
+ /* Number of words allocated during this collection cycle */
+ word _words_wasted;
+ /* Number of words wasted due to internal fragmentation */
+ /* in large objects allocated since last gc. Approximate.*/
+ word _words_finalized;
+ /* Approximate number of words in objects (and headers) */
+ /* That became ready for finalization in the last */
+ /* collection. */
+ word _non_gc_bytes_at_gc;
+ /* Number of explicitly managed bytes of storage */
+ /* at last collection. */
+ word _mem_freed;
+ /* Number of explicitly deallocated words of memory */
+ /* since last collection. */
+
+ ptr_t _objfreelist[MAXOBJSZ+1];
+ /* free list for objects */
+ ptr_t _aobjfreelist[MAXOBJSZ+1];
+ /* free list for atomic objs */
+
+ ptr_t _uobjfreelist[MAXOBJSZ+1];
+ /* uncollectable but traced objs */
+
+# ifdef GATHERSTATS
+ word _composite_in_use;
+ /* Number of words in accessible composite */
+ /* objects. */
+ word _atomic_in_use;
+ /* Number of words in accessible atomic */
+ /* objects. */
+# endif
+# ifdef MERGE_SIZES
+ unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
+ /* Number of words to allocate for a given allocation request in */
+ /* bytes. */
+# endif
+
+# ifdef STUBBORN_ALLOC
+ ptr_t _sobjfreelist[MAXOBJSZ+1];
+# endif
+ /* free list for immutable objects */
+ ptr_t _obj_map[MAXOBJSZ+1];
+ /* If not NIL, then a pointer to a map of valid */
+ /* object addresses. _obj_map[sz][i] is j if the */
+ /* address block_start+i is a valid pointer */
+ /* to an object at */
+ /* block_start+i&~3 - WORDS_TO_BYTES(j). */
+ /* (If ALL_INTERIOR_POINTERS is defined, then */
+ /* instead ((short *)(hbh_map[sz])[i] is j if */
+ /* block_start+WORDS_TO_BYTES(i) is in the */
+ /* interior of an object starting at */
+ /* block_start+WORDS_TO_BYTES(i-j)). */
+ /* It is OBJ_INVALID if */
+ /* block_start+WORDS_TO_BYTES(i) is not */
+ /* valid as a pointer to an object. */
+ /* We assume all values of j <= OBJ_INVALID. */
+ /* The zeroth entry corresponds to large objects.*/
+# ifdef ALL_INTERIOR_POINTERS
+# define map_entry_type short
+# define OBJ_INVALID 0x7fff
+# define MAP_ENTRY(map, bytes) \
+ (((map_entry_type *)(map))[BYTES_TO_WORDS(bytes)])
+# define MAP_ENTRIES BYTES_TO_WORDS(HBLKSIZE)
+# define MAP_SIZE (MAP_ENTRIES * sizeof(map_entry_type))
+# define OFFSET_VALID(displ) TRUE
+# define CPP_MAX_OFFSET (HBLKSIZE - HDR_BYTES - 1)
+# define MAX_OFFSET ((word)CPP_MAX_OFFSET)
+# else
+# define map_entry_type char
+# define OBJ_INVALID 0x7f
+# define MAP_ENTRY(map, bytes) \
+ (map)[bytes]
+# define MAP_ENTRIES HBLKSIZE
+# define MAP_SIZE MAP_ENTRIES
+# define CPP_MAX_OFFSET (WORDS_TO_BYTES(OBJ_INVALID) - 1)
+# define MAX_OFFSET ((word)CPP_MAX_OFFSET)
+# define VALID_OFFSET_SZ \
+ (CPP_MAX_OFFSET > WORDS_TO_BYTES(CPP_MAXOBJSZ)? \
+ CPP_MAX_OFFSET+1 \
+ : WORDS_TO_BYTES(CPP_MAXOBJSZ)+1)
+ char _valid_offsets[VALID_OFFSET_SZ];
+ /* GC_valid_offsets[i] == TRUE ==> i */
+ /* is registered as a displacement. */
+# define OFFSET_VALID(displ) GC_valid_offsets[displ]
+ char _modws_valid_offsets[sizeof(word)];
+ /* GC_valid_offsets[i] ==> */
+ /* GC_modws_valid_offsets[i%sizeof(word)] */
+# endif
+# ifdef STUBBORN_ALLOC
+ page_hash_table _changed_pages;
+ /* Stubborn object pages that were changed since last call to */
+ /* GC_read_changed. */
+ page_hash_table _prev_changed_pages;
+ /* Stubborn object pages that were changed before last call to */
+ /* GC_read_changed. */
+# endif
+# if defined(PROC_VDB) || defined(MPROTECT_VDB)
+ page_hash_table _grungy_pages; /* Pages that were dirty at last */
+ /* GC_read_dirty. */
+# endif
+# ifdef LARGE_CONFIG
+# if CPP_WORDSZ > 32
+# define MAX_HEAP_SECTS 4096 /* overflows at roughly 64 GB */
+# else
+# define MAX_HEAP_SECTS 768 /* Separately added heap sections. */
+# endif
+# else
+# define MAX_HEAP_SECTS 256
+# endif
+ struct HeapSect {
+ ptr_t hs_start; word hs_bytes;
+ } _heap_sects[MAX_HEAP_SECTS];
+# ifdef MSWIN32
+ ptr_t _heap_bases[MAX_HEAP_SECTS];
+ /* Start address of memory regions obtained from kernel. */
+# endif
+ /* Block header index; see gc_headers.h */
+ bottom_index * _all_nils;
+ bottom_index * _top_index [TOP_SZ];
+#ifdef SAVE_CALL_CHAIN
+ struct callinfo _last_stack[NFRAMES]; /* Stack at last garbage collection.*/
+ /* Useful for debugging mysterious */
+ /* object disappearances. */
+ /* In the multithreaded case, we */
+ /* currently only save the calling */
+ /* stack. */
+#endif
+};
+
+extern GC_FAR struct _GC_arrays GC_arrays;
+
+# define GC_objfreelist GC_arrays._objfreelist
+# define GC_aobjfreelist GC_arrays._aobjfreelist
+# define GC_uobjfreelist GC_arrays._uobjfreelist
+# define GC_sobjfreelist GC_arrays._sobjfreelist
+# define GC_valid_offsets GC_arrays._valid_offsets
+# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
+# ifdef STUBBORN_ALLOC
+# define GC_changed_pages GC_arrays._changed_pages
+# define GC_prev_changed_pages GC_arrays._prev_changed_pages
+# endif
+# define GC_obj_map GC_arrays._obj_map
+# define GC_last_heap_addr GC_arrays._last_heap_addr
+# define GC_prev_heap_addr GC_arrays._prev_heap_addr
+# define GC_words_allocd GC_arrays._words_allocd
+# define GC_words_wasted GC_arrays._words_wasted
+# define GC_words_finalized GC_arrays._words_finalized
+# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
+# define GC_mem_freed GC_arrays._mem_freed
+# define GC_heapsize GC_arrays._heapsize
+# define GC_max_heapsize GC_arrays._max_heapsize
+# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
+# define GC_heap_sects GC_arrays._heap_sects
+# define GC_last_stack GC_arrays._last_stack
+# ifdef MSWIN32
+# define GC_heap_bases GC_arrays._heap_bases
+# endif
+# define GC_all_nils GC_arrays._all_nils
+# define GC_top_index GC_arrays._top_index
+# if defined(PROC_VDB) || defined(MPROTECT_VDB)
+# define GC_grungy_pages GC_arrays._grungy_pages
+# endif
+# ifdef GATHERSTATS
+# define GC_composite_in_use GC_arrays._composite_in_use
+# define GC_atomic_in_use GC_arrays._atomic_in_use
+# endif
+# ifdef MERGE_SIZES
+# define GC_size_map GC_arrays._size_map
+# endif
+
+# define beginGC_arrays ((ptr_t)(&GC_arrays))
+# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))
+
+
+# define MAXOBJKINDS 16
+
+/* Object kinds: */
+extern struct obj_kind {
+ ptr_t *ok_freelist; /* Array of free list headers for this kind of object */
+ /* Point either to GC_arrays or to storage allocated */
+ /* with GC_scratch_alloc. */
+ struct hblk **ok_reclaim_list;
+ /* List headers for lists of blocks waiting to be */
+ /* swept. */
+ word ok_descriptor; /* Descriptor template for objects in this */
+ /* block. */
+ bool ok_relocate_descr;
+ /* Add object size in bytes to descriptor */
+ /* template to obtain descriptor. Otherwise */
+ /* template is used as is. */
+ bool ok_init; /* Clear objects before putting them on the free list. */
+} GC_obj_kinds[MAXOBJKINDS];
+/* Predefined kinds: */
+# define PTRFREE 0
+# define NORMAL 1
+# define UNCOLLECTABLE 2
+# define STUBBORN 3
+
+extern int GC_n_kinds;
+
+extern word GC_n_heap_sects; /* Number of separately added heap */
+ /* sections. */
+
+# ifdef MSWIN32
+extern word GC_n_heap_bases; /* See GC_heap_bases. */
+# endif
+
+extern word GC_total_black_listed;
+ /* Number of bytes on stack blacklist. */
+
+extern word GC_black_list_spacing;
+ /* Average number of bytes between blacklisted */
+ /* blocks. Approximate. */
+
+extern char * GC_invalid_map;
+ /* Pointer to the nowhere valid hblk map */
+ /* Blocks pointing to this map are free. */
+
+extern struct hblk * GC_hblkfreelist;
+ /* List of completely empty heap blocks */
+ /* Linked through hb_next field of */
+ /* header structure associated with */
+ /* block. */
+
+extern bool GC_is_initialized; /* GC_init() has been run. */
+
+extern bool GC_objects_are_marked; /* There are marked objects in */
+ /* the heap. */
+
+extern int GC_incremental; /* Using incremental/generational collection. */
+
+extern bool GC_dirty_maintained;/* Dirty bits are being maintained, */
+ /* either for incremental collection, */
+ /* or to limit the root set. */
+
+# ifndef PCR
+ extern ptr_t GC_stackbottom; /* Cool end of user stack */
+# endif
+
+extern word GC_root_size; /* Total size of registered root sections */
+
+extern bool GC_debugging_started; /* GC_debug_malloc has been called. */
+
+extern ptr_t GC_least_plausible_heap_addr;
+extern ptr_t GC_greatest_plausible_heap_addr;
+ /* Bounds on the heap. Guaranteed valid */
+ /* Likely to include future heap expansion. */
+
+/* Operations */
+# ifndef abs
+# define abs(x) ((x) < 0? (-(x)) : (x))
+# endif
+
+
+/* Marks are in a reserved area in */
+/* each heap block. Each word has one mark bit associated */
+/* with it. Only those corresponding to the beginning of an */
+/* object are used. */
+
+
+/* Mark bit operations */
+
+/*
+ * Retrieve, set, clear the mark bit corresponding
+ * to the nth word in a given heap block.
+ *
+ * (Recall that bit n corresponds to object beginning at word n
+ * relative to the beginning of the block, including unused words)
+ */
+
+# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
+ >> (modWORDSZ(n))) & (word)1)
+# define set_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
+ |= (word)1 << modWORDSZ(n)
+
+# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
+ &= ~((word)1 << modWORDSZ(n))
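+
+/*
+ * Illustrative sketch (editorial addition): given the header hhdr of a block
+ * and the word displacement word_no of an object within it (word_no is just
+ * an illustrative name), the marker tests and sets the bit roughly thus:
+ *
+ *     if (!mark_bit_from_hdr(hhdr, word_no)) {
+ *         set_mark_bit_from_hdr(hhdr, word_no);
+ *         ... push the object's contents onto the mark stack ...
+ *     }
+ */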
+
+/* Important internal collector routines */
+
+void GC_apply_to_all_blocks(/*fn, client_data*/);
+ /* Invoke fn(hbp, client_data) for each */
+ /* allocated heap block. */
+struct hblk * GC_next_block(/* struct hblk * h */);
+void GC_mark_init();
+void GC_clear_marks(); /* Clear mark bits for all heap objects. */
+void GC_invalidate_mark_state(); /* Tell the marker that marked */
+ /* objects may point to unmarked */
+ /* ones, and roots may point to */
+ /* unmarked objects. */
+ /* Reset mark stack. */
+void GC_mark_from_mark_stack(); /* Mark from everything on the mark stack. */
+ /* Return after about one page's worth of */
+ /* work. */
+bool GC_mark_stack_empty();
+bool GC_mark_some(); /* Perform about one page's worth of marking */
+ /* work of whatever kind is needed. Returns */
+ /* quickly if no collection is in progress. */
+ /* Return TRUE if mark phase finished. */
+void GC_initiate_full(); /* initiate full collection. */
+void GC_initiate_partial(); /* initiate partial collection. */
+void GC_push_all(/*b,t*/); /* Push everything in a range */
+ /* onto mark stack. */
+void GC_push_dirty(/*b,t*/); /* Push all possibly changed */
+ /* subintervals of [b,t) onto */
+ /* mark stack. */
+#ifndef SMALL_CONFIG
+ void GC_push_conditional(/* ptr_t b, ptr_t t, bool all*/);
+#else
+# define GC_push_conditional(b, t, all) GC_push_all(b, t)
+#endif
+ /* Do either of the above, depending */
+ /* on the third arg. */
+void GC_push_all_stack(/*b,t*/); /* As above, but consider */
+ /* interior pointers as valid */
+void GC_push_roots(/* bool all */); /* Push all or dirty roots. */
+extern void (*GC_push_other_roots)();
+ /* Push system or application specific roots */
+ /* onto the mark stack. In some environments */
+ /* (e.g. threads environments) this is */
+ /* predefined to be non-zero. A client supplied */
+ /* replacement should also call the original */
+ /* function. */
+void GC_push_regs(); /* Push register contents onto mark stack. */
+void GC_remark(); /* Mark from all marked objects. Used */
+ /* only if we had to drop something. */
+void GC_push_one(/*p*/); /* If p points to an object, mark it */
+ /* and push contents on the mark stack */
+void GC_push_one_checked(/*p*/); /* Ditto, omits plausibility test */
+void GC_push_marked(/* struct hblk h, hdr * hhdr */);
+ /* Push contents of all marked objects in h onto */
+ /* mark stack. */
+#ifdef SMALL_CONFIG
+# define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
+#else
+ struct hblk * GC_push_next_marked_dirty(/* h */);
+ /* Invoke GC_push_marked on next dirty block above h. */
+ /* Return a pointer just past the end of this block. */
+#endif /* !SMALL_CONFIG */
+struct hblk * GC_push_next_marked(/* h */);
+ /* Ditto, but also mark from clean pages. */
+struct hblk * GC_push_next_marked_uncollectable(/* h */);
+ /* Ditto, but mark only from uncollectable pages. */
+bool GC_stopped_mark(); /* Stop world and mark from all roots */
+ /* and rescuers. */
+void GC_clear_hdr_marks(/* hhdr */); /* Clear the mark bits in a header */
+void GC_add_roots_inner();
+bool GC_is_static_root(/* ptr_t p */);
+ /* Is the address p in one of the registered static */
+ /* root sections? */
+void GC_register_dynamic_libraries();
+ /* Add dynamic library data sections to the root set. */
+
+/* Machine dependent startup routines */
+ptr_t GC_get_stack_base();
+void GC_register_data_segments();
+
+/* Black listing: */
+void GC_bl_init();
+# ifndef ALL_INTERIOR_POINTERS
+ void GC_add_to_black_list_normal(/* bits */);
+ /* Register bits as a possible future false */
+ /* reference from the heap or static data */
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_normal(bits)
+# else
+# define GC_ADD_TO_BLACK_LIST_NORMAL(bits) GC_add_to_black_list_stack(bits)
+# endif
+
+void GC_add_to_black_list_stack(/* bits */);
+struct hblk * GC_is_black_listed(/* h, len */);
+ /* If there are likely to be false references */
+ /* to a block starting at h of the indicated */
+ /* length, then return the next plausible */
+ /* starting location for h that might avoid */
+ /* these false references. */
+void GC_promote_black_lists();
+ /* Declare an end to a black listing phase. */
+void GC_unpromote_black_lists();
+ /* Approximately undo the effect of the above. */
+ /* This actually loses some information, but */
+ /* only in a reasonably safe way. */
+word GC_number_stack_black_listed(/*struct hblk *start, struct hblk *endp1 */);
+ /* Return the number of (stack) blacklisted */
+ /* blocks in the range for statistical */
+ /* purposes. */
+
+ptr_t GC_scratch_alloc(/*bytes*/);
+ /* GC internal memory allocation for */
+ /* small objects. Deallocation is not */
+ /* possible. */
+
+/* Heap block layout maps: */
+void GC_invalidate_map(/* hdr */);
+ /* Remove the object map associated */
+ /* with the block. This identifies */
+ /* the block as invalid to the mark */
+ /* routines. */
+bool GC_add_map_entry(/*sz*/);
+ /* Add a heap block map for objects of */
+ /* size sz to obj_map. */
+ /* Return FALSE on failure. */
+void GC_register_displacement_inner(/*offset*/);
+ /* Version of GC_register_displacement */
+ /* that assumes lock is already held */
+ /* and signals are already disabled. */
+
+/* hblk allocation: */
+void GC_new_hblk(/*size_in_words, kind*/);
+ /* Allocate a new heap block, and build */
+ /* a free list in it. */
+struct hblk * GC_allochblk(/*size_in_words, kind*/);
+ /* Allocate a heap block, clear it if */
+ /* for composite objects, inform */
+ /* the marker that block is valid */
+ /* for objects of indicated size. */
+ /* sz < 0 ==> atomic. */
+void GC_freehblk(); /* Deallocate a heap block and mark it */
+ /* as invalid. */
+
+/* Misc GC: */
+void GC_init_inner();
+bool GC_expand_hp_inner();
+void GC_start_reclaim(/*abort_if_found*/);
+ /* Restore unmarked objects to free */
+ /* lists, or (if abort_if_found is */
+ /* TRUE) report them. */
+ /* Sweeping of small object pages is */
+ /* largely deferred. */
+void GC_continue_reclaim(/*size, kind*/);
+ /* Sweep pages of the given size and */
+ /* kind, as long as possible, and */
+ /* as long as the corresponding free */
+ /* list is empty. */
+void GC_reclaim_or_delete_all();
+ /* Arrange for all reclaim lists to be */
+ /* empty. Judiciously choose between */
+ /* sweeping and discarding each page. */
+bool GC_reclaim_all(/* GC_stop_func f*/);
+ /* Reclaim all blocks. Abort (in a */
+ /* consistent state) if f returns TRUE. */
+bool GC_block_empty(/* hhdr */); /* Block completely unmarked? */
+bool GC_never_stop_func(); /* Returns FALSE. */
+bool GC_try_to_collect_inner(/* GC_stop_func f */);
+ /* Collect; caller must have acquired */
+ /* lock and disabled signals. */
+ /* Collection is aborted if f returns */
+ /* TRUE. Returns TRUE if it completes */
+ /* successfully. */
+# define GC_gcollect_inner() \
+ (void) GC_try_to_collect_inner(GC_never_stop_func)
+void GC_finish_collection(); /* Finish collection. Mark bits are */
+ /* consistent and lock is still held. */
+bool GC_collect_or_expand(/* needed_blocks */);
+ /* Collect or expand heap in an attempt */
+ /* to make the indicated number of free */
+ /* blocks available. Should be called */
+ /* until the blocks are available or */
+ /* until it fails by returning FALSE. */
+void GC_init(); /* Initialize collector. */
+void GC_collect_a_little_inner(/* int n */);
+ /* Do n units worth of garbage */
+ /* collection work, if appropriate. */
+ /* A unit is an amount appropriate for */
+ /* HBLKSIZE bytes of allocation. */
+ptr_t GC_generic_malloc(/* bytes, kind */);
+ /* Allocate an object of the given */
+ /* kind. By default, there are only */
+ /* two kinds: composite, and atomic. */
+ /* We claim it's possible for clever */
+ /* client code that understands GC */
+ /* internals to add more, e.g. to */
+ /* communicate object layout info */
+ /* to the collector. */
+ptr_t GC_generic_malloc_inner(/* bytes, kind */);
+ /* Ditto, but I already hold lock, etc. */
+ptr_t GC_generic_malloc_words_small(/*words, kind*/);
+ /* As above, but size in units of words */
+ /* Bypasses MERGE_SIZES. Assumes */
+ /* words <= MAXOBJSZ. */
+ptr_t GC_generic_malloc_inner_ignore_off_page(/* bytes, kind */);
+ /* Allocate an object, where */
+ /* the client guarantees that there */
+ /* will always be a pointer to the */
+ /* beginning of the object while the */
+ /* object is live. */
+ptr_t GC_allocobj(/* sz_in_words, kind */);
+ /* Make the indicated */
+ /* free list nonempty, and return its */
+ /* head. */
+
+void GC_init_headers();
+bool GC_install_header(/*h*/);
+ /* Install a header for block h. */
+ /* Return FALSE on failure. */
+bool GC_install_counts(/*h, sz*/);
+ /* Set up forwarding counts for block */
+ /* h of size sz. */
+ /* Return FALSE on failure. */
+void GC_remove_header(/*h*/);
+ /* Remove the header for block h. */
+void GC_remove_counts(/*h, sz*/);
+ /* Remove forwarding counts for h. */
+hdr * GC_find_header(/*p*/); /* Debugging only. */
+
+void GC_finalize(); /* Perform all indicated finalization actions */
+ /* on unmarked objects. */
+ /* Unreachable finalizable objects are enqueued */
+ /* for processing by GC_invoke_finalizers. */
+ /* Invoked with lock. */
+void GC_invoke_finalizers(); /* Run eligible finalizers. */
+ /* Invoked without lock. */
+
+void GC_add_to_heap(/*p, bytes*/);
+ /* Add a HBLKSIZE aligned chunk to the heap. */
+
+void GC_print_obj(/* ptr_t p */);
+ /* P points to somewhere inside an object with */
+ /* debugging info. Print a human readable */
+ /* description of the object to stderr. */
+extern void (*GC_check_heap)();
+ /* Check that all objects in the heap with */
+ /* debugging info are intact. Print */
+ /* descriptions of any that are not. */
+
+/* Virtual dirty bit implementation: */
+/* Each implementation exports the following: */
+void GC_read_dirty(); /* Retrieve dirty bits. */
+bool GC_page_was_dirty(/* struct hblk * h */);
+ /* Read retrieved dirty bits. */
+bool GC_page_was_ever_dirty(/* struct hblk * h */);
+ /* Could the page contain valid heap pointers? */
+void GC_is_fresh(/* struct hblk * h, word number_of_blocks */);
+ /* Assert the region currently contains no */
+ /* valid pointers. */
+void GC_write_hint(/* struct hblk * h */);
+ /* h is about to be written. */
+void GC_dirty_init();
+
+/* Slow/general mark bit manipulation: */
+bool GC_is_marked();
+void GC_clear_mark_bit();
+void GC_set_mark_bit();
+
+/* Stubborn objects: */
+void GC_read_changed(); /* Analogous to GC_read_dirty */
+bool GC_page_was_changed(/* h */); /* Analogous to GC_page_was_dirty */
+void GC_clean_changing_list(); /* Collect obsolete changing list entries */
+void GC_stubborn_init();
+
+/* Debugging print routines: */
+void GC_print_block_list();
+void GC_print_hblkfreelist();
+void GC_print_heap_sects();
+void GC_print_static_roots();
+void GC_dump();
+
+/* Make arguments appear live to compiler */
+void GC_noop();
+
+/* Logging and diagnostic output: */
+void GC_printf GC_PROTO((char * format, long, long, long, long, long, long));
+ /* A version of printf that doesn't allocate, */
+ /* is restricted to long arguments, and */
+ /* (unfortunately) doesn't use varargs for */
+ /* portability. Restricted to 6 args and */
+ /* 1K total output length. */
+ /* (We use sprintf. Hopefully that doesn't */
+ /* allocate for long arguments.) */
+# define GC_printf0(f) GC_printf(f, 0l, 0l, 0l, 0l, 0l, 0l)
+# define GC_printf1(f,a) GC_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
+# define GC_printf2(f,a,b) GC_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l)
+# define GC_printf3(f,a,b,c) GC_printf(f, (long)a, (long)b, (long)c, 0l, 0l, 0l)
+# define GC_printf4(f,a,b,c,d) GC_printf(f, (long)a, (long)b, (long)c, \
+ (long)d, 0l, 0l)
+# define GC_printf5(f,a,b,c,d,e) GC_printf(f, (long)a, (long)b, (long)c, \
+ (long)d, (long)e, 0l)
+# define GC_printf6(f,a,b,c,d,e,g) GC_printf(f, (long)a, (long)b, (long)c, \
+ (long)d, (long)e, (long)g)
+
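+/* Usage sketch (editor's illustration; the messages and the use of */
+/* GC_heapsize and GC_gc_no are examples, not a particular call site */
+/* in this tree): */
+/*   GC_printf1("Heap size = %lu bytes\n", (unsigned long)GC_heapsize); */
+/*   GC_printf2("Collection %lu found %ld words\n", */
+/*              (unsigned long)GC_gc_no, (long)GC_mem_found); */
+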
+void GC_err_printf(/* format, a, b, c, d, e, f */);
+# define GC_err_printf0(f) GC_err_puts(f)
+# define GC_err_printf1(f,a) GC_err_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
+# define GC_err_printf2(f,a,b) GC_err_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l)
+# define GC_err_printf3(f,a,b,c) GC_err_printf(f, (long)a, (long)b, (long)c, \
+ 0l, 0l, 0l)
+# define GC_err_printf4(f,a,b,c,d) GC_err_printf(f, (long)a, (long)b, \
+ (long)c, (long)d, 0l, 0l)
+# define GC_err_printf5(f,a,b,c,d,e) GC_err_printf(f, (long)a, (long)b, \
+ (long)c, (long)d, \
+ (long)e, 0l)
+# define GC_err_printf6(f,a,b,c,d,e,g) GC_err_printf(f, (long)a, (long)b, \
+ (long)c, (long)d, \
+ (long)e, (long)g)
+ /* Ditto, writes to stderr. */
+
+void GC_err_puts(/* char *s */);
+ /* Write s to stderr, don't buffer, don't add */
+ /* newlines, don't ... */
+
+# endif /* GC_PRIVATE_H */
diff --git a/include/weakpointer.h b/include/weakpointer.h
new file mode 100644
index 00000000..84906b00
--- /dev/null
+++ b/include/weakpointer.h
@@ -0,0 +1,221 @@
+#ifndef _weakpointer_h_
+#define _weakpointer_h_
+
+/****************************************************************************
+
+WeakPointer and CleanUp
+
+ Copyright (c) 1991 by Xerox Corporation. All rights reserved.
+
+ THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+ Permission is hereby granted to copy this code for any purpose,
+ provided the above notices are retained on all copies.
+
+ Last modified on Mon Jul 17 18:16:01 PDT 1995 by ellis
+
+****************************************************************************/
+
+/****************************************************************************
+
+WeakPointer
+
+A weak pointer is a pointer to a heap-allocated object that doesn't
+prevent the object from being garbage collected. Weak pointers can be
+used to track which objects haven't yet been reclaimed by the
+collector. A weak pointer is deactivated when the collector discovers
+its referent object is unreachable by normal pointers (reachability
+and deactivation are defined more precisely below). A deactivated weak
+pointer remains deactivated forever.
+
+****************************************************************************/
+
+
+template< class T > class WeakPointer {
+public:
+
+WeakPointer( T* t = 0 )
+ /* Constructs a weak pointer for *t. t may be null. It is an error
+ if t is non-null and *t is not a collected object. */
+ {impl = _WeakPointer_New( t );}
+
+T* Pointer()
+ /* wp.Pointer() returns a pointer to the referent object of wp or
+ null if wp has been deactivated (because its referent object
+ has been discovered unreachable by the collector). */
+ {return (T*) _WeakPointer_Pointer( this->impl );}
+
+int operator==( WeakPointer< T > wp2 )
+ /* Given weak pointers wp1 and wp2, if wp1 == wp2, then wp1 and
+ wp2 refer to the same object. If wp1 != wp2, then either wp1
+ and wp2 don't refer to the same object, or if they do, one or
+ both of them has been deactivated. (Note: If objects t1 and t2
+ are never made reachable by their clean-up functions, then
+     WeakPointer<T>(t1) == WeakPointer<T>(t2) if and only if t1 == t2.) */
+ {return _WeakPointer_Equal( this->impl, wp2.impl );}
+
+int Hash()
+ /* Returns a hash code suitable for use by multiplicative- and
+ division-based hash tables. If wp1 == wp2, then wp1.Hash() ==
+ wp2.Hash(). */
+ {return _WeakPointer_Hash( this->impl );}
+
+private:
+void* impl;
+};
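+
+/* Usage sketch (editor's illustration; Node and the use of gc_cpp.h's
+   `gc' base class are assumed client code, not declarations made by
+   this interface):
+
+     #include "gc_cpp.h"
+     #include "weakpointer.h"
+
+     struct Node: public gc {Node* next; int key;};
+
+     Node* n = new Node;
+     WeakPointer<Node> wp( n );
+     ...
+     Node* p = wp.Pointer();
+     if (p != 0) {
+         ... the referent is still live; p may be used ...
+     } else {
+         ... wp was deactivated; the Node was found unreachable ...
+     }                                                                 */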
+
+/*****************************************************************************
+
+CleanUp
+
+A garbage-collected object can have an associated clean-up function
+that will be invoked some time after the collector discovers the
+object is unreachable via normal pointers. Clean-up functions can be
+used to release resources such as open-file handles or window handles
+when their containing objects become unreachable. If a C++ object has
+a non-empty explicit destructor (i.e. it contains programmer-written
+code), the destructor will be automatically registered as the object's
+initial clean-up function.
+
+There is no guarantee that the collector will detect every unreachable
+object (though it will find almost all of them). Clients should not
+rely on clean-up to cause some action to occur immediately -- clean-up
+is only a mechanism for improving resource usage.
+
+Every object with a clean-up function also has a clean-up queue. When
+the collector finds the object is unreachable, it enqueues it on its
+queue. The clean-up function is applied when the object is removed
+from the queue. By default, objects are enqueued on the garbage
+collector's queue, and the collector removes all objects from its
+queue after each collection. If a client supplies another queue for
+objects, it is the client's responsibility to remove objects (and cause their
+functions to be called) by polling it periodically.
+
+Clean-up queues allow clean-up functions accessing global data to
+synchronize with the main program. Garbage collection can occur at any
+time, and clean-ups invoked by the collector might access data in an
+inconsistent state. A client can control this by defining an explicit
+queue for objects and polling it at safe points.
+
+The following definitions are used by the specification below:
+
+Given a pointer t to a collected object, the base object BO(t) is the
+value returned by new when it created the object. (Because of multiple
+inheritance, t and BO(t) may not be the same address.)
+
+A weak pointer wp references an object *t if BO(wp.Pointer()) ==
+BO(t).
+
+***************************************************************************/
+
+template< class T, class Data > class CleanUp {
+public:
+
+static void Set( T* t, void c( Data* d, T* t ), Data* d = 0 )
+ /* Sets the clean-up function of object BO(t) to be <c, d>,
+ replacing any previously defined clean-up function for BO(t); c
+ and d can be null, but t cannot. Sets the clean-up queue for
+ BO(t) to be the collector's queue. When t is removed from its
+ clean-up queue, its clean-up will be applied by calling c(d,
+ t). It is an error if *t is not a collected object. */
+ {_CleanUp_Set( t, c, d );}
+
+static void Call( T* t )
+ /* Sets the new clean-up function for BO(t) to be null and, if the
+ old one is non-null, calls it immediately, even if BO(t) is
+ still reachable. Deactivates any weak pointers to BO(t). */
+ {_CleanUp_Call( t );}
+
+class Queue {public:
+ Queue()
+ /* Constructs a new queue. */
+ {this->head = _CleanUp_Queue_NewHead();}
+
+ void Set( T* t )
+ /* q.Set(t) sets the clean-up queue of BO(t) to be q. */
+ {_CleanUp_Queue_Set( this->head, t );}
+
+ int Call()
+ /* If q is non-empty, q.Call() removes the first object and
+ calls its clean-up function; does nothing if q is
+ empty. Returns true if there are more objects in the
+ queue. */
+ {return _CleanUp_Queue_Call( this->head );}
+
+ private:
+ void* head;
+ };
+};
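+
+/* Usage sketch (editor's illustration; Window, close_window, and the
+   explicit queue are assumed client code, not part of this interface):
+
+     static void close_window( void* d, Window* w )
+         { ... release w's window-system handle ... }
+
+     Window* w = new Window;
+     CleanUp<Window, void>::Set( w, close_window );
+
+   To control when the clean-up runs, give the object its own queue and
+   poll it at a safe point:
+
+     static CleanUp<Window, void>::Queue windows;
+     windows.Set( w );
+     ...
+     while (windows.Call()) { }     -- drain pending clean-ups          */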
+
+/**********************************************************************
+
+Reachability and Clean-up
+
+An object O is reachable if it can be reached via a non-empty path of
+normal pointers from the registers, stacks, global variables, or an
+object with a non-null clean-up function (including O itself),
+ignoring pointers from an object to itself.
+
+This definition of reachability ensures that if object B is accessible
+from object A (and not vice versa) and if both A and B have clean-up
+functions, then A will always be cleaned up before B. Note that as
+long as an object with a clean-up function is contained in a cycle of
+pointers, it will always be reachable and will never be cleaned up or
+collected.
+
+When the collector finds an unreachable object with a null clean-up
+function, it atomically deactivates all weak pointers referencing the
+object and recycles its storage. If object B is accessible from object
+A via a path of normal pointers, A will be discovered unreachable no
+later than B, and a weak pointer to A will be deactivated no later
+than a weak pointer to B.
+
+When the collector finds an unreachable object with a non-null
+clean-up function, the collector atomically deactivates all weak
+pointers referencing the object, redefines its clean-up function to be
+null, and enqueues it on its clean-up queue. The object then becomes
+reachable again and remains reachable at least until its clean-up
+function executes.
+
+The clean-up function is assured that its argument is the only
+accessible pointer to the object. Nothing prevents the function from
+redefining the object's clean-up function or making the object
+reachable again (for example, by storing the pointer in a global
+variable).
+
+If the clean-up function does not make its object reachable again and
+does not redefine its clean-up function, then the object will be
+collected by a subsequent collection (because the object remains
+unreachable and now has a null clean-up function). If the clean-up
+function does make its object reachable again and a clean-up function
+is subsequently redefined for the object, then the new clean-up
+function will be invoked the next time the collector finds the object
+unreachable.
+
+Note that a destructor for a collected object cannot safely redefine a
+clean-up function for its object, since after the destructor executes,
+the object has been destroyed into "raw memory". (In most
+implementations, destroying an object mutates its vtbl.)
+
+Finally, note that calling delete t on a collected object first
+deactivates any weak pointers to t and then invokes its clean-up
+function (destructor).
+
+**********************************************************************/
+
+extern "C" {
+ void* _WeakPointer_New( void* t );
+ void* _WeakPointer_Pointer( void* wp );
+ int _WeakPointer_Equal( void* wp1, void* wp2 );
+ int _WeakPointer_Hash( void* wp );
+ void _CleanUp_Set( void* t, void (*c)( void* d, void* t ), void* d );
+ void _CleanUp_Call( void* t );
+ void* _CleanUp_Queue_NewHead ();
+ void _CleanUp_Queue_Set( void* h, void* t );
+ int _CleanUp_Queue_Call( void* h );
+}
+
+#endif /* _weakpointer_h_ */
+
+
diff --git a/mach_dep.c b/mach_dep.c
index cd441f97..c2d22a61 100644
--- a/mach_dep.c
+++ b/mach_dep.c
@@ -11,7 +11,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 1:58 pm PDT */
+/* Boehm, November 17, 1995 12:13 pm PST */
# include "gc_priv.h"
# include <stdio.h>
# include <setjmp.h>
@@ -19,7 +19,44 @@
# define _setjmp(b) setjmp(b)
# define _longjmp(b,v) longjmp(b,v)
# endif
+# ifdef AMIGA
+# include <dos.h>
+# endif
+#if defined(__MWERKS__) && !defined(POWERPC)
+
+asm static void PushMacRegisters()
+{
+ sub.w #4,sp // reserve space for one parameter.
+ move.l a2,(sp)
+ jsr GC_push_one
+ move.l a3,(sp)
+ jsr GC_push_one
+ move.l a4,(sp)
+ jsr GC_push_one
+# if !__option(a6frames)
+ // <pcb> perhaps a6 should be pushed if stack frames are not being used.
+ move.l a6,(sp)
+ jsr GC_push_one
+# endif
+ // skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
+ move.l d2,(sp)
+ jsr GC_push_one
+ move.l d3,(sp)
+ jsr GC_push_one
+ move.l d4,(sp)
+ jsr GC_push_one
+ move.l d5,(sp)
+ jsr GC_push_one
+ move.l d6,(sp)
+ jsr GC_push_one
+ move.l d7,(sp)
+ jsr GC_push_one
+ add.w #4,sp // fix stack.
+ rts
+}
+
+#endif /* __MWERKS__ */
/* Routine to mark from registers that are preserved by the C compiler. */
/* This must be ported to every new architecture. There is a generic */
@@ -27,22 +64,7 @@
/* on your architecture. Run the test_setjmp program to see whether */
/* there is any chance it will work. */
-#ifdef AMIGA
-__asm GC_push_regs(
- register __a2 word a2,
- register __a3 word a3,
- register __a4 word a4,
- register __a5 word a5,
- register __a6 word a6,
- register __d2 const word d2,
- register __d3 const word d3,
- register __d4 const word d4,
- register __d5 const word d5,
- register __d6 const word d6,
- register __d7 const word d7)
-#else
- void GC_push_regs()
-#endif
+void GC_push_regs()
{
# ifdef RT
register long TMP_SP; /* must be bound to r11 */
@@ -105,63 +127,104 @@ __asm GC_push_regs(
# ifdef AMIGA
/* AMIGA - could be replaced by generic code */
- /* SAS/C optimizer mangles this so compile with "noopt" */
/* a0, a1, d0 and d1 are caller save */
- GC_push_one(a2);
- GC_push_one(a3);
- GC_push_one(a4);
- GC_push_one(a5);
- GC_push_one(a6);
+ GC_push_one(getreg(REG_A2));
+ GC_push_one(getreg(REG_A3));
+ GC_push_one(getreg(REG_A4));
+ GC_push_one(getreg(REG_A5));
+ GC_push_one(getreg(REG_A6));
/* Skip stack pointer */
- GC_push_one(d2);
- GC_push_one(d3);
- GC_push_one(d4);
- GC_push_one(d5);
- GC_push_one(d6);
- GC_push_one(d7);
+ GC_push_one(getreg(REG_D2));
+ GC_push_one(getreg(REG_D3));
+ GC_push_one(getreg(REG_D4));
+ GC_push_one(getreg(REG_D5));
+ GC_push_one(getreg(REG_D6));
+ GC_push_one(getreg(REG_D7));
# endif
-# if defined(I386) &&!defined(OS2) &&!defined(SUNOS5) &&!defined(MSWIN32)
+# if defined(M68K) && defined(MACOS)
+# if defined(THINK_C)
+# define PushMacReg(reg) \
+ move.l reg,(sp) \
+ jsr GC_push_one
+ asm {
+ sub.w #4,sp ; reserve space for one parameter.
+ PushMacReg(a2);
+ PushMacReg(a3);
+ PushMacReg(a4);
+ ; skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
+ PushMacReg(d2);
+ PushMacReg(d3);
+ PushMacReg(d4);
+ PushMacReg(d5);
+ PushMacReg(d6);
+ PushMacReg(d7);
+ add.w #4,sp ; fix stack.
+ }
+# undef PushMacReg
+# endif /* THINK_C */
+# if defined(__MWERKS__)
+ PushMacRegisters();
+# endif /* __MWERKS__ */
+# endif /* MACOS */
+
+# if defined(I386) &&!defined(OS2) &&!defined(SVR4) &&!defined(MSWIN32) && !defined(SCO) && (!defined(LINUX) || !defined(__ELF__))
/* I386 code, generic code does not appear to work */
/* It does appear to work under OS2, and asms dont */
asm("pushl %eax"); asm("call _GC_push_one"); asm("addl $4,%esp");
asm("pushl %ecx"); asm("call _GC_push_one"); asm("addl $4,%esp");
asm("pushl %edx"); asm("call _GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ebp"); asm("call _GC_push_one"); asm("addl $4,%esp");
asm("pushl %esi"); asm("call _GC_push_one"); asm("addl $4,%esp");
asm("pushl %edi"); asm("call _GC_push_one"); asm("addl $4,%esp");
asm("pushl %ebx"); asm("call _GC_push_one"); asm("addl $4,%esp");
# endif
+# if defined(I386) && defined(LINUX) && defined(__ELF__)
+ /* This is modified for Linux with ELF (Note: _ELF_ only) */
+ asm("pushl %eax"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ecx"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %edx"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ebp"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %esi"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %edi"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ebx"); asm("call GC_push_one"); asm("addl $4,%esp");
+# endif
+
# if defined(I386) && defined(MSWIN32)
/* I386 code, Microsoft variant */
__asm push eax
__asm call GC_push_one
__asm add esp,4
+ __asm push ebx
+ __asm call GC_push_one
+ __asm add esp,4
__asm push ecx
__asm call GC_push_one
__asm add esp,4
__asm push edx
__asm call GC_push_one
__asm add esp,4
- __asm push esi
+ __asm push ebp
__asm call GC_push_one
__asm add esp,4
- __asm push edi
+ __asm push esi
__asm call GC_push_one
__asm add esp,4
- __asm push ebx
+ __asm push edi
__asm call GC_push_one
__asm add esp,4
# endif
-# if defined(I386) && defined(SUNOS5)
+# if defined(I386) && (defined(SVR4) || defined(SCO))
/* I386 code, SVR4 variant, generic code does not appear to work */
asm("pushl %eax"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ebx"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %ecx"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %edx"); asm("call GC_push_one"); asm("addl $4,%esp");
+ asm("pushl %ebp"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %esi"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %edi"); asm("call GC_push_one"); asm("addl $4,%esp");
- asm("pushl %ebx"); asm("call GC_push_one"); asm("addl $4,%esp");
# endif
# ifdef NS32K
@@ -238,7 +301,7 @@ __asm GC_push_regs(
# endif /* M68K/SYSV */
-# if defined(HP_PA) || defined(M88K) || (defined(I386) && defined(OS2))
+# if defined(HP_PA) || defined(M88K) || defined(POWERPC) || (defined(I386) && defined(OS2))
/* Generic code */
/* The idea is due to Parag Patel at HP. */
/* We're not sure whether he would like */
@@ -253,7 +316,11 @@ __asm GC_push_regs(
for (; (char *)i < lim; i++) {
*i = 0;
}
- (void) _setjmp(regs);
+# ifdef POWERPC
+ (void) setjmp(regs);
+# else
+ (void) _setjmp(regs);
+# endif
GC_push_all_stack((ptr_t)regs, lim);
}
# endif
@@ -261,7 +328,7 @@ __asm GC_push_regs(
/* other machines... */
# if !(defined M68K) && !(defined VAX) && !(defined RT)
# if !(defined SPARC) && !(defined I386) && !(defined NS32K)
-# if !defined(HP_PA) && !defined(M88K)
+# if !defined(HP_PA) && !defined(M88K) && !defined(POWERPC)
--> bad news <--
# endif
# endif
@@ -272,9 +339,10 @@ __asm GC_push_regs(
/* the stack. Return sp. */
# ifdef SPARC
asm(" .seg \"text\"");
-# ifdef SUNOS5
+# ifdef SVR4
asm(" .globl GC_save_regs_in_stack");
asm("GC_save_regs_in_stack:");
+ asm(" .type GC_save_regs_in_stack,#function");
# else
asm(" .globl _GC_save_regs_in_stack");
asm("_GC_save_regs_in_stack:");
@@ -283,7 +351,10 @@ __asm GC_push_regs(
asm(" mov %sp,%o0");
asm(" retl");
asm(" nop");
-
+# ifdef SVR4
+ asm(" .GC_save_regs_in_stack_end:");
+ asm(" .size GC_save_regs_in_stack,.GC_save_regs_in_stack_end-GC_save_regs_in_stack");
+# endif
# ifdef LINT
word GC_save_regs_in_stack() { return(0 /* sp really */);}
# endif
@@ -304,6 +375,7 @@ __asm GC_push_regs(
# else
asm(".globl GC_clear_stack_inner");
asm("GC_clear_stack_inner:");
+    asm(".type GC_clear_stack_inner,#function");
# endif
asm("mov %sp,%o2"); /* Save sp */
asm("add %sp,-8,%o3"); /* p = sp-8 */
@@ -320,6 +392,10 @@ __asm GC_push_regs(
asm("retl");
asm("mov %o2,%sp"); /* Restore sp., delay slot */
/* First argument = %o0 = return value */
+# ifdef SVR4
+ asm(" .GC_clear_stack_inner_end:");
+ asm(" .size GC_clear_stack_inner,.GC_clear_stack_inner_end-GC_clear_stack_inner");
+# endif
# ifdef LINT
/*ARGSUSED*/
diff --git a/makefile.depend b/makefile.depend
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/makefile.depend
diff --git a/malloc.c b/malloc.c
index 770826eb..f6a9628e 100644
--- a/malloc.c
+++ b/malloc.c
@@ -11,7 +11,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:03 pm PDT */
+/* Boehm, July 31, 1995 5:02 pm PDT */
#include <stdio.h>
#include "gc_priv.h"
@@ -19,6 +19,19 @@
extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */
void GC_extend_size_map(); /* in misc.c. */
+/* Allocate reclaim list for kind: */
+/* Return TRUE on success */
+bool GC_alloc_reclaim_list(kind)
+register struct obj_kind * kind;
+{
+ struct hblk ** result = (struct hblk **)
+ GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));
+ if (result == 0) return(FALSE);
+ BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
+ kind -> ok_reclaim_list = result;
+ return(TRUE);
+}
+
/* allocate lb bytes for an object of kind. */
 /* Should not be used directly to allocate     */
/* objects such as STUBBORN objects that */
@@ -34,13 +47,14 @@ register ptr_t op;
register ptr_t *opp;
if( SMALL_OBJ(lb) ) {
+ register struct obj_kind * kind = GC_obj_kinds + k;
# ifdef MERGE_SIZES
lw = GC_size_map[lb];
# else
- lw = ROUNDED_UP_WORDS(lb);
+ lw = ALIGNED_WORDS(lb);
if (lw == 0) lw = 1;
# endif
- opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+ opp = &(kind -> ok_freelist[lw]);
if( (op = *opp) == 0 ) {
# ifdef MERGE_SIZES
if (GC_size_map[lb] == 0) {
@@ -54,6 +68,9 @@ register ptr_t *opp;
return(GC_generic_malloc_inner(lb, k));
}
# endif
+ if (kind -> ok_reclaim_list == 0) {
+ if (!GC_alloc_reclaim_list(kind)) goto out;
+ }
op = GC_allocobj(lw, k);
if (op == 0) goto out;
}
@@ -73,7 +90,8 @@ register ptr_t *opp;
if (!GC_is_initialized) GC_init_inner();
/* Do our share of marking work */
- if(GC_incremental && !GC_dont_gc) GC_collect_a_little((int)n_blocks);
+ if(GC_incremental && !GC_dont_gc)
+ GC_collect_a_little_inner((int)n_blocks);
lw = ROUNDED_UP_WORDS(lb);
while ((h = GC_allochblk(lw, k, 0)) == 0
&& GC_collect_or_expand(n_blocks));
@@ -93,23 +111,24 @@ out:
/* Allocate a composite object of size n bytes. The caller guarantees */
/* that pointers past the first page are not relevant. Caller holds */
/* allocation lock. */
-ptr_t GC_malloc_ignore_off_page_inner(lb)
+ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
register size_t lb;
+register int k;
{
-# ifdef ALL_INTERIOR_POINTERS
register struct hblk * h;
register word n_blocks;
register word lw;
register ptr_t op;
if (lb <= HBLKSIZE)
- return(GC_generic_malloc_inner((word)lb, NORMAL));
+ return(GC_generic_malloc_inner((word)lb, k));
n_blocks = divHBLKSZ(ADD_SLOP(lb) + HDR_BYTES + HBLKSIZE-1);
if (!GC_is_initialized) GC_init_inner();
/* Do our share of marking work */
- if(GC_incremental && !GC_dont_gc) GC_collect_a_little((int)n_blocks);
+ if(GC_incremental && !GC_dont_gc)
+ GC_collect_a_little_inner((int)n_blocks);
lw = ROUNDED_UP_WORDS(lb);
- while ((h = GC_allochblk(lw, NORMAL, IGNORE_OFF_PAGE)) == 0
+ while ((h = GC_allochblk(lw, k, IGNORE_OFF_PAGE)) == 0
&& GC_collect_or_expand(n_blocks));
if (h == 0) {
op = 0;
@@ -119,30 +138,44 @@ register size_t lb;
}
GC_words_allocd += lw;
return((ptr_t)op);
-# else
- return(GC_generic_malloc_inner((word)lb, NORMAL));
-# endif
}
-# if defined(__STDC__) || defined(__cplusplus)
- void * GC_malloc_ignore_off_page(size_t lb)
-# else
- char * GC_malloc_ignore_off_page(lb)
- register size_t lb;
-# endif
+ptr_t GC_generic_malloc_ignore_off_page(lb, k)
+register size_t lb;
+register int k;
{
- register extern_ptr_t result;
+ register ptr_t result;
DCL_LOCK_STATE;
GC_invoke_finalizers();
DISABLE_SIGNALS();
LOCK();
- result = GC_malloc_ignore_off_page_inner(lb);
+ result = GC_generic_malloc_inner_ignore_off_page(lb,k);
UNLOCK();
ENABLE_SIGNALS();
return(result);
}
+# if defined(__STDC__) || defined(__cplusplus)
+ void * GC_malloc_ignore_off_page(size_t lb)
+# else
+ char * GC_malloc_ignore_off_page(lb)
+ register size_t lb;
+# endif
+{
+ return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
+}
+
+# if defined(__STDC__) || defined(__cplusplus)
+ void * GC_malloc_atomic_ignore_off_page(size_t lb)
+# else
+ char * GC_malloc_atomic_ignore_off_page(lb)
+ register size_t lb;
+# endif
+{
+ return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
+}
+
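+/* Usage sketch (editor's note; the 4 MB figure is purely illustrative). */
+/* The _ignore_off_page variants suit large objects whose base pointer */
+/* the client always retains, since pointers past the first page need */
+/* not keep the object alive: */
+/*   char *buf = (char *)GC_malloc_atomic_ignore_off_page(4*1024*1024); */
+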
ptr_t GC_generic_malloc(lb, k)
register word lb;
register int k;
@@ -168,16 +201,20 @@ register int k;
{
register ptr_t op;
register ptr_t *opp;
+register struct obj_kind * kind = GC_obj_kinds + k;
DCL_LOCK_STATE;
GC_invoke_finalizers();
DISABLE_SIGNALS();
LOCK();
- opp = &(GC_obj_kinds[k].ok_freelist[lw]);
+ opp = &(kind -> ok_freelist[lw]);
if( (op = *opp) == 0 ) {
if (!GC_is_initialized) {
GC_init_inner();
}
+ if (kind -> ok_reclaim_list == 0) {
+ if (!GC_alloc_reclaim_list(kind)) goto out;
+ }
op = GC_clear_stack(GC_allocobj(lw, k));
if (op == 0) goto out;
}
@@ -218,7 +255,7 @@ DCL_LOCK_STATE;
obj_link(op) = 0;
return(op);
}
- lw = ROUNDED_UP_WORDS(lb);
+ lw = ALIGNED_WORDS(lb);
GC_invoke_finalizers();
DISABLE_SIGNALS();
LOCK();
@@ -259,15 +296,15 @@ void * GC_malloc_many(size_t lb)
# endif
#define GENERAL_MALLOC(lb,k) \
- (extern_ptr_t)GC_clear_stack(GC_generic_malloc((word)lb, k))
+ (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
/* We make the GC_clear_stack_call a tail call, hoping to get more of */
/* the stack. */
/* Allocate lb bytes of atomic (pointerfree) data */
# ifdef __STDC__
- extern_ptr_t GC_malloc_atomic(size_t lb)
+ GC_PTR GC_malloc_atomic(size_t lb)
# else
- extern_ptr_t GC_malloc_atomic(lb)
+ GC_PTR GC_malloc_atomic(lb)
size_t lb;
# endif
{
@@ -280,7 +317,7 @@ DCL_LOCK_STATE;
# ifdef MERGE_SIZES
lw = GC_size_map[lb];
# else
- lw = ROUNDED_UP_WORDS(lb);
+ lw = ALIGNED_WORDS(lb);
# endif
opp = &(GC_aobjfreelist[lw]);
FASTLOCK();
@@ -292,7 +329,7 @@ DCL_LOCK_STATE;
*opp = obj_link(op);
GC_words_allocd += lw;
FASTUNLOCK();
- return((extern_ptr_t) op);
+ return((GC_PTR) op);
} else {
return(GENERAL_MALLOC((word)lb, PTRFREE));
}
@@ -300,9 +337,9 @@ DCL_LOCK_STATE;
/* Allocate lb bytes of composite (pointerful) data */
# ifdef __STDC__
- extern_ptr_t GC_malloc(size_t lb)
+ GC_PTR GC_malloc(size_t lb)
# else
- extern_ptr_t GC_malloc(lb)
+ GC_PTR GC_malloc(lb)
size_t lb;
# endif
{
@@ -315,7 +352,7 @@ DCL_LOCK_STATE;
# ifdef MERGE_SIZES
lw = GC_size_map[lb];
# else
- lw = ROUNDED_UP_WORDS(lb);
+ lw = ALIGNED_WORDS(lb);
# endif
opp = &(GC_objfreelist[lw]);
FASTLOCK();
@@ -328,17 +365,42 @@ DCL_LOCK_STATE;
obj_link(op) = 0;
GC_words_allocd += lw;
FASTUNLOCK();
- return((extern_ptr_t) op);
+ return((GC_PTR) op);
} else {
return(GENERAL_MALLOC((word)lb, NORMAL));
}
}
+# ifdef REDIRECT_MALLOC
+# ifdef __STDC__
+ GC_PTR malloc(size_t lb)
+# else
+ GC_PTR malloc(lb)
+ size_t lb;
+# endif
+ {
+ /* It might help to manually inline the GC_malloc call here. */
+ /* But any decent compiler should reduce the extra procedure call */
+ /* to at most a jump instruction in this case. */
+ return(REDIRECT_MALLOC(lb));
+ }
+
+# ifdef __STDC__
+ GC_PTR calloc(size_t n, size_t lb)
+# else
+ GC_PTR calloc(n, lb)
+ size_t n, lb;
+# endif
+ {
+ return(REDIRECT_MALLOC(n*lb));
+ }
+# endif /* REDIRECT_MALLOC */
+
/* Allocate lb bytes of pointerful, traced, but not collectable data */
# ifdef __STDC__
- extern_ptr_t GC_malloc_uncollectable(size_t lb)
+ GC_PTR GC_malloc_uncollectable(size_t lb)
# else
- extern_ptr_t GC_malloc_uncollectable(lb)
+ GC_PTR GC_malloc_uncollectable(lb)
size_t lb;
# endif
{
@@ -350,12 +412,13 @@ DCL_LOCK_STATE;
if( SMALL_OBJ(lb) ) {
# ifdef MERGE_SIZES
# ifdef ADD_BYTE_AT_END
- lb--; /* We don't need the extra byte, since this won't be */
+ if (lb != 0) lb--;
+ /* We don't need the extra byte, since this won't be */
/* collected anyway. */
# endif
lw = GC_size_map[lb];
# else
- lw = ROUNDED_UP_WORDS(lb);
+ lw = ALIGNED_WORDS(lb);
# endif
opp = &(GC_uobjfreelist[lw]);
FASTLOCK();
@@ -367,7 +430,7 @@ DCL_LOCK_STATE;
GC_set_mark_bit(op);
GC_non_gc_bytes += WORDS_TO_BYTES(lw);
FASTUNLOCK();
- return((extern_ptr_t) op);
+ return((GC_PTR) op);
}
FASTUNLOCK();
op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
@@ -389,11 +452,11 @@ DCL_LOCK_STATE;
GC_non_gc_bytes += WORDS_TO_BYTES(lw);
UNLOCK();
ENABLE_SIGNALS();
- return((extern_ptr_t) op);
+ return((GC_PTR) op);
}
}
-extern_ptr_t GC_generic_or_special_malloc(lb,knd)
+GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
@@ -419,10 +482,10 @@ int knd;
/* The kind (e.g. atomic) is the same as that of the old. */
/* Shrinking of large blocks is not implemented well. */
# ifdef __STDC__
- extern_ptr_t GC_realloc(extern_ptr_t p, size_t lb)
+ GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
- extern_ptr_t GC_realloc(p,lb)
- extern_ptr_t p;
+ GC_PTR GC_realloc(p,lb)
+ GC_PTR p;
size_t lb;
# endif
{
@@ -442,11 +505,15 @@ int obj_kind;
if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
/* Round it up to the next whole heap block */
+ register word descr;
sz = (sz+HDR_BYTES+HBLKSIZE-1)
& (~HBLKMASK);
sz -= HDR_BYTES;
hhdr -> hb_sz = BYTES_TO_WORDS(sz);
+ descr = GC_obj_kinds[obj_kind].ok_descriptor;
+ if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
+ hhdr -> hb_descr = descr;
if (obj_kind == UNCOLLECTABLE) GC_non_gc_bytes += (sz - orig_sz);
/* Extra area is already cleared by allochblk. */
}
@@ -464,7 +531,7 @@ int obj_kind;
return(p);
} else {
/* shrink */
- extern_ptr_t result =
+ GC_PTR result =
GC_generic_or_special_malloc((word)lb, obj_kind);
if (result == 0) return(0);
@@ -476,7 +543,7 @@ int obj_kind;
}
} else {
/* grow */
- extern_ptr_t result =
+ GC_PTR result =
GC_generic_or_special_malloc((word)lb, obj_kind);
if (result == 0) return(0);
@@ -486,12 +553,25 @@ int obj_kind;
}
}
+# ifdef REDIRECT_MALLOC
+# ifdef __STDC__
+ GC_PTR realloc(GC_PTR p, size_t lb)
+# else
+ GC_PTR realloc(p,lb)
+ GC_PTR p;
+ size_t lb;
+# endif
+ {
+ return(GC_realloc(p, lb));
+ }
+# endif /* REDIRECT_MALLOC */
+
/* Explicitly deallocate an object p. */
# ifdef __STDC__
- void GC_free(extern_ptr_t p)
+ void GC_free(GC_PTR p)
# else
void GC_free(p)
- extern_ptr_t p;
+ GC_PTR p;
# endif
{
register struct hblk *h;
@@ -517,7 +597,7 @@ int obj_kind;
GC_mem_freed += sz;
/* A signal here can make GC_mem_freed and GC_non_gc_bytes */
/* inconsistent. We claim this is benign. */
- if (knd == UNCOLLECTABLE) GC_non_gc_bytes -= sz;
+ if (knd == UNCOLLECTABLE) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
if (ok -> ok_init) {
BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
}
@@ -532,10 +612,21 @@ int obj_kind;
DISABLE_SIGNALS();
LOCK();
GC_mem_freed += sz;
- if (knd == UNCOLLECTABLE) GC_non_gc_bytes -= sz;
+ if (knd == UNCOLLECTABLE) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
GC_freehblk(h);
UNLOCK();
ENABLE_SIGNALS();
}
}
+# ifdef REDIRECT_MALLOC
+# ifdef __STDC__
+ void free(GC_PTR p)
+# else
+ void free(p)
+ GC_PTR p;
+# endif
+ {
+ GC_free(p);
+ }
+# endif /* REDIRECT_MALLOC */
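+
+/* Build sketch (editor's note; the particular define is an assumption */
+/* about a typical client build, not taken from this tree's makefiles). */
+/* Compiling with something like */
+/*   -DREDIRECT_MALLOC=GC_malloc_uncollectable */
+/* makes the wrappers above route ordinary calls, e.g. */
+/*   char *p = malloc(100); ... free(p); */
+/* through the collector instead of the system allocator. */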
diff --git a/mark.c b/mark.c
index b73ff0e4..a93c4ede 100644
--- a/mark.c
+++ b/mark.c
@@ -1,7 +1,7 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -31,20 +31,20 @@ word GC_n_mark_procs = 0;
/* GC_init is called. */
/* It's done here, since we need to deal with mark descriptors. */
struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
-/* PTRFREE */ { &GC_aobjfreelist[0], &GC_areclaim_list[0],
+/* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
0 | DS_LENGTH, FALSE, FALSE },
-/* NORMAL */ { &GC_objfreelist[0], &GC_reclaim_list[0],
-# ifdef ADD_BYTE_AT_END
- (word)(WORDS_TO_BYTES(-1)) | DS_LENGTH,
+/* NORMAL */ { &GC_objfreelist[0], 0,
+# if defined(ADD_BYTE_AT_END) && ALIGNMENT > DS_TAGS
+ (word)(-ALIGNMENT) | DS_LENGTH,
# else
0 | DS_LENGTH,
# endif
TRUE /* add length to descr */, TRUE },
/* UNCOLLECTABLE */
- { &GC_uobjfreelist[0], &GC_ureclaim_list[0],
+ { &GC_uobjfreelist[0], 0,
0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# ifdef STUBBORN_ALLOC
-/*STUBBORN*/ { &GC_sobjfreelist[0], &GC_sreclaim_list[0],
+/*STUBBORN*/ { &GC_sobjfreelist[0], 0,
0 | DS_LENGTH, TRUE /* add length to descr */, TRUE },
# endif
};
@@ -340,7 +340,7 @@ register hdr * hhdr;
current = (word)HBLKPTR(current) + HDR_BYTES;
do {
- current = current - HBLKSIZE*(int)hhdr;
+ current = current - HBLKSIZE*(word)hhdr;
hhdr = HDR(current);
} while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
/* current points to the start of the large object */
@@ -362,6 +362,12 @@ register hdr * hhdr;
# endif
}
+void GC_invalidate_mark_state()
+{
+ GC_mark_state = MS_INVALID;
+ GC_mark_stack_top = GC_mark_stack-1;
+}
+
mse * GC_signal_mark_stack_overflow(msp)
mse * msp;
{
@@ -421,7 +427,7 @@ void GC_mark_from_mark_stack()
WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
/* Make sure that pointers overlapping the two ranges are */
/* considered. */
- limit += sizeof(word) - ALIGNMENT;
+ limit = (word *)((char *)limit + sizeof(word) - ALIGNMENT);
break;
case DS_BITMAP:
GC_mark_stack_top_reg--;
@@ -624,34 +630,6 @@ ptr_t top;
}
#endif
-/*
- * Push a single value onto mark stack. Mark from the object pointed to by p.
- * GC_push_one is normally called by GC_push_regs, and thus must be defined.
- * P is considered valid even if it is an interior pointer.
- * Previously marked objects are not pushed. Hence we make progress even
- * if the mark stack overflows.
- */
-# define GC_PUSH_ONE_STACK(p) \
- if ((ptr_t)(p) >= GC_least_plausible_heap_addr \
- && (ptr_t)(p) < GC_greatest_plausible_heap_addr) { \
- GC_push_one_checked(p,TRUE); \
- }
-
-/*
- * As above, but interior pointer recognition as for
- * normal for heap pointers.
- */
-# ifdef ALL_INTERIOR_POINTERS
-# define AIP TRUE
-# else
-# define AIP FALSE
-# endif
-# define GC_PUSH_ONE_HEAP(p) \
- if ((ptr_t)(p) >= GC_least_plausible_heap_addr \
- && (ptr_t)(p) < GC_greatest_plausible_heap_addr) { \
- GC_push_one_checked(p,AIP); \
- }
-
# ifdef MSWIN32
void __cdecl GC_push_one(p)
# else
@@ -722,6 +700,51 @@ register bool interior_ptrs;
}
}
+# ifdef TRACE_BUF
+
+# define TRACE_ENTRIES 1000
+
+struct trace_entry {
+ char * kind;
+ word gc_no;
+ word words_allocd;
+ word arg1;
+ word arg2;
+} GC_trace_buf[TRACE_ENTRIES];
+
+int GC_trace_buf_ptr = 0;
+
+void GC_add_trace_entry(char *kind, word arg1, word arg2)
+{
+ GC_trace_buf[GC_trace_buf_ptr].kind = kind;
+ GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
+ GC_trace_buf[GC_trace_buf_ptr].words_allocd = GC_words_allocd;
+ GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
+ GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
+ GC_trace_buf_ptr++;
+ if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
+}
+
+void GC_print_trace(word gc_no, bool lock)
+{
+ int i;
+ struct trace_entry *p;
+
+ if (lock) LOCK();
+ for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
+ if (i < 0) i = TRACE_ENTRIES-1;
+ p = GC_trace_buf + i;
+	if (p -> gc_no < gc_no || p -> kind == 0) {
+	    if (lock) UNLOCK();
+	    return;
+	}
+ printf("Trace:%s (gc:%d,words:%d) 0x%X, 0x%X\n",
+ p -> kind, p -> gc_no, p -> words_allocd,
+ (p -> arg1) ^ 0x80000000, (p -> arg2) ^ 0x80000000);
+ }
+ printf("Trace incomplete\n");
+ if (lock) UNLOCK();
+}
+
+# endif /* TRACE_BUF */
+
/*
* A version of GC_push_all that treats all interior pointers as valid
*/
@@ -731,6 +754,9 @@ ptr_t top;
{
# ifdef ALL_INTERIOR_POINTERS
GC_push_all(bottom, top);
+# ifdef TRACE_BUF
+ GC_add_trace_entry("GC_push_all_stack", bottom, top);
+# endif
# else
word * b = (word *)(((long) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
word * t = (word *)(((long) top) & ~(ALIGNMENT-1));
@@ -795,6 +821,8 @@ register hdr * hhdr;
}
+#ifndef UNALIGNED
+
/* Push all objects reachable from marked objects in the given block */
/* of size 2 objects. */
void GC_push_marked2(h, hhdr)
@@ -881,6 +909,8 @@ register hdr * hhdr;
# undef GC_least_plausible_heap_addr
}
+#endif /* UNALIGNED */
+
#endif /* SMALL_CONFIG */
/* Push all objects reachable from marked objects in the given block */
@@ -909,10 +939,12 @@ register hdr * hhdr;
}
switch(sz) {
-# ifndef SMALL_CONFIG
+# if !defined(SMALL_CONFIG)
case 1:
GC_push_marked1(h, hhdr);
break;
+# endif
+# if !defined(SMALL_CONFIG) && !defined(UNALIGNED)
case 2:
GC_push_marked2(h, hhdr);
break;
@@ -924,7 +956,10 @@ register hdr * hhdr;
GC_mark_stack_top_reg = GC_mark_stack_top;
for (p = (word *)h + HDR_WORDS, word_no = HDR_WORDS; p <= lim;
p += sz, word_no += sz) {
- /* This needs manual optimization: */
+ /* This ignores user specified mark procs. This currently */
+ /* doesn't matter, since marking from the whole object */
+ /* is always sufficient, and we will eventually use the user */
+ /* mark proc to avoid any bogus pointers. */
if (mark_bit_from_hdr(hhdr, word_no)) {
/* Mark from fields inside the object */
PUSH_OBJ((word *)p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
diff --git a/mark_rts.c b/mark_rts.c
index 376746f1..c5883fa7 100644
--- a/mark_rts.c
+++ b/mark_rts.c
@@ -11,19 +11,23 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 1:58 pm PDT */
+/* Boehm, October 9, 1995 1:06 pm PDT */
# include <stdio.h>
# include "gc_priv.h"
-# ifdef PCR
-# define MAX_ROOT_SETS 1024
+# ifdef LARGE_CONFIG
+# define MAX_ROOT_SETS 4096
# else
-# ifdef MSWIN32
+# ifdef PCR
+# define MAX_ROOT_SETS 1024
+# else
+# ifdef MSWIN32
# define MAX_ROOT_SETS 512
/* Under NT, we add only written pages, which can result */
/* in many small root sets. */
-# else
+# else
# define MAX_ROOT_SETS 64
+# endif
# endif
# endif
@@ -37,6 +41,8 @@ struct roots {
# ifndef MSWIN32
struct roots * r_next;
# endif
+ bool r_tmp;
+ /* Delete before registering new dynamic libraries */
};
static struct roots static_roots[MAX_ROOT_SETS];
@@ -45,6 +51,54 @@ static int n_root_sets = 0;
/* static_roots[0..n_root_sets) contains the valid root sets. */
+# if !defined(NO_DEBUGGING)
+/* For debugging: */
+void GC_print_static_roots()
+{
+ register int i;
+ size_t total = 0;
+
+ for (i = 0; i < n_root_sets; i++) {
+ GC_printf2("From 0x%lx to 0x%lx ",
+ (unsigned long) static_roots[i].r_start,
+ (unsigned long) static_roots[i].r_end);
+ if (static_roots[i].r_tmp) {
+ GC_printf0(" (temporary)\n");
+ } else {
+ GC_printf0("\n");
+ }
+ total += static_roots[i].r_end - static_roots[i].r_start;
+ }
+ GC_printf1("Total size: %ld\n", (unsigned long) total);
+ if (GC_root_size != total) {
+ GC_printf1("GC_root_size incorrect: %ld!!\n",
+ (unsigned long) GC_root_size);
+ }
+}
+# endif /* NO_DEBUGGING */
+
+/* Primarily for debugging support: */
+/* Is the address p in one of the registered static */
+/* root sections? */
+bool GC_is_static_root(p)
+ptr_t p;
+{
+ static int last_root_set = 0;
+ register int i;
+
+
+ if (p >= static_roots[last_root_set].r_start
+ && p < static_roots[last_root_set].r_end) return(TRUE);
+ for (i = 0; i < n_root_sets; i++) {
+ if (p >= static_roots[i].r_start
+ && p < static_roots[i].r_end) {
+ last_root_set = i;
+ return(TRUE);
+ }
+ }
+ return(FALSE);
+}
+
#ifndef MSWIN32
# define LOG_RT_SIZE 6
# define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
@@ -112,7 +166,7 @@ char * b; char * e;
DISABLE_SIGNALS();
LOCK();
- GC_add_roots_inner(b, e);
+ GC_add_roots_inner(b, e, FALSE);
UNLOCK();
ENABLE_SIGNALS();
}
@@ -122,8 +176,11 @@ char * b; char * e;
/* is a moderately fast noop, and hence benign. We do not handle */
/* different but overlapping intervals efficiently. (We do handle */
/* them correctly.) */
-void GC_add_roots_inner(b, e)
+/* Tmp specifies that the interval may be deleted before */
+/* reregistering dynamic libraries. */
+void GC_add_roots_inner(b, e, tmp)
char * b; char * e;
+bool tmp;
{
struct roots * old;
@@ -136,8 +193,8 @@ char * b; char * e;
} else if ((ptr_t)b >= beginGC_arrays) {
b = (char *)endGC_arrays;
} else {
- GC_add_roots_inner(b, (char *)beginGC_arrays);
- GC_add_roots_inner((char *)endGC_arrays, e);
+ GC_add_roots_inner(b, (char *)beginGC_arrays, tmp);
+ GC_add_roots_inner((char *)endGC_arrays, e, tmp);
return;
}
}
@@ -156,10 +213,13 @@ char * b; char * e;
if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
if ((ptr_t)b < old -> r_start) {
+	      GC_root_size += (old -> r_start - (ptr_t)b);
 	      old -> r_start = (ptr_t)b;
}
if ((ptr_t)e > old -> r_end) {
+	      GC_root_size += ((ptr_t)e - old -> r_end);
 	      old -> r_end = (ptr_t)e;
}
+ old -> r_tmp &= tmp;
break;
}
}
@@ -174,14 +234,18 @@ char * b; char * e;
if ((ptr_t)b <= old -> r_end && (ptr_t)e >= old -> r_start) {
if ((ptr_t)b < old -> r_start) {
+	      GC_root_size += (old -> r_start - (ptr_t)b);
 	      old -> r_start = (ptr_t)b;
}
if ((ptr_t)e > old -> r_end) {
+	      GC_root_size += ((ptr_t)e - old -> r_end);
 	      old -> r_end = (ptr_t)e;
}
+ old -> r_tmp &= other -> r_tmp;
/* Delete this entry. */
+ GC_root_size -= (other -> r_end - other -> r_start);
other -> r_start = static_roots[n_root_sets-1].r_start;
other -> r_end = static_roots[n_root_sets-1].r_end;
- n_root_sets--;
+ n_root_sets--;
}
}
return;
@@ -202,6 +266,7 @@ char * b; char * e;
}
static_roots[n_root_sets].r_start = (ptr_t)b;
static_roots[n_root_sets].r_end = (ptr_t)e;
+ static_roots[n_root_sets].r_tmp = tmp;
# ifndef MSWIN32
static_roots[n_root_sets].r_next = 0;
# endif
@@ -210,7 +275,7 @@ char * b; char * e;
n_root_sets++;
}
-void GC_clear_roots()
+void GC_clear_roots GC_PROTO((void))
{
DCL_LOCK_STATE;
@@ -218,18 +283,50 @@ void GC_clear_roots()
LOCK();
n_root_sets = 0;
GC_root_size = 0;
+# ifndef MSWIN32
+ {
+ register int i;
+
+ for (i = 0; i < RT_SIZE; i++) root_index[i] = 0;
+ }
+# endif
UNLOCK();
ENABLE_SIGNALS();
}
-# ifndef THREADS
+/* Internal use only; lock held. */
+void GC_remove_tmp_roots()
+{
+ register int i;
+
+ for (i = 0; i < n_root_sets; ) {
+ if (static_roots[i].r_tmp) {
+ GC_root_size -= (static_roots[i].r_end - static_roots[i].r_start);
+ static_roots[i].r_start = static_roots[n_root_sets-1].r_start;
+ static_roots[i].r_end = static_roots[n_root_sets-1].r_end;
+ static_roots[i].r_tmp = static_roots[n_root_sets-1].r_tmp;
+ n_root_sets--;
+ } else {
+ i++;
+ }
+ }
+# ifndef MSWIN32
+ {
+ register int i;
+
+ for (i = 0; i < RT_SIZE; i++) root_index[i] = 0;
+ for (i = 0; i < n_root_sets; i++) add_roots_to_index(static_roots + i);
+ }
+# endif
+
+}
+
ptr_t GC_approx_sp()
{
word dummy;
return((ptr_t)(&dummy));
}
-# endif
/*
* Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
@@ -255,6 +352,7 @@ bool all;
/* Reregister dynamic libraries, in case one got added. */
# if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(PCR)) \
&& !defined(SRC_M3)
+ GC_remove_tmp_roots();
GC_register_dynamic_libraries();
# endif
/* Mark everything in static data areas */
diff --git a/misc.c b/misc.c
index f4b5d9ca..4dd68faa 100644
--- a/misc.c
+++ b/misc.c
@@ -11,18 +11,22 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:04 pm PDT */
+/* Boehm, July 31, 1995 5:02 pm PDT */
-#define DEBUG /* Some run-time consistency checks */
-#undef DEBUG
-#define VERBOSE
-#undef VERBOSE
#include <stdio.h>
#include <signal.h>
+
#define I_HIDE_POINTERS /* To make GC_call_with_alloc_lock visible */
#include "gc_priv.h"
+#ifdef SOLARIS_THREADS
+# include <sys/syscall.h>
+#endif
+#ifdef MSWIN32
+# include <windows.h>
+#endif
+
# ifdef THREADS
# ifdef PCR
# include "il/PCR_IL.h"
@@ -125,6 +129,16 @@ extern signed_word GC_mem_found;
if (word_sz > MAXOBJSZ) {
word_sz = MAXOBJSZ;
}
+ /* If we can fit the same number of larger objects in a block, */
+ /* do so. */
+ {
+# ifdef ALIGN_DOUBLE
+# define INCR 2
+# else
+# define INCR 1
+# endif
+ while (BODY_SZ/word_sz == BODY_SZ/(word_sz + INCR)) word_sz += INCR;
+ }
byte_sz = WORDS_TO_BYTES(word_sz);
# ifdef ADD_BYTE_AT_END
/* We need one extra byte; don't fill in GC_size_map[byte_sz] */
@@ -168,13 +182,6 @@ word GC_high_water;
/* "hottest" stack pointer value we have seen */
/* recently. Degrades over time. */
-word GC_stack_upper_bound()
-{
- word dummy;
-
- return((word)(&dummy));
-}
-
word GC_words_allocd_at_reset;
#if defined(ASM_CLEAR_CODE) && !defined(THREADS)
@@ -201,6 +208,7 @@ word limit;
}
#endif
+extern ptr_t GC_approx_sp(); /* in mark_rts.c */
/* Clear some of the inaccessible part of the stack. Returns its */
/* argument, so it can be used in a tail call position, hence clearing */
@@ -208,7 +216,7 @@ word limit;
ptr_t GC_clear_stack(arg)
ptr_t arg;
{
- register word sp = GC_stack_upper_bound();
+ register word sp = (word)GC_approx_sp(); /* Hotter than actual sp */
register word limit;
# ifdef THREADS
word dummy[CLEAR_SIZE];;
@@ -268,24 +276,31 @@ ptr_t arg;
/* Return a pointer to the base address of p, given a pointer to a */
/* an address within an object. Return 0 o.w. */
# ifdef __STDC__
- extern_ptr_t GC_base(extern_ptr_t p)
+ GC_PTR GC_base(GC_PTR p)
# else
- extern_ptr_t GC_base(p)
- extern_ptr_t p;
+ GC_PTR GC_base(p)
+ GC_PTR p;
# endif
{
register word r;
register struct hblk *h;
+ register bottom_index *bi;
register hdr *candidate_hdr;
+ register word limit;
r = (word)p;
h = HBLKPTR(r);
- candidate_hdr = HDR(r);
+ GET_BI(r, bi);
+ if (bi == 0) {
+ /* Collector uninitialized. Nothing allocated yet. */
+ return(0);
+ }
+ candidate_hdr = HDR_FROM_BI(bi, r);
if (candidate_hdr == 0) return(0);
/* If it's a pointer to the middle of a large object, move it */
/* to the beginning. */
while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)) {
- h = h - (int)candidate_hdr;
+ h = FORWARDED_ADDR(h,candidate_hdr);
r = (word)h + HDR_BYTES;
candidate_hdr = HDR(h);
}
@@ -294,28 +309,44 @@ ptr_t arg;
r &= ~(WORDS_TO_BYTES(1) - 1);
{
register int offset =
- (word *)r - (word *)(HBLKPTR(r)) - HDR_WORDS;
+ (char *)r - (char *)(HBLKPTR(r)) - HDR_BYTES;
register signed_word sz = candidate_hdr -> hb_sz;
- register int correction;
-
- correction = offset % sz;
- r -= (WORDS_TO_BYTES(correction));
- if (((word *)r + sz) > (word *)(h + 1)
+
+# ifdef ALL_INTERIOR_POINTERS
+ register map_entry_type map_entry;
+
+ map_entry = MAP_ENTRY((candidate_hdr -> hb_map), offset);
+ if (map_entry == OBJ_INVALID) {
+ return(0);
+ }
+ r -= WORDS_TO_BYTES(map_entry);
+ limit = r + WORDS_TO_BYTES(sz);
+# else
+ register int correction;
+
+ offset = BYTES_TO_WORDS(offset - HDR_BYTES);
+ correction = offset % sz;
+ r -= (WORDS_TO_BYTES(correction));
+ limit = r + WORDS_TO_BYTES(sz);
+ if (limit > (word)(h + 1)
&& sz <= BYTES_TO_WORDS(HBLKSIZE) - HDR_WORDS) {
return(0);
- }
+ }
+# endif
+ if ((word)p >= limit) return(0);
}
- return((extern_ptr_t)r);
+ return((GC_PTR)r);
}
+
/* Return the size of an object, given a pointer to its base. */
 /* (For small objects this also happens to work from interior pointers, */
/* but that shouldn't be relied upon.) */
# ifdef __STDC__
- size_t GC_size(extern_ptr_t p)
+ size_t GC_size(GC_PTR p)
# else
size_t GC_size(p)
- extern_ptr_t p;
+ GC_PTR p;
# endif
{
register int sz;
@@ -329,11 +360,16 @@ ptr_t arg;
}
}
-size_t GC_get_heap_size()
+size_t GC_get_heap_size GC_PROTO(())
{
return ((size_t) GC_heapsize);
}
+size_t GC_get_bytes_since_gc GC_PROTO(())
+{
+ return ((size_t) WORDS_TO_BYTES(GC_words_allocd));
+}
+
bool GC_is_initialized = FALSE;
void GC_init()
@@ -357,7 +393,6 @@ void GC_init_inner()
word dummy;
if (GC_is_initialized) return;
- GC_is_initialized = TRUE;
# ifdef MSWIN32
GC_init_win32();
# endif
@@ -422,10 +457,11 @@ void GC_init_inner()
ABORT("signed_word");
}
- GC_init_headers();
- /* Add initial guess of root sets */
+ /* Add initial guess of root sets. Do this first, since sbrk(0) */
+      /* might be used. */
GC_register_data_segments();
- GC_bl_init();
+ GC_init_headers();
+ GC_bl_init();
GC_mark_init();
if (!GC_expand_hp_inner((word)MINHINCR)) {
GC_err_printf0("Can't start up: not enough memory\n");
@@ -442,7 +478,12 @@ void GC_init_inner()
GC_init_size_map();
# endif
# ifdef PCR
- PCR_IL_Lock(PCR_Bool_false, PCR_allSigsBlocked, PCR_waitForever);
+ if (PCR_IL_Lock(PCR_Bool_false, PCR_allSigsBlocked, PCR_waitForever)
+ != PCR_ERes_okay) {
+ ABORT("Can't lock load state\n");
+ } else if (PCR_IL_Unlock() != PCR_ERes_okay) {
+ ABORT("Can't unlock load state\n");
+ }
PCR_IL_Unlock();
GC_pcr_install();
# endif
@@ -451,20 +492,26 @@ void GC_init_inner()
# ifdef STUBBORN_ALLOC
GC_stubborn_init();
# endif
+ GC_is_initialized = TRUE;
/* Convince lint that some things are used */
# ifdef LINT
{
extern char * GC_copyright[];
- extern GC_read();
+ extern int GC_read();
+ extern void GC_register_finalizer_no_order();
- GC_noop(GC_copyright, GC_find_header, GC_print_block_list,
+ GC_noop(GC_copyright, GC_find_header,
GC_push_one, GC_call_with_alloc_lock, GC_read,
- GC_print_hblkfreelist, GC_dont_expand);
+ GC_dont_expand,
+# ifndef NO_DEBUGGING
+ GC_dump,
+# endif
+ GC_register_finalizer_no_order);
}
# endif
}
-void GC_enable_incremental()
+void GC_enable_incremental GC_PROTO(())
{
DCL_LOCK_STATE;
@@ -472,6 +519,14 @@ void GC_enable_incremental()
DISABLE_SIGNALS();
LOCK();
if (GC_incremental) goto out;
+# ifdef MSWIN32
+ {
+ extern bool GC_is_win32s();
+
+ /* VirtualProtect is not functional under win32s. */
+ if (GC_is_win32s()) goto out;
+ }
+# endif /* MSWIN32 */
# ifndef SOLARIS_THREADS
GC_dirty_init();
# endif
@@ -498,24 +553,35 @@ out:
# endif
}
-#if defined(OS2) || defined(MSWIN32)
- FILE * GC_stdout = NULL;
- FILE * GC_stderr = NULL;
-#endif
#ifdef MSWIN32
+# define LOG_FILE "gc.log"
+
+ HANDLE GC_stdout = 0, GC_stderr;
+ int GC_tmp;
+ DWORD GC_junk;
+
void GC_set_files()
{
- if (GC_stdout == NULL) {
- GC_stdout = fopen("gc.log", "wt");
+ if (!GC_stdout) {
+ GC_stdout = CreateFile(LOG_FILE, GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ NULL, CREATE_ALWAYS, FILE_FLAG_WRITE_THROUGH,
+ NULL);
+ if (INVALID_HANDLE_VALUE == GC_stdout) ABORT("Open of log file failed");
}
- if (GC_stderr == NULL) {
+ if (GC_stderr == 0) {
GC_stderr = GC_stdout;
}
}
+
#endif
-#ifdef OS2
+#if defined(OS2) || defined(MACOS)
+FILE * GC_stdout = NULL;
+FILE * GC_stderr = NULL;
+int GC_tmp; /* Should really be local ... */
+
void GC_set_files()
{
if (GC_stdout == NULL) {
@@ -527,6 +593,52 @@ out:
}
#endif
+#if !defined(OS2) && !defined(MACOS) && !defined(MSWIN32)
+ int GC_stdout = 1;
+ int GC_stderr = 2;
+# if !defined(AMIGA)
+# include <unistd.h>
+# endif
+#endif
+
+#if !defined(MSWIN32) && !defined(OS2) && !defined(MACOS)
+int GC_write(fd, buf, len)
+int fd;
+char *buf;
+size_t len;
+{
+ register int bytes_written = 0;
+ register int result;
+
+ while (bytes_written < len) {
+# ifdef SOLARIS_THREADS
+ result = syscall(SYS_write, fd, buf + bytes_written,
+ len - bytes_written);
+# else
+ result = write(fd, buf + bytes_written, len - bytes_written);
+# endif
+ if (-1 == result) return(result);
+ bytes_written += result;
+ }
+ return(bytes_written);
+}
+#endif /* UN*X */
+
+#ifdef MSWIN32
+# define WRITE(f, buf, len) (GC_set_files(), \
+ GC_tmp = WriteFile((f), (buf), \
+ (len), &GC_junk, NULL),\
+ (GC_tmp? 1 : -1))
+#else
+# if defined(OS2) || defined(MACOS)
+# define WRITE(f, buf, len) (GC_set_files(), \
+ GC_tmp = fwrite((buf), 1, (len), (f)), \
+ fflush(f), GC_tmp)
+# else
+# define WRITE(f, buf, len) GC_write((f), (buf), (len))
+# endif
+#endif
+
/* A version of printf that is unlikely to call malloc, and is thus safer */
/* to call from the collector in case malloc has been bound to GC_malloc. */
/* Assumes that no more than 1023 characters are written at once. */
@@ -543,15 +655,7 @@ long a, b, c, d, e, f;
buf[1024] = 0x15;
(void) sprintf(buf, format, a, b, c, d, e, f);
if (buf[1024] != 0x15) ABORT("GC_printf clobbered stack");
-# if defined(OS2) || defined(MSWIN32)
- GC_set_files();
- /* We hope this doesn't allocate */
- if (fwrite(buf, 1, strlen(buf), GC_stdout) != strlen(buf))
- ABORT("write to stdout failed");
- fflush(GC_stdout);
-# else
- if (write(1, buf, strlen(buf)) < 0) ABORT("write to stdout failed");
-# endif
+ if (WRITE(GC_stdout, buf, strlen(buf)) < 0) ABORT("write to stdout failed");
}
void GC_err_printf(format, a, b, c, d, e, f)
@@ -563,31 +667,45 @@ long a, b, c, d, e, f;
buf[1024] = 0x15;
(void) sprintf(buf, format, a, b, c, d, e, f);
if (buf[1024] != 0x15) ABORT("GC_err_printf clobbered stack");
-# if defined(OS2) || defined(MSWIN32)
- GC_set_files();
- /* We hope this doesn't allocate */
- if (fwrite(buf, 1, strlen(buf), GC_stderr) != strlen(buf))
- ABORT("write to stderr failed");
- fflush(GC_stderr);
-# else
- if (write(2, buf, strlen(buf)) < 0) ABORT("write to stderr failed");
-# endif
+ if (WRITE(GC_stderr, buf, strlen(buf)) < 0) ABORT("write to stderr failed");
}
void GC_err_puts(s)
char *s;
{
-# if defined(OS2) || defined(MSWIN32)
- GC_set_files();
- /* We hope this doesn't allocate */
- if (fwrite(s, 1, strlen(s), GC_stderr) != strlen(s))
- ABORT("write to stderr failed");
- fflush(GC_stderr);
-# else
- if (write(2, s, strlen(s)) < 0) ABORT("write to stderr failed");
-# endif
+ if (WRITE(GC_stderr, s, strlen(s)) < 0) ABORT("write to stderr failed");
+}
+
+# if defined(__STDC__) || defined(__cplusplus)
+ void GC_default_warn_proc(char *msg, GC_word arg)
+# else
+ void GC_default_warn_proc(msg, arg)
+ char *msg;
+ GC_word arg;
+# endif
+{
+ GC_err_printf1(msg, (unsigned long)arg);
+}
+
+GC_warn_proc GC_current_warn_proc = GC_default_warn_proc;
+
+# if defined(__STDC__) || defined(__cplusplus)
+ GC_warn_proc GC_set_warn_proc(GC_warn_proc p)
+# else
+ GC_warn_proc GC_set_warn_proc(p)
+ GC_warn_proc p;
+# endif
+{
+ GC_warn_proc result;
+
+ LOCK();
+ result = GC_current_warn_proc;
+ GC_current_warn_proc = p;
+ UNLOCK();
+ return(result);
}
+
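+/* Usage sketch (editor's illustration; my_warn_proc is a hypothetical */
+/* client-supplied handler, not part of this file): */
+/*   void my_warn_proc(char *msg, GC_word arg) */
+/*   { */
+/*       GC_err_printf1(msg, (unsigned long)arg); */
+/*   } */
+/*   ... */
+/*   GC_warn_proc old_proc = GC_set_warn_proc(my_warn_proc); */
+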
#ifndef PCR
void GC_abort(msg)
char * msg;
@@ -608,3 +726,19 @@ void GC_disable()
GC_dont_gc++;
}
# endif
+
+#if !defined(NO_DEBUGGING)
+
+void GC_dump()
+{
+ GC_printf0("***Static roots:\n");
+ GC_print_static_roots();
+ GC_printf0("\n***Heap sections:\n");
+ GC_print_heap_sects();
+ GC_printf0("\n***Free blocks:\n");
+ GC_print_hblkfreelist();
+ GC_printf0("\n***Blocks in use:\n");
+ GC_print_block_list();
+}
+
+# endif /* NO_DEBUGGING */
diff --git a/obj_map.c b/obj_map.c
index e728c37c..ee00db02 100644
--- a/obj_map.c
+++ b/obj_map.c
@@ -11,7 +11,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 1:59 pm PDT */
+/* Boehm, October 9, 1995 1:09 pm PDT */
/* Routines for maintaining maps describing heap block
* layouts for various object sizes. Allows fast pointer validity checks
@@ -46,8 +46,13 @@ hdr *hhdr;
/* Consider pointers that are offset bytes displaced from the beginning */
/* of an object to be valid. */
-void GC_register_displacement(offset)
-word offset;
+
+# if defined(__STDC__) || defined(__cplusplus)
+ void GC_register_displacement(GC_word offset)
+# else
+ void GC_register_displacement(offset)
+ GC_word offset;
+# endif
{
# ifndef ALL_INTERIOR_POINTERS
DCL_LOCK_STATE;
diff --git a/os_dep.c b/os_dep.c
index 89932bd8..5d5d1861 100644
--- a/os_dep.c
+++ b/os_dep.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
@@ -10,17 +10,43 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:10 pm PDT */
-# if !defined(OS2) && !defined(PCR) && !defined(AMIGA)
+/* Boehm, October 3, 1995 6:39 pm PDT */
+
+# include "gc_priv.h"
+# ifdef LINUX
+ /* Ugly hack to get struct sigcontext_struct definition. Required */
+ /* for some early 1.3.X releases. Will hopefully go away soon. */
+  /* In some later Linux releases, asm/sigcontext.h may have to */
+ /* be included instead. */
+# define __KERNEL__
+# include <asm/signal.h>
+# undef __KERNEL__
+# endif
+# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS)
# include <sys/types.h>
# endif
-# include "gc_priv.h"
# include <stdio.h>
# include <signal.h>
 /* Blatantly OS dependent routines, except for those that are related */
 /* to dynamic loading. */
+# if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
+# define NEED_FIND_LIMIT
+# endif
+
+# if defined(SUNOS4) & defined(DYNAMIC_LOADING)
+# define NEED_FIND_LIMIT
+# endif
+
+# if defined(SVR4) || defined(AUX) || defined(DGUX)
+# define NEED_FIND_LIMIT
+# endif
+
+#ifdef NEED_FIND_LIMIT
+# include <setjmp.h>
+#endif
+
#ifdef FREEBSD
# include <machine/trap.h>
#endif
@@ -38,10 +64,23 @@
# include <windows.h>
#endif
+#ifdef MACOS
+# include <Processes.h>
+#endif
+
#ifdef IRIX5
# include <sys/uio.h>
#endif
+#ifdef SUNOS5SIGS
+# include <sys/siginfo.h>
+# undef setjmp
+# undef longjmp
+# define setjmp(env) sigsetjmp(env, 1)
+# define longjmp(env, val) siglongjmp(env, val)
+# define jmp_buf sigjmp_buf
+#endif
+
#ifdef PCR
# include "il/PCR_IL.h"
# include "th/PCR_ThCtl.h"
@@ -108,13 +147,6 @@ struct o32_obj {
# else /* IBM's compiler */
-# define INCL_DOSEXCEPTIONS
-# define INCL_DOSPROCESS
-# define INCL_DOSERRORS
-# define INCL_DOSMODULEMGR
-# define INCL_DOSMEMMGR
-# include <os2.h>
-
/* A kludge to get around what appears to be a header file bug */
# ifndef WORD
# define WORD unsigned short
@@ -129,6 +161,14 @@ struct o32_obj {
# endif /* __IBMC__ */
+# define INCL_DOSEXCEPTIONS
+# define INCL_DOSPROCESS
+# define INCL_DOSERRORS
+# define INCL_DOSMODULEMGR
+# define INCL_DOSMEMMGR
+# include <os2.h>
+
+
/* Disable and enable signals during nontrivial allocations */
void GC_disable_signals(void)
@@ -150,7 +190,8 @@ void GC_enable_signals(void)
# else
-# if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32)
+# if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
+ && !defined(MACOS) && !defined(DJGPP)
# ifdef sigmask
/* Use the traditional BSD interface */
@@ -235,7 +276,7 @@ void GC_enable_signals()
/* Get the page size. */
word GC_page_size = 0;
-word GC_get_page_size()
+word GC_getpagesize()
{
SYSTEM_INFO sysinfo;
@@ -275,7 +316,7 @@ ptr_t GC_get_stack_base()
{
int dummy;
ptr_t sp = (ptr_t)(&dummy);
- ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_get_page_size() - 1));
+ ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_getpagesize() - 1));
word size = GC_get_writable_length(trunc_sp, 0);
return(trunc_sp + size);
@@ -329,18 +370,11 @@ ptr_t GC_get_stack_base()
# else
-# if !defined(THREADS) && !defined(STACKBOTTOM) && defined(HEURISTIC2)
-# define NEED_FIND_LIMIT
-# endif
-# if defined(SUNOS4) & defined(DYNAMIC_LOADING)
-# define NEED_FIND_LIMIT
-# endif
# ifdef NEED_FIND_LIMIT
/* Some tools to implement HEURISTIC2 */
# define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
-# include <setjmp.h>
/* static */ jmp_buf GC_jmp_buf;
/*ARGSUSED*/
@@ -356,6 +390,47 @@ ptr_t GC_get_stack_base()
typedef void (*handler)();
# endif
+# ifdef SUNOS5SIGS
+ static struct sigaction oldact;
+# else
+ static handler old_segv_handler, old_bus_handler;
+# endif
+
+ GC_setup_temporary_fault_handler()
+ {
+# ifdef SUNOS5SIGS
+ struct sigaction act;
+
+ act.sa_handler = GC_fault_handler;
+ act.sa_flags = SA_RESTART | SA_SIGINFO | SA_NODEFER;
+ /* The presence of SA_NODEFER represents yet another gross */
+ /* hack. Under Solaris 2.3, siglongjmp doesn't appear to */
+ /* interact correctly with -lthread. We hide the confusion */
+ /* by making sure that signal handling doesn't affect the */
+ /* signal mask. */
+
+ (void) sigemptyset(&act.sa_mask);
+ (void) sigaction(SIGSEGV, &act, &oldact);
+# else
+ old_segv_handler = signal(SIGSEGV, GC_fault_handler);
+# ifdef SIGBUS
+ old_bus_handler = signal(SIGBUS, GC_fault_handler);
+# endif
+# endif
+ }
+
+ GC_reset_fault_handler()
+ {
+# ifdef SUNOS5SIGS
+ (void) sigaction(SIGSEGV, &oldact, 0);
+# else
+ (void) signal(SIGSEGV, old_segv_handler);
+# ifdef SIGBUS
+ (void) signal(SIGBUS, old_bus_handler);
+# endif
+# endif
+ }
+
 /* Return the first nonaddressable location > p (up) or    */
 /* the smallest location q s.t. [q,p] is addressable (!up). */
ptr_t GC_find_limit(p, up)
@@ -368,13 +443,8 @@ ptr_t GC_get_stack_base()
/* static since it's only called once, with the */
/* allocation lock held. */
- static handler old_segv_handler, old_bus_handler;
- /* See above for static declaration. */
- old_segv_handler = signal(SIGSEGV, GC_fault_handler);
-# ifdef SIGBUS
- old_bus_handler = signal(SIGBUS, GC_fault_handler);
-# endif
+ GC_setup_temporary_fault_handler();
if (setjmp(GC_jmp_buf) == 0) {
result = (ptr_t)(((word)(p))
& ~(MIN_PAGE_SIZE-1));
@@ -387,10 +457,7 @@ ptr_t GC_get_stack_base()
GC_noop(*result);
}
}
- (void) signal(SIGSEGV, old_segv_handler);
-# ifdef SIGBUS
- (void) signal(SIGBUS, old_bus_handler);
-# endif
+ GC_reset_fault_handler();
if (!up) {
result += MIN_PAGE_SIZE;
}
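
A minimal standalone sketch of the probing idiom behind GC_setup_temporary_fault_handler and GC_find_limit in the hunks above, for readers skimming the diff. It is illustration only, not part of the patch: it assumes plain POSIX signal()/setjmp() behavior and ignores the SIGBUS and sigsetjmp/SA_NODEFER cases the real code handles.

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    #define PROBE_STEP 4096            /* plays the role of MIN_PAGE_SIZE above */

    static jmp_buf probe_buf;

    static void probe_fault(int sig)
    {
        (void)sig;
        longjmp(probe_buf, 1);         /* unwind out of the faulting access */
    }

    /* Return (approximately) the first unreadable address at or above p. */
    static char *find_limit_sketch(char *p)
    {
        void (*old_segv)(int) = signal(SIGSEGV, probe_fault);
        char * volatile q = p;         /* volatile so the value survives longjmp */

        if (setjmp(probe_buf) == 0) {
            for (;;) {
                (void)*(volatile char *)q;   /* may fault */
                q += PROBE_STEP;
            }
        }
        signal(SIGSEGV, old_segv);     /* restore the previous handler */
        return q;
    }

    int main(void)
    {
        int dummy;
        /* Roughly what HEURISTIC2 does: probe upward from a local variable to */
        /* guess the hot end of the stack (stack assumed to grow down).        */
        printf("stack limit near %p\n", (void *)find_limit_sketch((char *)&dummy));
        return 0;
    }
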
@@ -404,7 +471,7 @@ ptr_t GC_get_stack_base()
word dummy;
ptr_t result;
-# define STACKBOTTOM_ALIGNMENT_M1 0xffffff
+# define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
# ifdef STACKBOTTOM
return(STACKBOTTOM);
@@ -422,9 +489,22 @@ ptr_t GC_get_stack_base()
# ifdef HEURISTIC2
# ifdef STACK_GROWS_DOWN
result = GC_find_limit((ptr_t)(&dummy), TRUE);
+# ifdef HEURISTIC2_LIMIT
+ if (result > HEURISTIC2_LIMIT
+ && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
+ result = HEURISTIC2_LIMIT;
+ }
+# endif
# else
result = GC_find_limit((ptr_t)(&dummy), FALSE);
+# ifdef HEURISTIC2_LIMIT
+ if (result < HEURISTIC2_LIMIT
+ && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
+ result = HEURISTIC2_LIMIT;
+ }
+# endif
# endif
+
# endif /* HEURISTIC2 */
return(result);
# endif /* STACKBOTTOM */
@@ -528,7 +608,7 @@ void GC_register_data_segments()
GC_err_printf0("Object with invalid pages?\n");
continue;
}
- GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg));
+ GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
}
}
@@ -544,9 +624,17 @@ void GC_register_data_segments()
 /* We register the main data segment here.  */
bool GC_win32s = FALSE; /* We're running under win32s. */
+ bool GC_is_win32s()
+ {
+ DWORD v = GetVersion();
+
+ /* Check that this is not NT, and Windows major version <= 3 */
+ return ((v & 0x80000000) && (v & 0xff) <= 3);
+ }
+
void GC_init_win32()
{
- if (GetVersion() & 0x80000000) GC_win32s = TRUE;
+ GC_win32s = GC_is_win32s();
}
/* Return the smallest address a such that VirtualQuery */
@@ -563,12 +651,12 @@ void GC_register_data_segments()
GetSystemInfo(&sysinfo);
limit = sysinfo.lpMinimumApplicationAddress;
- p = (ptr_t)((word)start & ~(GC_get_page_size() - 1));
+ p = (ptr_t)((word)start & ~(GC_getpagesize() - 1));
for (;;) {
- q = (LPVOID)(p - GC_get_page_size());
+ q = (LPVOID)(p - GC_getpagesize());
if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
result = VirtualQuery(q, &buf, sizeof(buf));
- if (result != sizeof(buf)) break;
+ if (result != sizeof(buf) || buf.AllocationBase == 0) break;
p = (ptr_t)(buf.AllocationBase);
}
return(p);
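
GC_least_described_address above and the root-registration loop further down both step through the address space by AllocationBase/RegionSize. A hypothetical Win32-only sketch of that enumeration, not taken from the patch, which simply prints every committed region:

    #include <windows.h>
    #include <stdio.h>

    int main(void)
    {
        SYSTEM_INFO sysinfo;
        MEMORY_BASIC_INFORMATION buf;
        char *p;

        GetSystemInfo(&sysinfo);
        p = (char *)sysinfo.lpMinimumApplicationAddress;
        while (p < (char *)sysinfo.lpMaximumApplicationAddress) {
            if (VirtualQuery(p, &buf, sizeof buf) != sizeof buf) break;
            if (buf.State == MEM_COMMIT) {
                printf("%p .. %p  protect=0x%lx\n",
                       buf.BaseAddress,
                       (void *)((char *)buf.BaseAddress + buf.RegionSize),
                       (unsigned long)buf.Protect);
            }
            p = (char *)buf.BaseAddress + buf.RegionSize;   /* next region */
        }
        return 0;
    }
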
@@ -578,19 +666,23 @@ void GC_register_data_segments()
/* heap sections? */
bool GC_is_heap_base (ptr_t p)
{
- static ptr_t malloc_heap_pointer = 0;
+
register unsigned i;
- register DWORD result;
- if (malloc_heap_pointer = 0) {
- MEMORY_BASIC_INFORMATION buf;
- result = VirtualQuery(malloc(1), &buf, sizeof(buf));
- if (result != sizeof(buf)) {
- ABORT("Weird VirtualQuery result");
- }
- malloc_heap_pointer = (ptr_t)(buf.AllocationBase);
- }
- if (p == malloc_heap_pointer) return(TRUE);
+# ifndef REDIRECT_MALLOC
+ static ptr_t malloc_heap_pointer = 0;
+
+ if (0 == malloc_heap_pointer) {
+ MEMORY_BASIC_INFORMATION buf;
+ register DWORD result = VirtualQuery(malloc(1), &buf, sizeof(buf));
+
+ if (result != sizeof(buf)) {
+ ABORT("Weird VirtualQuery result");
+ }
+ malloc_heap_pointer = (ptr_t)(buf.AllocationBase);
+ }
+ if (p == malloc_heap_pointer) return(TRUE);
+# endif
for (i = 0; i < GC_n_heap_bases; i++) {
if (GC_heap_bases[i] == p) return(TRUE);
}
@@ -612,7 +704,8 @@ void GC_register_data_segments()
GetSystemInfo(&sysinfo);
while (p < sysinfo.lpMaximumApplicationAddress) {
result = VirtualQuery(p, &buf, sizeof(buf));
- if (result != sizeof(buf) || GC_is_heap_base(buf.AllocationBase)) break;
+ if (result != sizeof(buf) || buf.AllocationBase == 0
+ || GC_is_heap_base(buf.AllocationBase)) break;
new_limit = (char *)p + buf.RegionSize;
protect = buf.Protect;
if (buf.State == MEM_COMMIT
@@ -620,7 +713,7 @@ void GC_register_data_segments()
if ((char *)p == limit) {
limit = new_limit;
} else {
- if (base != limit) GC_add_roots_inner(base, limit);
+ if (base != limit) GC_add_roots_inner(base, limit, FALSE);
base = p;
limit = new_limit;
}
@@ -628,7 +721,7 @@ void GC_register_data_segments()
if (p > (LPVOID)new_limit /* overflow */) break;
p = (LPVOID)new_limit;
}
- if (base != limit) GC_add_roots_inner(base, limit);
+ if (base != limit) GC_add_roots_inner(base, limit, FALSE);
}
void GC_register_data_segments()
@@ -670,25 +763,89 @@ void GC_register_data_segments()
for (data = (ULONG *)BADDR(myseglist); data != 0;
data = (ULONG *)BADDR(data[0])) {
- GC_add_roots_inner((char *)&data[1], ((char *)&data[1]) + data[-1]);
+# ifdef AMIGA_SKIP_SEG
+ if (((ULONG) GC_register_data_segments < (ULONG) &data[1]) ||
+ ((ULONG) GC_register_data_segments > (ULONG) &data[1] + data[-1])) {
+# else
+ {
+# endif /* AMIGA_SKIP_SEG */
+ GC_add_roots_inner((char *)&data[1],
+ ((char *)&data[1]) + data[-1], FALSE);
+ }
}
}
# else
+# if defined(SVR4) || defined(AUX) || defined(DGUX)
+char * GC_SysVGetDataStart(max_page_size, etext_addr)
+int max_page_size;
+int * etext_addr;
+{
+ word text_end = ((word)(etext_addr) + sizeof(word) - 1)
+ & ~(sizeof(word) - 1);
+ /* etext rounded to word boundary */
+ word next_page = ((text_end + (word)max_page_size - 1)
+ & ~((word)max_page_size - 1));
+ word page_offset = (text_end & ((word)max_page_size - 1));
+ VOLATILE char * result = (char *)(next_page + page_offset);
+    /* Note that this isn't equivalent to just adding            */
+    /* max_page_size to &etext if &etext is at a page boundary.  */
+
+ GC_setup_temporary_fault_handler();
+ if (setjmp(GC_jmp_buf) == 0) {
+ /* Try writing to the address. */
+ *result = *result;
+ } else {
+ /* We got here via a longjmp. The address is not readable. */
+ /* This is known to happen under Solaris 2.4 + gcc, which place */
+ /* string constants in the text segment, but after etext. */
+ /* Use plan B. Note that we now know there is a gap between */
+ /* text and data segments, so plan A bought us something. */
+ result = (char *)GC_find_limit((ptr_t)(DATAEND) - MIN_PAGE_SIZE, FALSE);
+ }
+ GC_reset_fault_handler();
+ return((char *)result);
+}
+# endif
+
+
void GC_register_data_segments()
{
-# ifndef NEXT
- extern int end;
-# endif
-
-# if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT)
- GC_add_roots_inner(DATASTART, (char *)(&end));
+# if !defined(PCR) && !defined(SRC_M3) && !defined(NEXT) && !defined(MACOS)
+# if defined(REDIRECT_MALLOC) && defined(SOLARIS_THREADS)
+ /* As of Solaris 2.3, the Solaris threads implementation */
+ /* allocates the data structure for the initial thread with */
+ /* sbrk at process startup. It needs to be scanned, so that */
+ /* we don't lose some malloc allocated data structures */
+ /* hanging from it. We're on thin ice here ... */
+ extern caddr_t sbrk();
+
+ GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
+# else
+ GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
+# endif
# endif
# if !defined(PCR) && defined(NEXT)
- GC_add_roots_inner(DATASTART, (char *) get_end());
+ GC_add_roots_inner(DATASTART, (char *) get_end(), FALSE);
# endif
+# if defined(MACOS)
+ {
+# if defined(THINK_C)
+ extern void* GC_MacGetDataStart(void);
+ /* globals begin above stack and end at a5. */
+ GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
+ (ptr_t)LMGetCurrentA5(), FALSE);
+# else
+# if defined(__MWERKS__)
+ extern long __datastart, __dataend;
+ GC_add_roots_inner((ptr_t)&__datastart, (ptr_t)&__dataend, FALSE);
+# endif
+# endif
+ }
+# endif /* MACOS */
+
/* Dynamic libraries are added at every collection, since they may */
/* change. */
}
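
The page arithmetic in GC_SysVGetDataStart above is easy to misread, so here is the same computation in isolation as a hypothetical standalone sketch with a worked example; the fault-handler probe and plan-B fallback from the patch are omitted.

    #include <stdio.h>

    /* Round the end of text up to the next page boundary while keeping   */
    /* the in-page offset, to guess the start of the writable data seg.   */
    static unsigned long guess_data_start(unsigned long etext_addr,
                                          unsigned long max_page_size)
    {
        unsigned long text_end = (etext_addr + sizeof(unsigned long) - 1)
                                 & ~(sizeof(unsigned long) - 1);
        unsigned long next_page = (text_end + max_page_size - 1)
                                  & ~(max_page_size - 1);
        unsigned long page_offset = text_end & (max_page_size - 1);

        return next_page + page_offset;
    }

    int main(void)
    {
        /* e.g. etext at 0x10f3c with 4K pages -> data assumed at 0x11f3c */
        printf("0x%lx\n", guess_data_start(0x10f3cUL, 0x1000UL));
        return 0;
    }
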
@@ -701,7 +858,8 @@ void GC_register_data_segments()
* Auxiliary routines for obtaining memory from OS.
*/
-# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32)
+# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
+ && !defined(MSWIN32) && !defined(MACOS)
extern caddr_t sbrk();
# ifdef __STDC__
@@ -757,7 +915,7 @@ word bytes;
# endif
-# ifdef __OS2__
+# ifdef OS2
void * os2_alloc(size_t bytes)
{
@@ -894,7 +1052,7 @@ extern void GC_push_finalizer_structures();
/* From stubborn.c: */
# ifdef STUBBORN_ALLOC
- extern extern_ptr_t * GC_changing_list_start;
+ extern GC_PTR * GC_changing_list_start;
# endif
@@ -953,7 +1111,7 @@ void (*GC_push_other_roots)() = GC_default_push_other_roots;
* or write only to the stack.
*/
-bool GC_dirty_maintained;
+bool GC_dirty_maintained = FALSE;
# ifdef DEFAULT_VDB
@@ -1036,9 +1194,42 @@ struct hblk *h;
* not to work under a number of other systems.
*/
-# include <sys/mman.h>
-# include <signal.h>
-# include <sys/syscall.h>
+# ifndef MSWIN32
+
+# include <sys/mman.h>
+# include <signal.h>
+# include <sys/syscall.h>
+
+# define PROTECT(addr, len) \
+ if (mprotect((caddr_t)(addr), (int)(len), \
+ PROT_READ | PROT_EXEC) < 0) { \
+ ABORT("mprotect failed"); \
+ }
+# define UNPROTECT(addr, len) \
+ if (mprotect((caddr_t)(addr), (int)(len), \
+ PROT_WRITE | PROT_READ | PROT_EXEC) < 0) { \
+ ABORT("un-mprotect failed"); \
+ }
+
+# else
+
+# include <signal.h>
+
+ static DWORD protect_junk;
+# define PROTECT(addr, len) \
+ if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
+ &protect_junk)) { \
+ DWORD last_error = GetLastError(); \
+ GC_printf1("Last error code: %lx\n", last_error); \
+ ABORT("VirtualProtect failed"); \
+ }
+# define UNPROTECT(addr, len) \
+ if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
+ &protect_junk)) { \
+ ABORT("un-VirtualProtect failed"); \
+ }
+
+# endif
VOLATILE page_hash_table GC_dirty_pages;
/* Pages dirtied since last GC_read_dirty. */
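
As a companion to the PROTECT/UNPROTECT macros just added, here is a hypothetical, self-contained sketch of the mprotect-based dirty-bit trick on a single anonymous page. It uses POSIX sigaction with SA_SIGINFO to obtain the faulting address; the patch itself mostly uses signal(), keeps a page hash table instead of a flag, and on some systems the fault arrives as SIGBUS rather than SIGSEGV. MAP_ANONYMOUS may be spelled MAP_ANON on older systems.

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static long page_size;
    static char *region;
    static volatile int page_dirty;    /* the real code keeps a page hash table */

    static void write_fault(int sig, siginfo_t *si, void *context)
    {
        unsigned long a = (unsigned long)si->si_addr;
        char *page = (char *)(a & ~(unsigned long)(page_size - 1));

        (void)sig; (void)context;
        page_dirty = 1;                                     /* record the dirty page */
        mprotect(page, page_size, PROT_READ | PROT_WRITE);  /* let the write retry   */
    }

    int main(void)
    {
        struct sigaction sa;

        page_size = sysconf(_SC_PAGESIZE);
        region = mmap(0, page_size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        memset(&sa, 0, sizeof sa);
        sa.sa_sigaction = write_fault;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &sa, 0);

        mprotect(region, page_size, PROT_READ);  /* "PROTECT": start the cycle clean  */
        region[10] = 'x';                        /* faults once, is recorded, retries */
        printf("page dirty: %d\n", page_dirty);
        return 0;
    }
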
@@ -1048,7 +1239,7 @@ word GC_page_size;
bool GC_just_outside_heap(addr)
word addr;
{
- register int i;
+ register unsigned i;
register word start;
register word end;
word mask = GC_page_size-1;
@@ -1067,16 +1258,27 @@ word addr;
#if defined(SUNOS4) || defined(FREEBSD)
typedef void (* SIG_PF)();
#endif
-
-#if defined(ALPHA) /* OSF1 */
+#if defined(SUNOS5SIGS) || defined(ALPHA) /* OSF1 */ || defined(LINUX)
typedef void (* SIG_PF)(int);
#endif
+#if defined(MSWIN32)
+ typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
+# undef SIG_DFL
+# define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
+#endif
+
#if defined(IRIX5) || defined(ALPHA) /* OSF1 */
typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
#endif
+#if defined(SUNOS5SIGS)
+ typedef void (* REAL_SIG_PF)(int, struct siginfo *, void *);
+#endif
+#if defined(LINUX)
+ typedef void (* REAL_SIG_PF)(int, struct sigcontext_struct);
+# endif
SIG_PF GC_old_bus_handler;
-SIG_PF GC_old_segv_handler;
+SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
/*ARGSUSED*/
# if defined (SUNOS4) || defined(FREEBSD)
@@ -1106,14 +1308,45 @@ SIG_PF GC_old_segv_handler;
# define CODE_OK (code == EACCES)
# endif
# endif
+# if defined(LINUX)
+ void GC_write_fault_handler(int sig, struct sigcontext_struct sc)
+# define SIG_OK (sig == SIGSEGV)
+# define CODE_OK TRUE
+      /* Empirically sc.trapno == 14, but is that useful?     */
+ /* We assume Intel architecture, so alignment */
+ /* faults are not possible. */
+# endif
+# if defined(SUNOS5SIGS)
+ void GC_write_fault_handler(int sig, struct siginfo *scp, void * context)
+# define SIG_OK (sig == SIGSEGV)
+# define CODE_OK (scp -> si_code == SEGV_ACCERR)
+# endif
+# if defined(MSWIN32)
+ LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
+# define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
+ EXCEPTION_ACCESS_VIOLATION)
+# define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
+ /* Write fault */
+# endif
{
- register int i;
+ register unsigned i;
# ifdef IRIX5
char * addr = (char *) (scp -> sc_badvaddr);
# endif
# ifdef ALPHA
char * addr = (char *) (scp -> sc_traparg_a0);
# endif
+# ifdef SUNOS5SIGS
+ char * addr = (char *) (scp -> si_addr);
+# endif
+# ifdef LINUX
+ char * addr = (char *) (sc.cr2);
+# endif
+# if defined(MSWIN32)
+ char * addr = (char *) (exc_info -> ExceptionRecord
+ -> ExceptionInformation[1]);
+# define sig SIGSEGV
+# endif
if (SIG_OK && CODE_OK) {
register struct hblk * h =
@@ -1128,14 +1361,31 @@ SIG_PF GC_old_segv_handler;
old_handler = GC_old_bus_handler;
}
if (old_handler == SIG_DFL) {
- ABORT("Unexpected bus error or segmentation fault");
+# ifndef MSWIN32
+ ABORT("Unexpected bus error or segmentation fault");
+# else
+ return(EXCEPTION_CONTINUE_SEARCH);
+# endif
} else {
# if defined (SUNOS4) || defined(FREEBSD)
- (*old_handler) (sig, code, scp, addr);
-# else
- (*(REAL_SIG_PF)old_handler) (sig, code, scp);
+ (*old_handler) (sig, code, scp, addr);
+ return;
+# endif
+# if defined (SUNOS5SIGS)
+ (*(REAL_SIG_PF)old_handler) (sig, scp, context);
+ return;
+# endif
+# if defined (LINUX)
+ (*(REAL_SIG_PF)old_handler) (sig, sc);
+ return;
+# endif
+# if defined (IRIX5) || defined(ALPHA)
+ (*(REAL_SIG_PF)old_handler) (sig, code, scp);
+ return;
+# endif
+# ifdef MSWIN32
+ return((*old_handler)(exc_info));
# endif
- return;
}
}
for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
@@ -1143,17 +1393,18 @@ SIG_PF GC_old_segv_handler;
set_pht_entry_from_index(GC_dirty_pages, index);
}
- if (mprotect((caddr_t)h, (int)GC_page_size,
- PROT_WRITE | PROT_READ | PROT_EXEC) < 0) {
- ABORT("mprotect failed in handler");
- }
-# if defined(IRIX5) || defined(ALPHA)
- /* IRIX resets the signal handler each time. */
+ UNPROTECT(h, GC_page_size);
+# if defined(IRIX5) || defined(ALPHA) || defined(LINUX)
+ /* These reset the signal handler each time by default. */
signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
# endif
/* The write may not take place before dirty bits are read. */
/* But then we'll fault again ... */
- return;
+# ifdef MSWIN32
+ return(EXCEPTION_CONTINUE_EXECUTION);
+# else
+ return;
+# endif
}
ABORT("Unexpected bus error or segmentation fault");
@@ -1162,11 +1413,13 @@ SIG_PF GC_old_segv_handler;
void GC_write_hint(h)
struct hblk *h;
{
- register struct hblk * h_trunc =
- (struct hblk *)((word)h & ~(GC_page_size-1));
- register int i;
- register bool found_clean = FALSE;
+ register struct hblk * h_trunc;
+ register unsigned i;
+ register bool found_clean;
+ if (!GC_dirty_maintained) return;
+ h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
+ found_clean = FALSE;
for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
register int index = PHT_HASH(h_trunc+i);
@@ -1176,17 +1429,38 @@ struct hblk *h;
}
}
if (found_clean) {
- if (mprotect((caddr_t)h_trunc, (int)GC_page_size,
- PROT_WRITE | PROT_READ | PROT_EXEC) < 0) {
- ABORT("mprotect failed in GC_write_hint");
- }
+ UNPROTECT(h_trunc, GC_page_size);
}
}
+
+#if defined(SUNOS5) || defined(DRSNX)
+#include <unistd.h>
+int
+GC_getpagesize()
+{
+ return sysconf(_SC_PAGESIZE);
+}
+#else
+# ifdef MSWIN32
+ /* GC_getpagesize() defined above */
+# else
+# define GC_getpagesize() getpagesize()
+# endif
+#endif
void GC_dirty_init()
{
+#if defined(SUNOS5SIGS)
+ struct sigaction act, oldact;
+ act.sa_sigaction = GC_write_fault_handler;
+ act.sa_flags = SA_RESTART | SA_SIGINFO;
+ (void)sigemptyset(&act.sa_mask);
+#endif
+# ifdef PRINTSTATS
+    GC_printf0("Initializing mprotect virtual dirty bit implementation\n");
+# endif
GC_dirty_maintained = TRUE;
- GC_page_size = getpagesize();
+ GC_page_size = GC_getpagesize();
if (GC_page_size % HBLKSIZE != 0) {
GC_err_printf0("Page size not multiple of HBLKSIZE\n");
ABORT("Page size not multiple of HBLKSIZE");
@@ -1195,7 +1469,7 @@ void GC_dirty_init()
GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
if (GC_old_bus_handler == SIG_IGN) {
GC_err_printf0("Previously ignored bus error!?");
- GC_old_bus_handler == SIG_DFL;
+ GC_old_bus_handler = SIG_DFL;
}
if (GC_old_bus_handler != SIG_DFL) {
# ifdef PRINTSTATS
@@ -1203,11 +1477,11 @@ void GC_dirty_init()
# endif
}
# endif
-# if defined(IRIX5) || defined(ALPHA) || defined(SUNOS4)
+# if defined(IRIX5) || defined(ALPHA) || defined(SUNOS4) || defined(LINUX)
GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
if (GC_old_segv_handler == SIG_IGN) {
GC_err_printf0("Previously ignored segmentation violation!?");
- GC_old_segv_handler == SIG_DFL;
+ GC_old_segv_handler = SIG_DFL;
}
if (GC_old_segv_handler != SIG_DFL) {
# ifdef PRINTSTATS
@@ -1215,6 +1489,33 @@ void GC_dirty_init()
# endif
}
# endif
+# if defined(SUNOS5SIGS)
+ sigaction(SIGSEGV, &act, &oldact);
+ if (oldact.sa_flags & SA_SIGINFO) {
+ GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
+ } else {
+ GC_old_segv_handler = oldact.sa_handler;
+ }
+ if (GC_old_segv_handler == SIG_IGN) {
+ GC_err_printf0("Previously ignored segmentation violation!?");
+ GC_old_segv_handler = SIG_DFL;
+ }
+ if (GC_old_segv_handler != SIG_DFL) {
+# ifdef PRINTSTATS
+ GC_err_printf0("Replaced other SIGSEGV handler\n");
+# endif
+ }
+# endif
+# if defined(MSWIN32)
+ GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
+ if (GC_old_segv_handler != NULL) {
+# ifdef PRINTSTATS
+ GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
+# endif
+ } else {
+ GC_old_segv_handler = SIG_DFL;
+ }
+# endif
}
@@ -1226,16 +1527,14 @@ void GC_protect_heap()
ptr_t start;
word offset;
word len;
- int i;
+ unsigned i;
for (i = 0; i < GC_n_heap_sects; i++) {
offset = (word)(GC_heap_sects[i].hs_start) & pmask;
start = GC_heap_sects[i].hs_start - offset;
len = GC_heap_sects[i].hs_bytes + offset;
len += ps-1; len &= ~pmask;
- if (mprotect((caddr_t)start, (int)len, PROT_READ | PROT_EXEC) < 0) {
- ABORT("mprotect failed");
- }
+ PROTECT(start, len);
}
}
@@ -1246,9 +1545,9 @@ void GC_protect_heap()
void GC_read_dirty()
{
- BCOPY(GC_dirty_pages, GC_grungy_pages,
+ BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
(sizeof GC_dirty_pages));
- BZERO(GC_dirty_pages, (sizeof GC_dirty_pages));
+ BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
GC_protect_heap();
}
@@ -1300,14 +1599,11 @@ word len;
set_pht_entry_from_index(GC_dirty_pages, index);
}
- if (mprotect((caddr_t)start_block,
- (int)((ptr_t)end_block - (ptr_t)start_block)
- + HBLKSIZE,
- PROT_WRITE | PROT_READ | PROT_EXEC) < 0) {
- ABORT("mprotect failed in GC_unprotect_range");
- }
+ UNPROTECT(start_block,
+ ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
}
+#ifndef MSWIN32
/* Replacement for UNIX system call. */
/* Other calls that write to the heap */
/* should be handled similarly. */
@@ -1345,6 +1641,7 @@ int nbyte;
GC_end_syscall();
return(result);
}
+#endif /* !MSWIN32 */
/*ARGSUSED*/
bool GC_page_was_ever_dirty(h)
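
The #ifndef MSWIN32 block above guards the collector's read() replacement; the idea is that a system call writing into protected heap pages must unprotect them first, since a fault taken inside the kernel surfaces as an error return from the call rather than reaching the SIGSEGV handler. A hypothetical sketch of that shape (checked_read is not a GC function, and a real version would also mark the pages dirty, as GC_unprotect_range does):

    #include <sys/types.h>
    #include <sys/mman.h>
    #include <unistd.h>

    ssize_t checked_read(int fd, void *buf, size_t nbyte, unsigned long page_size)
    {
        unsigned long start = (unsigned long)buf & ~(page_size - 1);
        unsigned long end   = ((unsigned long)buf + nbyte + page_size - 1)
                              & ~(page_size - 1);

        /* "UNPROTECT" every page covering [buf, buf+nbyte) */
        (void)mprotect((void *)start, (size_t)(end - start),
                       PROT_READ | PROT_WRITE | PROT_EXEC);
        return read(fd, buf, nbyte);
    }
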
@@ -1384,7 +1681,8 @@ word n;
#include <sys/stat.h>
#include <fcntl.h>
-#define BUFSZ 20000
+#define INITIAL_BUF_SZ 4096
+word GC_proc_buf_size = INITIAL_BUF_SZ;
char *GC_proc_buf;
page_hash_table GC_written_pages = { 0 }; /* Pages ever dirtied */
@@ -1438,11 +1736,11 @@ void GC_dirty_init()
if (fd < 0) {
ABORT("/proc open failed");
}
- GC_proc_fd = ioctl(fd, PIOCOPENPD, 0);
+ GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
if (GC_proc_fd < 0) {
ABORT("/proc ioctl failed");
}
- GC_proc_buf = GC_scratch_alloc(BUFSZ);
+ GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
# ifdef SOLARIS_THREADS
GC_fresh_pages = (struct hblk **)
GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
@@ -1461,6 +1759,12 @@ struct hblk *h;
{
}
+#ifdef SOLARIS_THREADS
+# define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
+#else
+# define READ(fd,buf,nbytes) read(fd, buf, nbytes)
+#endif
+
void GC_read_dirty()
{
unsigned long ps, np;
@@ -1470,12 +1774,36 @@ void GC_read_dirty()
char * bufp;
ptr_t current_addr, limit;
int i;
+int dummy;
BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
bufp = GC_proc_buf;
- if (read(GC_proc_fd, bufp, BUFSZ) <= 0) {
- ABORT("/proc read failed: BUFSZ too small?\n");
+ if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
+# ifdef PRINTSTATS
+ GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
+ GC_proc_buf_size);
+# endif
+ {
+ /* Retry with larger buffer. */
+ word new_size = 2 * GC_proc_buf_size;
+ char * new_buf = GC_scratch_alloc(new_size);
+
+ if (new_buf != 0) {
+ GC_proc_buf = bufp = new_buf;
+ GC_proc_buf_size = new_size;
+ }
+ if (syscall(SYS_read, GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
+ WARN("Insufficient space for /proc read\n", 0);
+ /* Punt: */
+ memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
+# ifdef SOLARIS_THREADS
+ BZERO(GC_fresh_pages,
+ MAX_FRESH_PAGES * sizeof (struct hblk *));
+# endif
+ return;
+ }
+ }
}
/* Copy dirty bits into GC_grungy_pages */
nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
@@ -1524,6 +1852,8 @@ void GC_read_dirty()
# endif
}
+#undef READ
+
bool GC_page_was_dirty(h)
struct hblk *h;
{
@@ -1555,6 +1885,7 @@ struct hblk *h;
return(result);
}
+/* Caller holds allocation lock. */
void GC_is_fresh(h, n)
struct hblk *h;
word n;
@@ -1567,7 +1898,7 @@ word n;
if (GC_fresh_pages != 0) {
for (i = 0; i < n; i++) {
- PAGE_IS_FRESH(h + n);
+ ADD_FRESH_PAGE(h + i);
}
}
# endif
@@ -1640,6 +1971,70 @@ struct hblk *h;
# endif /* PCR_VDB */
+/*
+ * Call stack save code for debugging.
+ * Should probably be in mach_dep.c, but that requires reorganization.
+ */
+#if defined(SPARC)
+# if defined(SUNOS4)
+# include <machine/frame.h>
+# else
+# if defined (DRSNX)
+# include <sys/sparc/frame.h>
+# else
+# include <sys/frame.h>
+# endif
+# endif
+# if NARGS > 6
+   --> We only know how to get the first 6 arguments
+# endif
+
+/* Fill in the pc and argument information for up to NFRAMES of my */
+/* callers. Ignore my frame and my caller's frame. */
+void GC_save_callers (info)
+struct callinfo info[NFRAMES];
+{
+ struct frame *frame;
+ struct frame *fp;
+ int nframes = 0;
+ word GC_save_regs_in_stack();
+
+ frame = (struct frame *) GC_save_regs_in_stack ();
+
+ for (fp = frame -> fr_savfp; fp != 0 && nframes < NFRAMES;
+ fp = fp -> fr_savfp, nframes++) {
+ register int i;
+
+ info[nframes].ci_pc = fp->fr_savpc;
+ for (i = 0; i < NARGS; i++) {
+ info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
+ }
+ }
+ if (nframes < NFRAMES) info[nframes].ci_pc = 0;
+}
+
+#endif /* SPARC */
+
+#ifdef SAVE_CALL_CHAIN
+
+void GC_print_callers (info)
+struct callinfo info[NFRAMES];
+{
+ register int i,j;
+
+ GC_err_printf0("\tCall chain at allocation:\n");
+ for (i = 0; i < NFRAMES; i++) {
+ if (info[i].ci_pc == 0) break;
+ GC_err_printf0("\t\targs: ");
+ for (j = 0; j < NARGS; j++) {
+ if (j != 0) GC_err_printf0(", ");
+ GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
+ ~(info[i].ci_arg[j]));
+ }
+ GC_err_printf1("\n\t\t##PC##= 0x%X\n", info[i].ci_pc);
+ }
+}
+#endif /* SAVE_CALL_CHAIN */
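
An aside on the call-chain code above: GC_save_callers stores each saved argument word one's-complemented (~) and GC_print_callers complements it again before printing. A guess at the motivation, shown as a tiny sketch rather than asserted: disguising a word this way keeps a conservative collector from mistaking the saved value for a live pointer and retaining the object it refers to, similar in spirit to the pointer-hiding idiom used elsewhere in the collector's headers.

    #include <stdio.h>

    typedef unsigned long hidden_word;   /* stand-in for the collector's word type */

    static hidden_word hide(void *p)        { return ~(hidden_word)p; }
    static void *      reveal(hidden_word w){ return (void *)~w; }

    int main(void)
    {
        int object = 42;
        hidden_word disguised = hide(&object);   /* no longer looks like a pointer */

        printf("recovered: %p\n", reveal(disguised));
        return 0;
    }
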
diff --git a/pc_excludes b/pc_excludes
index 6f1465fa..52da4311 100644
--- a/pc_excludes
+++ b/pc_excludes
@@ -7,10 +7,9 @@ alpha_mach_dep.s
sparc_mach_dep.s
PCR-Makefile
setjmp_t.c
-SMakefile.amiga
-SCoptions.amiga
-README.amiga
callprocs
gc.man
pc_excludes
barrett_diagram
+include/gc_c++.h
+include/gc_inline.h
\ No newline at end of file
diff --git a/pcr_interface.c b/pcr_interface.c
index 0985c8f8..f4a11789 100644
--- a/pcr_interface.c
+++ b/pcr_interface.c
@@ -10,7 +10,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 1:59 pm PDT */
+/* Boehm, April 14, 1995 3:10 pm PDT */
# include "gc_priv.h"
# ifdef PCR
@@ -20,9 +20,12 @@
* We wrap all of the allocator functions to avoid questions of
 * compatibility between the prototyped and nonprototyped versions of the functions.
*/
+# include "config/PCR_StdTypes.h"
# include "mm/PCR_MM.h"
+# include <errno.h>
# define MY_MAGIC 17L
+# define MY_DEBUGMAGIC 42L
void * GC_AllocProc(size_t size, PCR_Bool ptrFree, PCR_Bool clear )
{
@@ -35,9 +38,26 @@ void * GC_AllocProc(size_t size, PCR_Bool ptrFree, PCR_Bool clear )
}
}
+void * GC_DebugAllocProc(size_t size, PCR_Bool ptrFree, PCR_Bool clear )
+{
+ if (ptrFree) {
+ void * result = (void *)GC_debug_malloc_atomic(size, __FILE__,
+ __LINE__);
+ if (clear && result != 0) BZERO(result, size);
+ return(result);
+ } else {
+ return((void *)GC_debug_malloc(size, __FILE__, __LINE__));
+ }
+}
+
# define GC_ReallocProc GC_realloc
+void * GC_DebugReallocProc(void * old_object, size_t new_size_in_bytes)
+{
+ return(GC_debug_realloc(old_object, new_size_in_bytes, __FILE__, __LINE__));
+}
# define GC_FreeProc GC_free
+# define GC_DebugFreeProc GC_debug_free
typedef struct {
PCR_ERes (*ed_proc)(void *p, size_t size, PCR_Any data);
@@ -107,8 +127,47 @@ struct PCR_MM_ProcsRep GC_Rep = {
GC_DummyShutdownProc /* mmp_shutdown */
};
+struct PCR_MM_ProcsRep GC_DebugRep = {
+ MY_DEBUGMAGIC,
+ GC_DebugAllocProc,
+ GC_DebugReallocProc,
+ GC_DummyFreeProc, /* mmp_free */
+ GC_DebugFreeProc, /* mmp_unsafeFree */
+ GC_EnumerateProc,
+ GC_DummyShutdownProc /* mmp_shutdown */
+};
+
+bool GC_use_debug = 0;
+
void GC_pcr_install()
{
- PCR_MM_Install(&GC_Rep, &GC_old_allocator);
+ PCR_MM_Install((GC_use_debug? &GC_DebugRep : &GC_Rep), &GC_old_allocator);
+}
+
+PCR_ERes
+PCR_GC_Setup(void)
+{
+ return PCR_ERes_okay;
}
+
+PCR_ERes
+PCR_GC_Run(void)
+{
+
+ if( !PCR_Base_TestPCRArg("-nogc") ) {
+ GC_quiet = ( PCR_Base_TestPCRArg("-gctrace") ? 0 : 1 );
+ GC_use_debug = (bool)PCR_Base_TestPCRArg("-debug_alloc");
+ GC_init();
+ if( !PCR_Base_TestPCRArg("-nogc_incremental") ) {
+ /*
+ * awful hack to test whether VD is implemented ...
+ */
+ if( PCR_VD_Start( 0, NIL, 0) != PCR_ERes_FromErr(ENOSYS) ) {
+ GC_enable_incremental();
+ }
+ }
+ }
+ return PCR_ERes_okay;
+}
+
# endif
diff --git a/ptr_chck.c b/ptr_chck.c
new file mode 100644
index 00000000..4d2cc8b9
--- /dev/null
+++ b/ptr_chck.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+/* Boehm, September 19, 1995 1:26 pm PDT */
+
+#include "gc_priv.h"
+#include "gc_mark.h"
+
+void GC_default_same_obj_print_proc(p,q)
+ptr_t p, q;
+{
+ GC_err_printf2("0x%lx and 0x%lx are not in the same object\n",
+ (unsigned long)p, (unsigned long)q);
+ ABORT("GC_same_obj test failed");
+}
+
+void (*GC_same_obj_print_proc)() = GC_default_same_obj_print_proc;
+
+/* Check that p and q point to the same object. Call */
+/* *GC_same_obj_print_proc if they don't. */
+/* Returns the first argument. (Return value may be hard */
+/* to use, due to typing issues. But if we had a suitable      */
+/* preprocessor ...) */
+/* Succeeds if neither p nor q points to the heap. */
+/* We assume this is performance critical. (It shouldn't */
+/* be called by production code, but this can easily make */
+/* debugging intolerably slow.) */
+#ifdef __STDC__
+ GC_PTR GC_same_obj(register void *p, register void *q)
+#else
+ GC_PTR GC_same_obj(p, q)
+ register char *p, *q;
+#endif
+{
+ register struct hblk *h;
+ register hdr *hhdr;
+ register ptr_t base, limit;
+ register word sz;
+
+ if (!GC_is_initialized) GC_init();
+ hhdr = HDR((word)p);
+ if (hhdr == 0) {
+ if (divHBLKSZ((word)p) != divHBLKSZ((word)q)
+ && HDR((word)q) != 0) {
+ goto fail;
+ }
+ return(p);
+ }
+ /* If it's a pointer to the middle of a large object, move it */
+ /* to the beginning. */
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ h = HBLKPTR(p) - (word)hhdr;
+ hhdr = HDR(h);
+ while (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ h = FORWARDED_ADDR(h, hhdr);
+ hhdr = HDR(h);
+ }
+ limit = (ptr_t)((word *)h + HDR_WORDS + hhdr -> hb_sz);
+ if ((ptr_t)p >= limit || (ptr_t)q >= limit || (ptr_t)q < (ptr_t)h ) {
+ goto fail;
+ }
+ return(p);
+ }
+ sz = WORDS_TO_BYTES(hhdr -> hb_sz);
+ if (sz > WORDS_TO_BYTES(MAXOBJSZ)) {
+ base = (ptr_t)HBLKPTR(p);
+ limit = base + sz;
+ if ((ptr_t)p >= limit) {
+ goto fail;
+ }
+ } else {
+# ifdef ALL_INTERIOR_POINTERS
+ register map_entry_type map_entry;
+ register int pdispl;
+
+ pdispl = HBLKDISPL(p);
+ map_entry = MAP_ENTRY((hhdr -> hb_map), pdispl);
+ if (map_entry == OBJ_INVALID) {
+ goto fail;
+ } else {
+ base = (char *)((word)p & ~(WORDS_TO_BYTES(1) - 1));
+ base -= WORDS_TO_BYTES(map_entry);
+ }
+# else
+ register int offset = HBLKDISPL(p) - HDR_BYTES;
+ register word correction = offset % sz;
+
+ if (HBLKPTR(p) != HBLKPTR(q)) {
+ /* The following computation otherwise fails in this case */
+ goto fail;
+ }
+ base = (ptr_t)p - correction;
+# endif
+ limit = base + sz;
+ }
+ /* [base, limit) delimits the object containing p, if any. */
+ /* If p is not inside a valid object, then either q is */
+ /* also outside any valid object, or it is outside */
+ /* [base, limit). */
+ if ((ptr_t)q >= limit || (ptr_t)q < base) {
+ goto fail;
+ }
+ return(p);
+fail:
+ (*GC_same_obj_print_proc)((ptr_t)p, (ptr_t)q);
+ return(p);
+}
+
+
+void GC_default_is_valid_displacement_print_proc(p)
+ptr_t p;
+{
+    GC_err_printf1("0x%lx does not point to a valid object displacement\n",
+ (unsigned long)p);
+ ABORT("GC_is_valid_displacement test failed");
+}
+
+void (*GC_is_valid_displacement_print_proc)() =
+ GC_default_is_valid_displacement_print_proc;
+
+/* Check that if p is a pointer to a heap page, then it points to */
+/* a valid displacement within a heap object. */
+/* Uninteresting with ALL_INTERIOR_POINTERS. */
+/* Always returns its argument. */
+/* Note that we don't lock, since nothing relevant about the header */
+/* should change while we have a valid object pointer to the block. */
+#ifdef __STDC__
+ void * GC_is_valid_displacement(void *p)
+#else
+ char *GC_is_valid_displacement(p)
+ char *p;
+#endif
+{
+ register hdr *hhdr;
+ register word pdispl;
+ register struct hblk *h;
+ register map_entry_type map_entry;
+ register word sz;
+
+ if (!GC_is_initialized) GC_init();
+ hhdr = HDR((word)p);
+ if (hhdr == 0) return(p);
+ h = HBLKPTR(p);
+# ifdef ALL_INTERIOR_POINTERS
+ while (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ h = FORWARDED_ADDR(h, hhdr);
+ hhdr = HDR(h);
+ }
+# endif
+ if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
+ goto fail;
+ }
+ sz = WORDS_TO_BYTES(hhdr -> hb_sz);
+ pdispl = HBLKDISPL(p);
+ map_entry = MAP_ENTRY((hhdr -> hb_map), pdispl);
+ if (map_entry == OBJ_INVALID
+ || sz > MAXOBJSZ && (ptr_t)p >= (ptr_t)h + sz) {
+ goto fail;
+ }
+ return(p);
+fail:
+ (*GC_is_valid_displacement_print_proc)((ptr_t)p);
+ return(p);
+}
+
+
+void GC_default_is_visible_print_proc(p)
+ptr_t p;
+{
+ GC_err_printf1("0x%lx is not a GC visible pointer location\n",
+ (unsigned long)p);
+ ABORT("GC_is_visible test failed");
+}
+
+void (*GC_is_visible_print_proc)() =
+ GC_default_is_visible_print_proc;
+
+/* Could p be a stack address? */
+bool GC_on_stack(p)
+ptr_t p;
+{
+# ifdef THREADS
+ return(TRUE);
+# else
+ int dummy;
+# ifdef STACK_GROWS_DOWN
+ if ((ptr_t)p >= (ptr_t)(&dummy) && (ptr_t)p < GC_stackbottom ) {
+ return(TRUE);
+ }
+# else
+ if ((ptr_t)p <= (ptr_t)(&dummy) && (ptr_t)p > GC_stackbottom ) {
+ return(TRUE);
+ }
+# endif
+ return(FALSE);
+# endif
+}
+
+/* Check that p is visible */
+/* to the collector as a possibly pointer-containing location.         */
+/* If it isn't, invoke *GC_is_visible_print_proc.                      */
+/* Returns the argument in all cases. May erroneously succeed */
+/* in hard cases. (This is intended for debugging use with */
+/* untyped allocations. The idea is that it should be possible, though */
+/* slow, to add such a call to all indirect pointer stores.) */
+/* Currently useless for multithreaded worlds. */
+#ifdef __STDC__
+ void * GC_is_visible(void *p)
+#else
+ char *GC_is_visible(p)
+ char *p;
+#endif
+{
+ register hdr *hhdr;
+
+ if ((word)p & (ALIGNMENT - 1)) goto fail;
+ if (!GC_is_initialized) GC_init();
+# ifdef THREADS
+ hhdr = HDR((word)p);
+ if (hhdr != 0 && GC_base(p) == 0) {
+ goto fail;
+ } else {
+ /* May be inside thread stack. We can't do much. */
+ return(p);
+ }
+# else
+ /* Check stack first: */
+ if (GC_on_stack(p)) return(p);
+ hhdr = HDR((word)p);
+ if (hhdr == 0) {
+ bool result;
+
+ if (GC_is_static_root(p)) return(p);
+ /* Else do it again correctly: */
+# if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(PCR)) \
+ && !defined(SRC_M3)
+ DISABLE_SIGNALS();
+ GC_register_dynamic_libraries();
+ result = GC_is_static_root(p);
+ ENABLE_SIGNALS();
+ if (result) return(p);
+# endif
+ goto fail;
+ } else {
+ /* p points to the heap. */
+ word descr;
+ ptr_t base = GC_base(p); /* Should be manually inlined? */
+
+ if (base == 0) goto fail;
+ if (HBLKPTR(base) != HBLKPTR(p)) hhdr = HDR((word)p);
+ descr = hhdr -> hb_descr;
+ retry:
+ switch(descr & DS_TAGS) {
+ case DS_LENGTH:
+ if ((word)((ptr_t)p - (ptr_t)base) > (word)descr) goto fail;
+ break;
+ case DS_BITMAP:
+ if ((ptr_t)p - (ptr_t)base
+ >= WORDS_TO_BYTES(BITMAP_BITS)
+ || ((word)p & (sizeof(word) - 1))) goto fail;
+ if (!((1 << (WORDSZ - ((ptr_t)p - (ptr_t)base) - 1))
+ & descr)) goto fail;
+ break;
+ case DS_PROC:
+ /* We could try to decipher this partially. */
+ /* For now we just punt. */
+ break;
+ case DS_PER_OBJECT:
+ descr = *(word *)((ptr_t)base + (descr & ~DS_TAGS));
+ goto retry;
+ }
+ return(p);
+ }
+# endif
+fail:
+ (*GC_is_visible_print_proc)((ptr_t)p);
+ return(p);
+}
+
+
+GC_PTR GC_pre_incr (p, how_much)
+GC_PTR *p;
+size_t how_much;
+{
+ GC_PTR initial = *p;
+ GC_PTR result = GC_same_obj((GC_PTR)((word)initial + how_much), initial);
+
+# ifndef ALL_INTERIOR_POINTERS
+ (void) GC_is_valid_displacement(result);
+# endif
+ return (*p = result);
+}
+
+GC_PTR GC_post_incr (p, how_much)
+GC_PTR *p;
+size_t how_much;
+{
+ GC_PTR initial = *p;
+ GC_PTR result = GC_same_obj((GC_PTR)((word)initial + how_much), initial);
+
+# ifndef ALL_INTERIOR_POINTERS
+ (void) GC_is_valid_displacement(result);
+# endif
+ *p = result;
+ return(initial);
+}
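
Since ptr_chck.c is a new file, a brief usage sketch may help: the checking entry points return their argument, so a debugging build can wrap them around existing pointer expressions. The snippet below is illustrative only; it assumes the gc.h declarations touched elsewhere in this change (GC_MALLOC, GC_PTR, and the checking functions defined above).

    #include "gc.h"

    struct node { struct node *next; int payload[8]; };

    void check_example(void)
    {
        struct node *n = (struct node *)GC_MALLOC(sizeof(struct node));
        GC_PTR p = (GC_PTR)(n->payload);

        GC_same_obj(p, (GC_PTR)n);          /* p and n must lie in the same object */
        p = GC_pre_incr(&p, sizeof(int));   /* advance p, checking the result      */
        GC_is_valid_displacement(p);        /* p must be a recognized displacement */
        GC_is_visible((GC_PTR)&n);          /* &n must be a location the GC scans  */
    }
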
diff --git a/reclaim.c b/reclaim.c
index 004cbf1d..7dbd1349 100644
--- a/reclaim.c
+++ b/reclaim.c
@@ -11,7 +11,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:00 pm PDT */
+/* Boehm, April 18, 1995 1:59 pm PDT */
#include <stdio.h>
#include "gc_priv.h"
@@ -532,6 +532,7 @@ word abort_if_found; /* Abort if a reclaimable object is found */
}
}
+#if !defined(NO_DEBUGGING)
/* Routines to gather and print heap block info */
/* intended for debugging. Otherwise should be called */
/* with lock. */
@@ -593,6 +594,8 @@ void GC_print_block_list()
(unsigned long)total_bytes);
}
+#endif /* NO_DEBUGGING */
+
/*
* Do the same thing on the entire heap, after first clearing small object
* free lists (if we are not just looking for leaks).
@@ -606,9 +609,11 @@ int abort_if_found; /* Abort if a GC_reclaimable object is found */
for (kind = 0; kind < GC_n_kinds; kind++) {
register ptr_t *fop;
register ptr_t *lim;
- register struct hblk ** hbpp;
- register struct hblk ** hlim;
-
+ register struct hblk ** rlp;
+ register struct hblk ** rlim;
+ register struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
+
+ if (rlist == 0) continue; /* This kind not used. */
if (!abort_if_found) {
lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJSZ+1]);
for( fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++ ) {
@@ -616,10 +621,9 @@ int abort_if_found; /* Abort if a GC_reclaimable object is found */
}
} /* otherwise free list objects are marked, */
 	  /* and it's safe to leave them 		*/
- hlim = &(GC_obj_kinds[kind].ok_reclaim_list[MAXOBJSZ+1]);
- for( hbpp = GC_obj_kinds[kind].ok_reclaim_list;
- hbpp < hlim; hbpp++ ) {
- *hbpp = 0;
+ rlim = rlist + MAXOBJSZ+1;
+ for( rlp = rlist; rlp < rlim; rlp++ ) {
+ *rlp = 0;
}
}
@@ -646,10 +650,11 @@ int kind;
register hdr * hhdr;
register struct hblk * hbp;
register struct obj_kind * ok = &(GC_obj_kinds[kind]);
- struct hblk ** rlh = &(ok -> ok_reclaim_list[sz]);
+ struct hblk ** rlh = ok -> ok_reclaim_list;
ptr_t *flh = &(ok -> ok_freelist[sz]);
-
+ if (rlh == 0) return; /* No blocks of this kind. */
+ rlh += sz;
while ((hbp = *rlh) != 0) {
hhdr = HDR(hbp);
*rlh = hhdr -> hb_next;
@@ -663,7 +668,8 @@ int kind;
* Clear lists of blocks waiting to be reclaimed.
* Must be done before clearing mark bits with the world running,
* since otherwise a subsequent reclamation of block would see
- * the wrong mark bits.
+ * the wrong mark bits. (Alternatively, GC_reclaim_all
+ * may be used.)
* SHOULD PROBABLY BE INCREMENTAL
*/
void GC_reclaim_or_delete_all()
@@ -673,6 +679,7 @@ void GC_reclaim_or_delete_all()
register hdr * hhdr;
register struct hblk * hbp;
register struct obj_kind * ok;
+ struct hblk ** rlp;
struct hblk ** rlh;
# ifdef PRINTTIMES
CLOCK_TYPE start_time;
@@ -683,8 +690,10 @@ void GC_reclaim_or_delete_all()
for (kind = 0; kind < GC_n_kinds; kind++) {
ok = &(GC_obj_kinds[kind]);
+ rlp = ok -> ok_reclaim_list;
+ if (rlp == 0) continue;
for (sz = 1; sz <= MAXOBJSZ; sz++) {
- rlh = &(ok -> ok_reclaim_list[sz]);
+ rlh = rlp + sz;
while ((hbp = *rlh) != 0) {
hhdr = HDR(hbp);
*rlh = hhdr -> hb_next;
@@ -703,3 +712,48 @@ void GC_reclaim_or_delete_all()
MS_TIME_DIFF(done_time,start_time));
# endif
}
+
+/*
+ * Reclaim all small blocks waiting to be reclaimed.
+ * Abort and return FALSE when/if (*stop_func)() returns TRUE.
+ * If this returns TRUE, then it's safe to restart the world
+ * with incorrectly cleared mark bits.
+ */
+bool GC_reclaim_all(stop_func)
+GC_stop_func stop_func;
+{
+ register word sz;
+ register int kind;
+ register hdr * hhdr;
+ register struct hblk * hbp;
+ register struct obj_kind * ok;
+ struct hblk ** rlp;
+ struct hblk ** rlh;
+# ifdef PRINTTIMES
+ CLOCK_TYPE start_time;
+ CLOCK_TYPE done_time;
+
+ GET_TIME(start_time);
+# endif
+
+ for (kind = 0; kind < GC_n_kinds; kind++) {
+ ok = &(GC_obj_kinds[kind]);
+ rlp = ok -> ok_reclaim_list;
+ if (rlp == 0) continue;
+ for (sz = 1; sz <= MAXOBJSZ; sz++) {
+ rlh = rlp + sz;
+ while ((hbp = *rlh) != 0) {
+ if ((*stop_func)()) return(FALSE);
+ hhdr = HDR(hbp);
+ *rlh = hhdr -> hb_next;
+ GC_reclaim_small_nonempty_block(hbp, FALSE);
+ }
+ }
+ }
+# ifdef PRINTTIMES
+ GET_TIME(done_time);
+ GC_printf1("Disposing of reclaim lists took %lu msecs\n",
+ MS_TIME_DIFF(done_time,start_time));
+# endif
+ return(TRUE);
+}
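
GC_reclaim_all's stop_func parameter follows the collector's usual convention of a parameterless predicate returning nonzero when work should be abandoned. A hypothetical example of such a predicate; GC_reclaim_all itself is an internal interface, so this is illustrative only and the names below are not GC symbols.

    #include <time.h>

    /* Abandon sweeping once roughly 10 ms of CPU time have been spent. */
    static clock_t sweep_deadline;

    static int sweep_deadline_passed(void)
    {
        return clock() >= sweep_deadline;
    }

    /* A caller already holding the allocation lock might then do:      */
    /*     sweep_deadline = clock() + CLOCKS_PER_SEC / 100;             */
    /*     (void) GC_reclaim_all(sweep_deadline_passed);                */
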
diff --git a/setjmp_t.c b/setjmp_t.c
index 14dcd30c..e26f65fb 100644
--- a/setjmp_t.c
+++ b/setjmp_t.c
@@ -10,7 +10,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:01 pm PDT */
+/* Boehm, September 21, 1995 5:39 pm PDT */
/* Check whether setjmp actually saves registers in jmp_buf. */
/* If it doesn't, the generic mark_regs code won't work. */
@@ -24,14 +24,10 @@
/* code.) */
#include <stdio.h>
#include <setjmp.h>
+#include <string.h>
#include "config.h"
#ifdef __hpux
-/* X/OPEN PG3 defines "void* sbrk();" and this clashes with the definition */
-/* in gc_private.h, so we set the clock backwards with _CLASSIC_XOPEN_TYPES. */
-/* This is for HP-UX 8.0.
-/* sbrk() is not used in this file, of course. W. Underwood, 15 Jun 1992 */
-#define _CLASSIC_XOPEN_TYPES
#include <unistd.h>
int
getpagesize()
@@ -40,8 +36,7 @@ getpagesize()
}
#endif
-#if defined(SUNOS5)
-#define _CLASSIC_XOPEN_TYPES
+#if defined(SUNOS5) || defined(DRSNX)
#include <unistd.h>
int
getpagesize()
@@ -59,7 +54,7 @@ getpagesize()
}
#endif
-#ifdef AMIGA
+#if defined(AMIGA) || defined(MACOS)
int
getpagesize()
{
@@ -67,7 +62,7 @@ getpagesize()
}
#endif
-#ifdef __OS2__
+#ifdef OS2
#define INCL_DOSFILEMGR
#define INCL_DOSMISC
#define INCL_DOSERRORS
@@ -101,9 +96,10 @@ main()
int dummy;
long ps = getpagesize();
jmp_buf b;
- register int x = strlen("a"); /* 1, slightly disguised */
+ register int x = (int)strlen("a"); /* 1, slightly disguised */
static int y = 0;
+ printf("This appears to be a %s running %s\n", MACH_TYPE, OS_TYPE);
if (nested_sp() < &dummy) {
printf("Stack appears to grow down, which is the default.\n");
printf("A good guess for STACKBOTTOM on this machine is 0x%X.\n",
@@ -116,6 +112,7 @@ main()
}
printf("Note that this may vary between machines of ostensibly\n");
printf("the same architecture (e.g. Sun 3/50s and 3/80s).\n");
+ printf("On many machines the value is not fixed.\n");
printf("A good guess for ALIGNMENT on this machine is %d.\n",
(unsigned long)(&(a.a_b))-(unsigned long)(&a));
diff --git a/solaris_threads.c b/solaris_threads.c
index 94f461e8..08bf0131 100644
--- a/solaris_threads.c
+++ b/solaris_threads.c
@@ -14,29 +14,279 @@
* Support code for Solaris threads. Provides functionality we wish Sun
* had provided. Relies on some information we probably shouldn't rely on.
*/
-/* Boehm, May 19, 1994 2:05 pm PDT */
+/* Boehm, September 14, 1994 4:44 pm PDT */
# if defined(SOLARIS_THREADS)
# include "gc_priv.h"
# include <thread.h>
# include <synch.h>
+# include <signal.h>
+# include <fcntl.h>
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/resource.h>
+# include <sys/stat.h>
+# include <sys/syscall.h>
+# include <sys/procfs.h>
+# include <sys/lwp.h>
+# include <sys/reg.h>
# define _CLASSIC_XOPEN_TYPES
# include <unistd.h>
+# define MAX_LWPS 32
+
#undef thr_join
#undef thr_create
#undef thr_suspend
#undef thr_continue
-mutex_t GC_thr_lock; /* Acquired before allocation lock */
-cond_t GC_prom_join_cv; /* Broadcast whenany thread terminates */
+cond_t GC_prom_join_cv; /* Broadcast when any thread terminates */
cond_t GC_create_cv; /* Signalled when a new undetached */
/* thread starts. */
+
+
+/* We use the allocation lock to protect thread-related data structures. */
+
+/* We stop the world using /proc primitives. This makes some */
+/* minimal assumptions about the threads implementation. */
+/* We don't play by the rules, since the rules make this */
+/* impossible (as of Solaris 2.3). Also note that as of */
+/* Solaris 2.3 the various thread and lwp suspension */
+/* primitives fail to stop threads by the time the request	*/
+/* completes.							*/
+
+
+static sigset_t old_mask;
+# define MAX_LWPS 32
+
+/* Sleep for n milliseconds, n < 1000 */
+void GC_msec_sleep(int n)
+{
+ struct timespec ts;
+
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1000000*n;
+ if (syscall(SYS_nanosleep, &ts, 0) < 0) {
+ ABORT("nanosleep failed");
+ }
+}
+/* Turn off preemption; gross but effective. */
+/* Caller has allocation lock. */
+/* Actually this is not needed under Solaris 2.3 and */
+/* 2.4, but hopefully that'll change. */
+void preempt_off()
+{
+ sigset_t set;
+
+ (void)sigfillset(&set);
+ syscall(SYS_sigprocmask, SIG_SETMASK, &set, &old_mask);
+}
+
+void preempt_on()
+{
+ syscall(SYS_sigprocmask, SIG_SETMASK, &old_mask, NULL);
+}
+
+int GC_main_proc_fd = -1;
+
+struct lwp_cache_entry {
+ lwpid_t lc_id;
+ int lc_descr; /* /proc file descriptor. */
+} GC_lwp_cache[MAX_LWPS];
+
+prgregset_t GC_lwp_registers[MAX_LWPS];
+
+/* Return a file descriptor for the /proc entry corresponding */
+/* to the given lwp. The file descriptor may be stale if the */
+/* lwp exited and a new one was forked. */
+static int open_lwp(lwpid_t id)
+{
+ int result;
+ static int next_victim = 0;
+ register int i;
+
+ for (i = 0; i < MAX_LWPS; i++) {
+ if (GC_lwp_cache[i].lc_id == id) return(GC_lwp_cache[i].lc_descr);
+ }
+ if ((result = syscall(SYS_ioctl, GC_main_proc_fd, PIOCOPENLWP, &id)) < 0) {
+ return(-1) /* exited? */;
+ }
+ if (GC_lwp_cache[next_victim].lc_id != 0)
+ (void)syscall(SYS_close, GC_lwp_cache[next_victim].lc_descr);
+ GC_lwp_cache[next_victim].lc_id = id;
+ GC_lwp_cache[next_victim].lc_descr = result;
+    if (++next_victim >= MAX_LWPS) next_victim = 0;  /* wrap around the cache */
+ return(result);
+}
+
+static void uncache_lwp(lwpid_t id)
+{
+ register int i;
+
+ for (i = 0; i < MAX_LWPS; i++) {
+ if (GC_lwp_cache[i].lc_id == id) {
+	    (void)syscall(SYS_close, GC_lwp_cache[i].lc_descr);
+ GC_lwp_cache[i].lc_id = 0;
+ break;
+ }
+ }
+}
+
+lwpid_t GC_current_ids[MAX_LWPS + 1]; /* Sequence of current lwp ids */
+
+/* Stop all lwps in process. Assumes preemption is off. */
+/* Caller has allocation lock (and any other locks he may */
+/* need). */
+static void stop_all_lwps()
+{
+ int lwp_fd;
+ char buf[30];
+ prstatus_t status;
+ lwpid_t last_ids[MAX_LWPS + 1];
+ register int i;
+ bool changed;
+ lwpid_t me = _lwp_self();
+
+ if (GC_main_proc_fd == -1) {
+ sprintf(buf, "/proc/%d", getpid());
+ GC_main_proc_fd = syscall(SYS_open, buf, O_RDONLY);
+ if (GC_main_proc_fd < 0) {
+ ABORT("/proc open failed");
+ }
+ }
+ if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCSTATUS, &status) < 0)
+ ABORT("Main PIOCSTATUS failed");
+ if (status.pr_nlwp < 1 || status.pr_nlwp > MAX_LWPS) {
+ ABORT("Too many lwps");
+ /* Only a heuristic. There seems to be no way to do this right, */
+ /* since there can be intervening forks. */
+ }
+ BZERO(GC_lwp_registers, sizeof GC_lwp_registers);
+ for (i = 0; i <= MAX_LWPS; i++) last_ids[i] = 0;
+ for (;;) {
+ if (syscall(SYS_ioctl, GC_main_proc_fd, PIOCLWPIDS, GC_current_ids) < 0) {
+ ABORT("PIOCLWPIDS failed");
+ }
+ changed = FALSE;
+ for (i = 0; GC_current_ids[i] != 0; i++) {
+ if (GC_current_ids[i] != last_ids[i]) {
+ changed = TRUE;
+ if (GC_current_ids[i] != me) {
+ /* PIOCSTOP doesn't work without a writable */
+ /* descriptor. And that makes the process */
+ /* undebuggable. */
+ if (_lwp_suspend(GC_current_ids[i]) < 0) {
+ /* Could happen if the lwp exited */
+ uncache_lwp(GC_current_ids[i]);
+ GC_current_ids[i] = me; /* ignore */
+ }
+ }
+ }
+ if (i >= MAX_LWPS) ABORT("Too many lwps");
+ }
+ /* All lwps in GC_current_ids != me have been suspended. Note */
+ /* that _lwp_suspend is idempotent. */
+ for (i = 0; GC_current_ids[i] != 0; i++) {
+ if (GC_current_ids[i] != last_ids[i]) {
+ if (GC_current_ids[i] != me) {
+ lwp_fd = open_lwp(GC_current_ids[i]);
+ /* LWP should be stopped. Empirically it sometimes */
+ /* isn't, and more frequently the PR_STOPPED flag */
+ /* is not set. Wait for PR_STOPPED. */
+ if (syscall(SYS_ioctl, lwp_fd,
+ PIOCSTATUS, &status) < 0) {
+ /* Possible if the descriptor was stale, or */
+ /* we encountered the 2.3 _lwp_suspend bug. */
+ uncache_lwp(GC_current_ids[i]);
+ GC_current_ids[i] = me; /* handle next time. */
+ } else {
+ while (!(status.pr_flags & PR_STOPPED)) {
+ GC_msec_sleep(1);
+ if (syscall(SYS_ioctl, lwp_fd,
+ PIOCSTATUS, &status) < 0) {
+ ABORT("Repeated PIOCSTATUS failed");
+ }
+ if (status.pr_flags & PR_STOPPED) break;
+
+ GC_msec_sleep(20);
+ if (syscall(SYS_ioctl, lwp_fd,
+ PIOCSTATUS, &status) < 0) {
+ ABORT("Repeated PIOCSTATUS failed");
+ }
+ }
+ if (status.pr_who != GC_current_ids[i]) {
+ ABORT("Wrong lwp");
+ }
+ /* Save registers where collector can */
+ /* find them. */
+ BCOPY(status.pr_reg, GC_lwp_registers[i],
+ sizeof (prgregset_t));
+ }
+ }
+ }
+ }
+ if (!changed) break;
+ for (i = 0; i <= MAX_LWPS; i++) last_ids[i] = GC_current_ids[i];
+ }
+}
+
+/* Restart all lwps in process. Assumes preemption is off. */
+static void restart_all_lwps()
+{
+ int lwp_fd;
+ register int i;
+ bool changed;
+ lwpid_t me = _lwp_self();
+# define PARANOID
+
+ for (i = 0; GC_current_ids[i] != 0; i++) {
+# ifdef PARANOID
+ if (GC_current_ids[i] != me) {
+ int lwp_fd = open_lwp(GC_current_ids[i]);
+ prstatus_t status;
+ gwindows_t windows;
+
+ if (lwp_fd < 0) ABORT("open_lwp failed");
+ if (syscall(SYS_ioctl, lwp_fd,
+ PIOCSTATUS, &status) < 0) {
+ ABORT("PIOCSTATUS failed in restart_all_lwps");
+ }
+ if (memcmp(status.pr_reg, GC_lwp_registers[i],
+ sizeof (prgregset_t)) != 0) {
+ ABORT("Register contents changed");
+ }
+	    if (!(status.pr_flags & PR_STOPPED)) {
+ ABORT("lwp no longer stopped");
+ }
+ if (syscall(SYS_ioctl, lwp_fd,
+ PIOCGWIN, &windows) < 0) {
+                ABORT("PIOCGWIN failed in restart_all_lwps");
+ }
+ if (windows.wbcnt > 0) ABORT("unsaved register windows");
+ }
+# endif /* PARANOID */
+ if (GC_current_ids[i] == me) continue;
+ if (_lwp_continue(GC_current_ids[i]) < 0) {
+ ABORT("Failed to restart lwp");
+ }
+ }
+ if (i >= MAX_LWPS) ABORT("Too many lwps");
+}
+
+
+void GC_stop_world()
+{
+ preempt_off();
+ stop_all_lwps();
+}
+
+void GC_start_world()
+{
+ restart_all_lwps();
+ preempt_on();
+}
bool GC_thr_initialized = FALSE;
@@ -44,6 +294,7 @@ size_t GC_min_stack_sz;
size_t GC_page_sz;
+
# define N_FREE_LISTS 25
ptr_t GC_stack_free_lists[N_FREE_LISTS] = { 0 };
/* GC_stack_free_lists[i] is free list for stacks of */
@@ -52,7 +303,7 @@ ptr_t GC_stack_free_lists[N_FREE_LISTS] = { 0 };
/* Return a stack of size at least *stack_size. *stack_size is */
/* replaced by the actual stack size. */
-/* Caller holds GC_thr_lock. */
+/* Caller holds allocation lock. */
ptr_t GC_stack_alloc(size_t * stack_size)
{
register size_t requested_sz = *stack_size;
@@ -75,7 +326,9 @@ ptr_t GC_stack_alloc(size_t * stack_size)
result = (ptr_t) GC_scratch_alloc(search_sz + 2*GC_page_sz);
result = (ptr_t)(((word)result + GC_page_sz) & ~(GC_page_sz - 1));
/* Protect hottest page to detect overflow. */
- mprotect(result, GC_page_sz, PROT_NONE);
+# ifdef SOLARIS23_MPROTECT_BUG_FIXED
+ mprotect(result, GC_page_sz, PROT_NONE);
+# endif
GC_is_fresh((struct hblk *)result, divHBLKSZ(search_sz));
result += GC_page_sz;
}
@@ -83,7 +336,7 @@ ptr_t GC_stack_alloc(size_t * stack_size)
return(result);
}
-/* Caller holds GC_thr_lock. */
+/* Caller holds allocation lock. */
void GC_stack_free(ptr_t stack, size_t size)
{
register int index = 0;
@@ -102,6 +355,7 @@ void GC_my_stack_limits();
/* Notify virtual dirty bit implementation that known empty parts of */
/* stacks do not contain useful data. */
+/* Caller holds allocation lock. */
void GC_old_stacks_are_fresh()
{
register int i;
@@ -153,8 +407,7 @@ typedef struct GC_Thread_Rep {
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
/* Add a thread to GC_threads. We assume it wasn't already there. */
-/* Caller holds GC_thr_lock if there is > 1 thread. */
-/* Initial caller may hold allocation lock. */
+/* Caller holds allocation lock. */
GC_thread GC_new_thread(thread_t id)
{
int hv = ((word)id) % THREAD_TABLE_SZ;
@@ -167,7 +420,8 @@ GC_thread GC_new_thread(thread_t id)
first_thread_used = TRUE;
 	/* Don't acquire allocation lock, since we may already hold it. */
} else {
- result = GC_NEW(struct GC_Thread_Rep);
+ result = (struct GC_Thread_Rep *)
+ GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
}
if (result == 0) return(0);
result -> id = id;
@@ -180,7 +434,7 @@ GC_thread GC_new_thread(thread_t id)
/* Delete a thread from GC_threads. We assume it is there. */
/* (The code intentionally traps if it wasn't.) */
-/* Caller holds GC_thr_lock. */
+/* Caller holds allocation lock. */
void GC_delete_thread(thread_t id)
{
int hv = ((word)id) % THREAD_TABLE_SZ;
@@ -200,7 +454,7 @@ void GC_delete_thread(thread_t id)
 /* Return the GC_thread corresponding to a given thread_t.    */
/* Returns 0 if it's not there. */
-/* Caller holds GC_thr_lock. */
+/* Caller holds allocation lock. */
GC_thread GC_lookup_thread(thread_t id)
{
int hv = ((word)id) % THREAD_TABLE_SZ;
@@ -211,6 +465,7 @@ GC_thread GC_lookup_thread(thread_t id)
}
/* Notify dirty bit implementation of unused parts of my stack. */
+/* Caller holds allocation lock. */
void GC_my_stack_limits()
{
int dummy;
@@ -238,46 +493,14 @@ void GC_my_stack_limits()
}
-/* Caller holds allocation lock. */
-void GC_stop_world()
-{
- thread_t my_thread = thr_self();
- register int i;
- register GC_thread p;
-
- for (i = 0; i < THREAD_TABLE_SZ; i++) {
- for (p = GC_threads[i]; p != 0; p = p -> next) {
- if (p -> id != my_thread && !(p -> flags & SUSPENDED)) {
- if (thr_suspend(p -> id) < 0) ABORT("thr_suspend failed");
- }
- }
- }
-}
-
-/* Caller holds allocation lock. */
-void GC_start_world()
-{
- thread_t my_thread = thr_self();
- register int i;
- register GC_thread p;
-
- for (i = 0; i < THREAD_TABLE_SZ; i++) {
- for (p = GC_threads[i]; p != 0; p = p -> next) {
- if (p -> id != my_thread && !(p -> flags & SUSPENDED)) {
- if (thr_continue(p -> id) < 0) ABORT("thr_continue failed");
- }
- }
- }
-}
-
+extern ptr_t GC_approx_sp();
+/* We hold allocation lock. We assume the world is stopped. */
void GC_push_all_stacks()
{
- /* We assume the world is stopped. */
register int i;
register GC_thread p;
- word dummy;
- register ptr_t sp = (ptr_t) (&dummy);
+ register ptr_t sp = GC_approx_sp();
register ptr_t bottom, top;
struct rlimit rl;
@@ -286,7 +509,7 @@ void GC_push_all_stacks()
GC_push_dirty((bottom), (top), GC_page_was_ever_dirty, \
GC_push_all_stack); \
} else { \
- GC_push_all((bottom), (top)); \
+ GC_push_all_stack((bottom), (top)); \
}
if (!GC_thr_initialized) GC_thr_init();
for (i = 0; i < THREAD_TABLE_SZ; i++) {
@@ -318,47 +541,47 @@ void * GC_thr_daemon(void * dummy)
for(;;) {
start:
result = thr_join((thread_t)0, &departed, &status);
- mutex_lock(&GC_thr_lock);
+ LOCK();
if (result != 0) {
/* No more threads; wait for create. */
for (i = 0; i < THREAD_TABLE_SZ; i++) {
for (t = GC_threads[i]; t != 0; t = t -> next) {
if (!(t -> flags & (DETACHED | FINISHED))) {
- mutex_unlock(&GC_thr_lock);
+ UNLOCK();
goto start; /* Thread started just before we */
/* acquired the lock. */
}
}
}
- cond_wait(&GC_create_cv, &GC_thr_lock);
- mutex_unlock(&GC_thr_lock);
- goto start;
- }
- t = GC_lookup_thread(departed);
- if (!(t -> flags & CLIENT_OWNS_STACK)) {
- GC_stack_free(t -> stack, t -> stack_size);
- }
- if (t -> flags & DETACHED) {
- GC_delete_thread(departed);
+ cond_wait(&GC_create_cv, &GC_allocate_ml);
+ UNLOCK();
} else {
- t -> status = status;
- t -> flags |= FINISHED;
- cond_signal(&(t -> join_cv));
- cond_broadcast(&GC_prom_join_cv);
+ t = GC_lookup_thread(departed);
+ if (!(t -> flags & CLIENT_OWNS_STACK)) {
+ GC_stack_free(t -> stack, t -> stack_size);
+ }
+ if (t -> flags & DETACHED) {
+ GC_delete_thread(departed);
+ } else {
+ t -> status = status;
+ t -> flags |= FINISHED;
+ cond_signal(&(t -> join_cv));
+ cond_broadcast(&GC_prom_join_cv);
+ }
+ UNLOCK();
}
- mutex_unlock(&GC_thr_lock);
}
}
+/* We hold the allocation lock. */
GC_thr_init()
{
GC_thread t;
- /* This gets called from the first thread creation, so */
- /* mutual exclusion is not an issue. */
+
GC_thr_initialized = TRUE;
- GC_min_stack_sz = ((thr_min_stack() + HBLKSIZE-1) & ~(HBLKSIZE - 1));
+ GC_min_stack_sz = ((thr_min_stack() + 128*1024 + HBLKSIZE-1)
+ & ~(HBLKSIZE - 1));
GC_page_sz = sysconf(_SC_PAGESIZE);
- mutex_init(&GC_thr_lock, USYNC_THREAD, 0);
cond_init(&GC_prom_join_cv, USYNC_THREAD, 0);
cond_init(&GC_create_cv, USYNC_THREAD, 0);
/* Add the initial thread, so we can stop it. */
@@ -375,12 +598,13 @@ GC_thr_init()
/* We acquire the allocation lock to prevent races with */
/* stopping/starting world. */
+/* This is no more correct than the underlying Solaris 2.X */
+/* implementation. Under 2.3 THIS IS BROKEN. */
int GC_thr_suspend(thread_t target_thread)
{
GC_thread t;
int result;
- mutex_lock(&GC_thr_lock);
LOCK();
result = thr_suspend(target_thread);
if (result == 0) {
@@ -389,7 +613,6 @@ int GC_thr_suspend(thread_t target_thread)
t -> flags |= SUSPENDED;
}
UNLOCK();
- mutex_unlock(&GC_thr_lock);
return(result);
}
@@ -398,7 +621,6 @@ int GC_thr_continue(thread_t target_thread)
GC_thread t;
int result;
- mutex_lock(&GC_thr_lock);
LOCK();
result = thr_continue(target_thread);
if (result == 0) {
@@ -407,7 +629,6 @@ int GC_thr_continue(thread_t target_thread)
t -> flags &= ~SUSPENDED;
}
UNLOCK();
- mutex_unlock(&GC_thr_lock);
return(result);
}
@@ -416,7 +637,7 @@ int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
register GC_thread t;
int result = 0;
- mutex_lock(&GC_thr_lock);
+ LOCK();
if (wait_for == 0) {
register int i;
register bool thread_exists;
@@ -437,7 +658,7 @@ int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
result = ESRCH;
goto out;
}
- cond_wait(&GC_prom_join_cv, &GC_thr_lock);
+ cond_wait(&GC_prom_join_cv, &GC_allocate_ml);
}
} else {
t = GC_lookup_thread(wait_for);
@@ -450,7 +671,7 @@ int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
goto out;
}
while (!(t -> flags & FINISHED)) {
- cond_wait(&(t -> join_cv), &GC_thr_lock);
+ cond_wait(&(t -> join_cv), &GC_allocate_ml);
}
}
@@ -460,7 +681,7 @@ int GC_thr_join(thread_t wait_for, thread_t *departed, void **status)
cond_destroy(&(t -> join_cv));
GC_delete_thread(t -> id);
out:
- mutex_unlock(&GC_thr_lock);
+ UNLOCK();
return(result);
}
@@ -476,13 +697,13 @@ GC_thr_create(void *stack_base, size_t stack_size,
word my_flags = 0;
void * stack = stack_base;
+ LOCK();
if (!GC_thr_initialized) GC_thr_init();
- mutex_lock(&GC_thr_lock);
if (stack == 0) {
if (stack_size == 0) stack_size = GC_min_stack_sz;
stack = (void *)GC_stack_alloc(&stack_size);
if (stack == 0) {
- mutex_unlock(&GC_thr_lock);
+ UNLOCK();
return(ENOMEM);
}
} else {
@@ -503,7 +724,7 @@ GC_thr_create(void *stack_base, size_t stack_size,
} else if (!(my_flags & CLIENT_OWNS_STACK)) {
GC_stack_free(stack, stack_size);
}
- mutex_unlock(&GC_thr_lock);
+ UNLOCK();
return(result);
}
@@ -514,3 +735,4 @@ GC_thr_create(void *stack_base, size_t stack_size,
#endif
# endif /* SOLARIS_THREADS */
+
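
For reference, a minimal client-side sketch of the suspend/continue wrappers patched above. Both now serialize on the collector's allocation lock rather than the old GC_thr_lock; the extern declarations below are copied from the hunks rather than taken from any particular header, so treat the sketch as illustrative only.

    /* Minimal sketch: suspend and resume a thread through the GC        */
    /* wrappers, so the collector's per-thread SUSPENDED flag stays in   */
    /* sync with the kernel state.                                       */
    #include <thread.h>

    extern int GC_thr_suspend(thread_t target_thread);
    extern int GC_thr_continue(thread_t target_thread);

    void pause_and_resume(thread_t tid)
    {
        if (GC_thr_suspend(tid) != 0) return;  /* sets SUSPENDED under the allocation lock   */
        /* ... examine the suspended thread here ... */
        (void) GC_thr_continue(tid);           /* clears SUSPENDED under the allocation lock */
    }
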
diff --git a/stubborn.c b/stubborn.c
index e674977b..ab228fba 100644
--- a/stubborn.c
+++ b/stubborn.c
@@ -11,7 +11,7 @@
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
-/* Boehm, May 19, 1994 2:11 pm PDT */
+/* Boehm, July 31, 1995 5:02 pm PDT */
#include "gc_priv.h"
@@ -22,24 +22,24 @@
extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */
#define GENERAL_MALLOC(lb,k) \
- (extern_ptr_t)GC_clear_stack(GC_generic_malloc((word)lb, k))
+ (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
/* Data structure representing immutable objects that */
/* are still being initialized. */
/* This is a bit baroque in order to avoid acquiring */
/* the lock twice for a typical allocation. */
-extern_ptr_t * GC_changing_list_start;
+GC_PTR * GC_changing_list_start;
# ifdef THREADS
- VOLATILE extern_ptr_t * VOLATILE GC_changing_list_current;
+ VOLATILE GC_PTR * VOLATILE GC_changing_list_current;
# else
- extern_ptr_t * GC_changing_list_current;
+ GC_PTR * GC_changing_list_current;
# endif
/* Points at last added element. Also (ab)used for */
/* synchronization. Updates and reads are assumed atomic. */
-extern_ptr_t * GC_changing_list_limit;
+GC_PTR * GC_changing_list_limit;
/* Points at the last word of the buffer, which is always 0 */
/* All entries in (GC_changing_list_current, */
/* GC_changing_list_limit] are 0 */
@@ -49,12 +49,12 @@ void GC_stubborn_init()
{
# define INIT_SIZE 10
- GC_changing_list_start = (extern_ptr_t *)
+ GC_changing_list_start = (GC_PTR *)
GC_generic_malloc_inner(
- (word)(INIT_SIZE * sizeof(extern_ptr_t)),
+ (word)(INIT_SIZE * sizeof(GC_PTR)),
PTRFREE);
BZERO(GC_changing_list_start,
- INIT_SIZE * sizeof(extern_ptr_t));
+ INIT_SIZE * sizeof(GC_PTR));
if (GC_changing_list_start == 0) {
GC_err_printf0("Insufficient space to start up\n");
ABORT("GC_stubborn_init: put of space");
@@ -75,24 +75,26 @@ void GC_stubborn_init()
/* Returns FALSE on failure. */
bool GC_compact_changing_list()
{
- register extern_ptr_t *p, *q;
+ register GC_PTR *p, *q;
register word count = 0;
- word old_size = GC_changing_list_limit-GC_changing_list_start+1;
+ word old_size = (char **)GC_changing_list_limit
+ - (char **)GC_changing_list_start+1;
+ /* The casts are needed as a workaround for an Amiga bug */
register word new_size = old_size;
- extern_ptr_t * new_list;
+ GC_PTR * new_list;
for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
if (*p != 0) count++;
}
if (2 * count > old_size) new_size = 2 * count;
- new_list = (extern_ptr_t *)
+ new_list = (GC_PTR *)
GC_generic_malloc_inner(
- new_size * sizeof(extern_ptr_t), PTRFREE);
+ new_size * sizeof(GC_PTR), PTRFREE);
/* PTRFREE is a lie. But we don't want the collector to */
/* consider these. We do want the list itself to be */
/* collectable. */
if (new_list == 0) return(FALSE);
- BZERO(new_list, new_size * sizeof(extern_ptr_t));
+ BZERO(new_list, new_size * sizeof(GC_PTR));
q = new_list;
for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
if (*p != 0) *q++ = *p;
@@ -118,7 +120,7 @@ bool GC_compact_changing_list()
*GC_changing_list_current = p;
void GC_change_stubborn(p)
-extern_ptr_t p;
+GC_PTR p;
{
DCL_LOCK_STATE;
@@ -130,12 +132,12 @@ extern_ptr_t p;
}
void GC_end_stubborn_change(p)
-extern_ptr_t p;
+GC_PTR p;
{
# ifdef THREADS
- register VOLATILE extern_ptr_t * my_current = GC_changing_list_current;
+ register VOLATILE GC_PTR * my_current = GC_changing_list_current;
# else
- register extern_ptr_t * my_current = GC_changing_list_current;
+ register GC_PTR * my_current = GC_changing_list_current;
# endif
register bool tried_quick;
DCL_LOCK_STATE;
@@ -182,9 +184,9 @@ extern_ptr_t p;
/* GC_end_stubborn_change(p) where p is the value */
/* returned by GC_malloc_stubborn. */
# ifdef __STDC__
- extern_ptr_t GC_malloc_stubborn(size_t lb)
+ GC_PTR GC_malloc_stubborn(size_t lb)
# else
- extern_ptr_t GC_malloc_stubborn(lb)
+ GC_PTR GC_malloc_stubborn(lb)
size_t lb;
# endif
{
@@ -198,7 +200,7 @@ DCL_LOCK_STATE;
# ifdef MERGE_SIZES
lw = GC_size_map[lb];
# else
- lw = ROUNDED_UP_WORDS(lb);
+ lw = ALIGNED_WORDS(lb);
# endif
opp = &(GC_sobjfreelist[lw]);
FASTLOCK();
@@ -210,12 +212,12 @@ DCL_LOCK_STATE;
*opp = obj_link(op);
obj_link(op) = 0;
GC_words_allocd += lw;
- result = (extern_ptr_t) op;
+ result = (GC_PTR) op;
ADD_CHANGING(result);
FASTUNLOCK();
- return((extern_ptr_t)result);
+ return((GC_PTR)result);
} else {
- result = (extern_ptr_t)
+ result = (GC_PTR)
GC_generic_malloc((word)lb, STUBBORN);
}
record:
@@ -224,7 +226,7 @@ record:
ADD_CHANGING(result);
UNLOCK();
ENABLE_SIGNALS();
- return((extern_ptr_t)GC_clear_stack(result));
+ return((GC_PTR)GC_clear_stack(result));
}
@@ -232,8 +234,8 @@ record:
/* Report pages on which stubborn objects were changed. */
void GC_read_changed()
{
- register extern_ptr_t * p = GC_changing_list_start;
- register extern_ptr_t q;
+ register GC_PTR * p = GC_changing_list_start;
+ register GC_PTR q;
register struct hblk * h;
register word index;
@@ -262,8 +264,8 @@ struct hblk * h;
/* called with mark bits consistent and lock held. */
void GC_clean_changing_list()
{
- register extern_ptr_t * p = GC_changing_list_start;
- register extern_ptr_t q;
+ register GC_PTR * p = GC_changing_list_start;
+ register GC_PTR q;
register ptr_t r;
register unsigned long count = 0;
register unsigned long dropped_count = 0;
@@ -290,9 +292,9 @@ void GC_clean_changing_list()
#else /* !STUBBORN_ALLOC */
# ifdef __STDC__
- extern_ptr_t GC_malloc_stubborn(size_t lb)
+ GC_PTR GC_malloc_stubborn(size_t lb)
# else
- extern_ptr_t GC_malloc_stubborn(lb)
+ GC_PTR GC_malloc_stubborn(lb)
size_t lb;
# endif
{
@@ -301,13 +303,13 @@ void GC_clean_changing_list()
/*ARGSUSED*/
void GC_end_stubborn_change(p)
-extern_ptr_t p;
+GC_PTR p;
{
}
/*ARGSUSED*/
void GC_change_stubborn(p)
-extern_ptr_t p;
+GC_PTR p;
{
}
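
The stubborn-object entry points above now traffic in GC_PTR rather than extern_ptr_t. The protocol they implement - allocate, initialize, declare the object unchanging, and re-open it only between GC_change_stubborn and GC_end_stubborn_change - is easy to get wrong, so here is a minimal sketch using only the functions defined in this file (assumed, as usual, to be declared in gc.h):

    /* Minimal sketch of the stubborn allocation protocol. */
    #include "gc.h"

    struct node { struct node *next; long value; };

    struct node *make_node(long v, struct node *next)
    {
        struct node *p = (struct node *)GC_malloc_stubborn(sizeof(struct node));
        if (p == 0) return 0;
        p->value = v;
        p->next = next;
        GC_end_stubborn_change((GC_PTR)p);   /* object is now treated as unchanging */
        return p;
    }

    void update_value(struct node *p, long v)
    {
        GC_change_stubborn((GC_PTR)p);       /* re-open the object for writes */
        p->value = v;
        GC_end_stubborn_change((GC_PTR)p);   /* and close it again            */
    }
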
diff --git a/test.c b/test.c
index 070d892e..035dad2b 100644
--- a/test.c
+++ b/test.c
@@ -5,14 +5,21 @@
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
- * Permission is hereby granted to copy this garbage collector for any purpose,
- * provided the above notices are retained on all copies.
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
*/
-/* Boehm, May 6, 1994 3:32 pm PDT */
+/* Boehm, September 21, 1995 5:43 pm PDT */
/* An incomplete test for the garbage collector. */
/* Some more obscure entry points are not tested at all. */
-# include <stdlib.h>
+# if defined(mips) && defined(SYSTYPE_BSD43)
+ /* MIPS RISCOS 4 */
+# else
+# include <stdlib.h>
+# endif
# include <stdio.h>
# include "gc.h"
# include "gc_typed.h"
@@ -54,17 +61,15 @@ struct SEXPR {
struct SEXPR * sexpr_cdr;
};
-# ifdef __STDC__
- typedef void * void_star;
-# else
- typedef char * void_star;
-# endif
typedef struct SEXPR * sexpr;
+# define INT_TO_SEXPR(x) ((sexpr)(unsigned long)(x))
+
extern sexpr cons();
-# define nil ((sexpr) 0)
+# undef nil
+# define nil (INT_TO_SEXPR(0))
# define car(x) ((x) -> sexpr_car)
# define cdr(x) ((x) -> sexpr_cdr)
# define is_nil(x) ((x) == nil)
@@ -139,7 +144,7 @@ sexpr y;
exit(1);
}
r -> sexpr_car = x;
- r -> sexpr_cdr = (sexpr) (~(unsigned long)y);
+ r -> sexpr_cdr = (sexpr)(~(unsigned long)y);
return(r);
}
@@ -166,7 +171,7 @@ int low, up;
if (low > up) {
return(nil);
} else {
- return(small_cons(small_cons((sexpr)low, (sexpr)0), ints(low+1, up)));
+ return(small_cons(small_cons(INT_TO_SEXPR(low), nil), ints(low+1, up)));
}
}
@@ -178,7 +183,7 @@ int low, up;
if (low > up) {
return(nil);
} else {
- return(small_cons_uncollectable(small_cons((sexpr)low, (sexpr)0),
+ return(small_cons_uncollectable(small_cons(INT_TO_SEXPR(low), nil),
uncollectable_ints(low+1, up)));
}
}
@@ -187,15 +192,15 @@ void check_ints(list, low, up)
sexpr list;
int low, up;
{
- if ((int)(car(car(list))) != low) {
+ if ((int)(GC_word)(car(car(list))) != low) {
(void)GC_printf0(
"List reversal produced incorrect list - collector is broken\n");
- exit(1);
+ FAIL;
}
if (low == up) {
if (cdr(list) != nil) {
(void)GC_printf0("List too long - collector is broken\n");
- exit(1);
+ FAIL;
}
} else {
check_ints(cdr(list), low+1, up);
@@ -208,15 +213,15 @@ void check_uncollectable_ints(list, low, up)
sexpr list;
int low, up;
{
- if ((int)(car(car(list))) != low) {
+ if ((int)(GC_word)(car(car(list))) != low) {
(void)GC_printf0(
"Uncollectable list corrupted - collector is broken\n");
- exit(1);
+ FAIL;
}
if (low == up) {
if (UNCOLLECTABLE_CDR(list) != nil) {
(void)GC_printf0("Uncollectable ist too long - collector is broken\n");
- exit(1);
+ FAIL;
}
} else {
check_uncollectable_ints(UNCOLLECTABLE_CDR(list), low+1, up);
@@ -258,40 +263,67 @@ void reverse_test()
sexpr c;
sexpr d;
sexpr e;
-# if defined(MSWIN32)
+ sexpr *f, *g, *h;
+# if defined(MSWIN32) || defined(MACOS)
/* Win32S only allows 128K stacks */
# define BIG 1000
# else
-# define BIG 4500
+# if defined PCR
+ /* PCR default stack is 100K. Stack frames are up to 120 bytes. */
+# define BIG 700
+# else
+# define BIG 4500
+# endif
# endif
+ A.dummy = 17;
a = ints(1, 49);
b = ints(1, 50);
c = ints(1, BIG);
d = uncollectable_ints(1, 100);
e = uncollectable_ints(1, 1);
+ /* Check that realloc updates object descriptors correctly */
+ f = (sexpr *)GC_malloc(4 * sizeof(sexpr));
+ f = (sexpr *)GC_realloc((GC_PTR)f, 6 * sizeof(sexpr));
+ f[5] = ints(1,17);
+ g = (sexpr *)GC_malloc(513 * sizeof(sexpr));
+ g = (sexpr *)GC_realloc((GC_PTR)g, 800 * sizeof(sexpr));
+ g[799] = ints(1,18);
+ h = (sexpr *)GC_malloc(1025 * sizeof(sexpr));
+ h = (sexpr *)GC_realloc((GC_PTR)h, 2000 * sizeof(sexpr));
+ h[1999] = ints(1,19);
+ /* Try to force some collections and reuse of small list elements */
+ for (i = 0; i < 10; i++) {
+ (void)ints(1, BIG);
+ }
/* Superficially test interior pointer recognition on stack */
c = (sexpr)((char *)c + sizeof(char *));
d = (sexpr)((char *)d + sizeof(char *));
+
# ifdef __STDC__
GC_FREE((void *)e);
# else
GC_FREE((char *)e);
# endif
+ check_ints(b,1,50);
+ check_ints(a,1,49);
for (i = 0; i < 50; i++) {
+ check_ints(b,1,50);
b = reverse(reverse(b));
}
check_ints(b,1,50);
+ check_ints(a,1,49);
for (i = 0; i < 60; i++) {
/* This maintains the invariant that a always points to a list of */
- /* 49 integers. Thus this is thread safe without locks. */
+ /* 49 integers. Thus this is thread safe without locks, */
+ /* assuming atomic pointer assignments. */
a = reverse(reverse(a));
# if !defined(AT_END) && !defined(THREADS)
/* This is not thread safe, since realloc explicitly deallocates */
if (i & 1) {
- a = (sexpr)GC_REALLOC((void_star)a, 500);
+ a = (sexpr)GC_REALLOC((GC_PTR)a, 500);
} else {
- a = (sexpr)GC_REALLOC((void_star)a, 8200);
+ a = (sexpr)GC_REALLOC((GC_PTR)a, 8200);
}
# endif
}
@@ -301,7 +333,13 @@ void reverse_test()
d = (sexpr)((char *)d - sizeof(char *));
check_ints(c,1,BIG);
check_uncollectable_ints(d, 1, 100);
- a = b = c = 0;
+ check_ints(f[5], 1,17);
+ check_ints(g[799], 1,18);
+ check_ints(h[1999], 1,19);
+# ifndef THREADS
+ a = 0;
+# endif
+ b = c = 0;
}
/*
@@ -316,7 +354,7 @@ typedef struct treenode {
int finalizable_count = 0;
int finalized_count = 0;
-int dropped_something = 0;
+VOLATILE int dropped_something = 0;
# ifdef __STDC__
void finalizer(void * obj, void * client_data)
@@ -335,7 +373,7 @@ int dropped_something = 0;
static mutex_t incr_lock;
mutex_lock(&incr_lock);
# endif
- if ((int)client_data != t -> level) {
+ if ((int)(GC_word)client_data != t -> level) {
(void)GC_printf0("Wrong finalization data - collector is broken\n");
FAIL;
}
@@ -351,7 +389,14 @@ int dropped_something = 0;
size_t counter = 0;
# define MAX_FINALIZED 8000
-GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};
+
+# if !defined(MACOS)
+ GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};
+#else
+ /* Too big for THINK_C. Have to allocate it dynamically. */
+ GC_word *live_indicators = 0;
+#endif
+
int live_indicators_count = 0;
tn * mktree(n)
@@ -359,6 +404,16 @@ int n;
{
tn * result = (tn *)GC_MALLOC(sizeof(tn));
+#if defined(MACOS)
+ /* get around static data limitations. */
+ if (!live_indicators)
+ live_indicators =
+ (GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
+ if (!live_indicators) {
+ (void)GC_printf0("Out of memory\n");
+ exit(1);
+ }
+#endif
if (n == 0) return(0);
if (result == 0) {
(void)GC_printf0("Out of memory\n");
@@ -395,24 +450,28 @@ int n;
# endif
}
- GC_REGISTER_FINALIZER((void_star)result, finalizer, (void_star)n,
- (GC_finalization_proc *)0, (void_star *)0);
+ GC_REGISTER_FINALIZER((GC_PTR)result, finalizer, (GC_PTR)(GC_word)n,
+ (GC_finalization_proc *)0, (GC_PTR *)0);
+ if (my_index >= MAX_FINALIZED) {
+ GC_printf0("live_indicators overflowed\n");
+ FAIL;
+ }
live_indicators[my_index] = 13;
- if (GC_general_register_disappearing_link(
- (void_star *)(&(live_indicators[my_index])),
- (void_star)result) != 0) {
+ if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
+ (GC_PTR *)(&(live_indicators[my_index])),
+ (GC_PTR)result) != 0) {
GC_printf0("GC_general_register_disappearing_link failed\n");
FAIL;
}
if (GC_unregister_disappearing_link(
- (void_star *)
+ (GC_PTR *)
(&(live_indicators[my_index]))) == 0) {
GC_printf0("GC_unregister_disappearing_link failed\n");
FAIL;
}
- if (GC_general_register_disappearing_link(
- (void_star *)(&(live_indicators[my_index])),
- (void_star)result) != 0) {
+ if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
+ (GC_PTR *)(&(live_indicators[my_index])),
+ (GC_PTR)result) != 0) {
GC_printf0("GC_general_register_disappearing_link failed 2\n");
FAIL;
}
@@ -489,22 +548,29 @@ int n;
}
}
+# if defined(THREADS) && defined(GC_DEBUG)
+# define TREE_HEIGHT 15
+# else
+# define TREE_HEIGHT 16
+# endif
void tree_test()
{
tn * root;
register int i;
- root = mktree(16);
+ root = mktree(TREE_HEIGHT);
alloc_small(5000000);
- chktree(root, 16);
+ chktree(root, TREE_HEIGHT);
if (finalized_count && ! dropped_something) {
(void)GC_printf0("Premature finalization - collector is broken\n");
FAIL;
}
dropped_something = 1;
- root = mktree(16);
- chktree(root, 16);
- for (i = 16; i >= 0; i--) {
+ GC_noop(root); /* Root needs to remain live until */
+ /* dropped_something is set. */
+ root = mktree(TREE_HEIGHT);
+ chktree(root, TREE_HEIGHT);
+ for (i = TREE_HEIGHT; i >= 0; i--) {
root = mktree(i);
chktree(root, i);
}
@@ -571,18 +637,105 @@ void typed_test()
}
}
+int fail_count = 0;
+
+/*ARGSUSED*/
+void fail_proc(x)
+ptr_t x;
+{
+ fail_count++;
+}
+
+extern void (*GC_is_valid_displacement_print_proc)();
+
+extern void (*GC_is_visible_print_proc)();
+
+#ifdef THREADS
+# define TEST_FAIL_COUNT(n) 1
+#else
+# define TEST_FAIL_COUNT(n) (fail_count >= (n))
+#endif
+
void run_one_test()
{
+ char *x;
+# ifdef LINT
+ char *y = 0;
+# else
+ char *y = (char *)fail_proc;
+# endif
DCL_LOCK_STATE;
-# ifndef GC_DEBUG
- if (GC_size(GC_MALLOC(7)) != 8
- || GC_size(GC_MALLOC(15)) != 16) {
+# ifdef FIND_LEAK
+ (void)GC_printf0(
+ "This test program is not designed for leak detection mode\n");
+ (void)GC_printf0("Expect lots of problems.\n");
+# endif
+ if (GC_size(GC_malloc(7)) != 8
+ || GC_size(GC_malloc(15)) != 16) {
(void)GC_printf0("GC_size produced unexpected results\n");
FAIL;
- }
+ }
+ if (GC_size(GC_malloc(0)) != 4 && GC_size(GC_malloc(0)) != 8) {
+ (void)GC_printf0("GC_malloc(0) failed\n");
+ FAIL;
+ }
+ if (GC_size(GC_malloc_uncollectable(0)) != 4
+ && GC_size(GC_malloc_uncollectable(0)) != 8) {
+ (void)GC_printf0("GC_malloc_uncollectable(0) failed\n");
+ FAIL;
+ }
+ GC_is_valid_displacement_print_proc = fail_proc;
+ GC_is_visible_print_proc = fail_proc;
+ x = GC_malloc(16);
+ if (GC_base(x + 13) != x) {
+ (void)GC_printf0("GC_base(heap ptr) produced incorrect result\n");
+ FAIL;
+ }
+# ifndef PCR
+ if (GC_base(y) != 0) {
+ (void)GC_printf0("GC_base(fn_ptr) produced incorrect result\n");
+ FAIL;
+ }
# endif
- reverse_test();
+ if (GC_same_obj(x+5, x) != x + 5) {
+ (void)GC_printf0("GC_same_obj produced incorrect result\n");
+ FAIL;
+ }
+ if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
+ (void)GC_printf0("GC_is_visible produced incorrect result\n");
+ FAIL;
+ }
+ if (!TEST_FAIL_COUNT(1)) {
+# ifndef RS6000
+ /* On RS6000s function pointers point to a descriptor in the */
+ /* data segment, so there should have been no failures. */
+ (void)GC_printf0("GC_is_visible produced wrong failure indication\n");
+ FAIL;
+# endif
+ }
+ if (GC_is_valid_displacement(y) != y
+ || GC_is_valid_displacement(x) != x
+ || GC_is_valid_displacement(x + 3) != x + 3) {
+ (void)GC_printf0(
+ "GC_is_valid_displacement produced incorrect result\n");
+ FAIL;
+ }
+# ifndef ALL_INTERIOR_POINTERS
+# ifdef RS6000
+ if (!TEST_FAIL_COUNT(1)) {
+# else
+ if (!TEST_FAIL_COUNT(2)) {
+# endif
+ (void)GC_printf0("GC_is_valid_displacement produced wrong failure indication\n");
+ FAIL;
+ }
+# endif
+ /* Test floating point alignment */
+ *(double *)GC_MALLOC(sizeof(double)) = 1.0;
+ *(double *)GC_MALLOC(sizeof(double)) = 1.0;
+ /* Repeated list reversal test. */
+ reverse_test();
# ifdef PRINTSTATS
GC_printf0("-------------Finished reverse_test\n");
# endif
@@ -606,7 +759,7 @@ void check_heap_stats()
if (sizeof(char *) > 4) {
max_heap_sz = 13000000;
} else {
- max_heap_sz = 10000000;
+ max_heap_sz = 11000000;
}
# ifdef GC_DEBUG
max_heap_sz *= 2;
@@ -616,6 +769,7 @@ void check_heap_stats()
# endif
/* Garbage collect repeatedly so that all inaccessible objects */
/* can be finalized. */
+ while (GC_collect_a_little()) { }
for (i = 0; i < 16; i++) {
GC_gcollect();
}
@@ -659,6 +813,37 @@ void check_heap_stats()
(void)GC_printf0("Collector appears to work\n");
}
+#if defined(MACOS)
+void SetMinimumStack(long minSize)
+{
+ long newApplLimit;
+
+ if (minSize > LMGetDefltStack())
+ {
+ newApplLimit = (long) GetApplLimit()
+ - (minSize - LMGetDefltStack());
+ SetApplLimit((Ptr) newApplLimit);
+ MaxApplZone();
+ }
+}
+
+#define cMinStackSpace (512L * 1024L)
+
+#endif
+
+#ifdef __STDC__
+ void warn_proc(char *msg, GC_word p)
+#else
+ void warn_proc(msg, p)
+ char *msg;
+ GC_word p;
+#endif
+{
+ GC_printf1(msg, (unsigned long)p);
+ FAIL;
+}
+
+
#if !defined(PCR) && !defined(SOLARIS_THREADS) || defined(LINT)
#ifdef MSWIN32
int APIENTRY WinMain(HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int n)
@@ -667,6 +852,15 @@ void check_heap_stats()
#endif
{
n_tests = 0;
+
+# if defined(MACOS)
+ /* Make sure we have lots and lots of stack space. */
+ SetMinimumStack(cMinStackSpace);
+ /* Cheat and let stdio initialize toolbox for us. */
+ printf("Testing GC Macintosh port.\n");
+# endif
+ GC_INIT(); /* Only needed if gc is dynamic library. */
+ (void) GC_set_warn_proc(warn_proc);
# if defined(MPROTECT_VDB) || defined(PROC_VDB)
GC_enable_incremental();
(void) GC_printf0("Switched to incremental mode\n");
@@ -685,12 +879,16 @@ void check_heap_stats()
/* This is a bit SunOS4 specific. */
GC_noop(GC_expand_hp, GC_add_roots, GC_clear_roots,
GC_register_disappearing_link,
+ GC_register_finalizer_ignore_self,
+ GC_debug_register_displacement,
GC_print_obj, GC_debug_change_stubborn,
GC_debug_end_stubborn_change, GC_debug_malloc_uncollectable,
GC_debug_free, GC_debug_realloc, GC_generic_malloc_words_small,
GC_init, GC_make_closure, GC_debug_invoke_finalizer,
GC_page_was_ever_dirty, GC_is_fresh,
- GC_malloc_ignore_off_page);
+ GC_malloc_ignore_off_page, GC_malloc_atomic_ignore_off_page,
+ GC_set_max_heap_size, GC_get_bytes_since_gc,
+ GC_pre_incr, GC_post_incr);
# endif
return(0);
}
@@ -704,7 +902,8 @@ test()
int code;
n_tests = 0;
- GC_enable_incremental();
+ /* GC_enable_incremental(); */
+ (void) GC_set_warn_proc(warn_proc);
th1 = PCR_Th_Fork(run_one_test, 0);
th2 = PCR_Th_Fork(run_one_test, 0);
run_one_test();
@@ -728,6 +927,11 @@ void * thr_run_one_test(void * arg)
run_one_test();
return(0);
}
+
+#ifdef GC_DEBUG
+# define GC_free GC_debug_free
+#endif
+
main()
{
thread_t th1;
@@ -735,7 +939,9 @@ main()
int code;
n_tests = 0;
+ GC_INIT(); /* Only needed if gc is dynamic library. */
GC_enable_incremental();
+ (void) GC_set_warn_proc(warn_proc);
if (thr_keycreate(&fl_key, GC_free) != 0) {
(void)GC_printf1("Key creation failed %lu\n", (unsigned long)code);
FAIL;
diff --git a/test_cpp.cc b/test_cpp.cc
new file mode 100644
index 00000000..6cd99d51
--- /dev/null
+++ b/test_cpp.cc
@@ -0,0 +1,235 @@
+/****************************************************************************
+Copyright (c) 1994 by Xerox Corporation. All rights reserved.
+
+THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+Permission is hereby granted to use or copy this program for any
+purpose, provided the above notices are retained on all copies.
+Permission to modify the code and to distribute modified code is
+granted, provided the above notices are retained, and a notice that
+the code was modified is included with the above copyright notice.
+****************************************************************************
+Last modified on Mon Jul 10 21:06:03 PDT 1995 by ellis
+ modified on December 20, 1994 7:27 pm PST by boehm
+
+usage: test_cpp number-of-iterations
+
+This program tries to test the specific C++ functionality provided by
+gc_c++.h that isn't tested by the more general test routines of the
+collector.
+
+A recommended value for number-of-iterations is 10, which will take a
+few minutes to complete.
+
+***************************************************************************/
+
+#include "gc_cpp.h"
+#include <stdio.h>
+#include <stdlib.h>
+extern "C" {
+#include "gc_priv.h"
+}
+# ifdef MSWIN32
+# include <windows.h>
+# endif
+
+
+#define my_assert( e ) \
+ if (! (e)) { \
+ GC_printf1( "Assertion failure in " __FILE__ ", line %d: " #e "\n", \
+ __LINE__ ); \
+ exit( 1 ); }
+
+
+class A {public:
+ /* An uncollectable class. */
+
+ A( int iArg ): i( iArg ) {}
+ void Test( int iArg ) {
+ my_assert( i == iArg );}
+ int i;};
+
+
+class B: public gc, public A {public:
+ /* A collectable class. */
+
+ B( int j ): A( j ) {}
+ ~B() {
+ my_assert( deleting );}
+ static void Deleting( int on ) {
+ deleting = on;}
+ static int deleting;};
+
+int B::deleting = 0;
+
+
+class C: public gc_cleanup, public A {public:
+ /* A collectable class with cleanup and virtual multiple inheritance. */
+
+ C( int levelArg ): A( levelArg ), level( levelArg ) {
+ nAllocated++;
+ if (level > 0) {
+ left = new C( level - 1 );
+ right = new C( level - 1 );}
+ else {
+ left = right = 0;}}
+ ~C() {
+ this->A::Test( level );
+ nFreed++;
+ my_assert( level == 0 ?
+ left == 0 && right == 0 :
+ level == left->level + 1 && level == right->level + 1 );
+ left = right = 0;
+ level = -123456;}
+ static void Test() {
+ my_assert( nFreed <= nAllocated && nFreed >= .8 * nAllocated );}
+
+ static int nFreed;
+ static int nAllocated;
+ int level;
+ C* left;
+ C* right;};
+
+int C::nFreed = 0;
+int C::nAllocated = 0;
+
+
+class D: public gc {public:
+ /* A collectable class with a static member function to be used as
+ an explicit clean-up function supplied to ::new. */
+
+ D( int iArg ): i( iArg ) {
+ nAllocated++;}
+ static void CleanUp( void* obj, void* data ) {
+ D* self = (D*) obj;
+ nFreed++;
+ my_assert( self->i == (int) data );}
+ static void Test() {
+ my_assert( nFreed >= .8 * nAllocated );}
+
+ int i;
+ static int nFreed;
+ static int nAllocated;};
+
+int D::nFreed = 0;
+int D::nAllocated = 0;
+
+
+class E: public gc_cleanup {public:
+ /* A collectable class with clean-up for use by F. */
+
+ E() {
+ nAllocated++;}
+ ~E() {
+ nFreed++;}
+
+ static int nFreed;
+ static int nAllocated;};
+
+int E::nFreed = 0;
+int E::nAllocated = 0;
+
+
+class F: public E {public:
+ /* A collectable class with clean-up, a base with clean-up, and a
+ member with clean-up. */
+
+ F() {
+ nAllocated++;}
+ ~F() {
+ nFreed++;}
+ static void Test() {
+ my_assert( nFreed >= .8 * nAllocated );
+ my_assert( 2 * nFreed == E::nFreed );}
+
+ E e;
+ static int nFreed;
+ static int nAllocated;};
+
+int F::nFreed = 0;
+int F::nAllocated = 0;
+
+
+long Disguise( void* p ) {
+ return ~ (long) p;}
+
+void* Undisguise( long i ) {
+ return (void*) ~ i;}
+
+
+#ifdef MSWIN32
+int APIENTRY WinMain(
+ HINSTANCE instance, HINSTANCE prev, LPSTR cmd, int cmdShow )
+{
+ int argc;
+ char* argv[ 3 ];
+
+ for (argc = 1; argc < sizeof( argv ) / sizeof( argv[ 0 ] ); argc++) {
+ argv[ argc ] = strtok( argc == 1 ? cmd : 0, " \t" );
+ if (0 == argv[ argc ]) break;}
+
+#else
+int main( int argc, char* argv[] ) {
+#endif
+
+ int i, iters, n;
+
+ if (argc != 2 || (0 >= (n = atoi( argv[ 1 ] )))) {
+ GC_printf0( "usage: test_cpp number-of-iterations\n" );
+ exit( 1 );}
+
+ for (iters = 1; iters <= n; iters++) {
+ GC_printf1( "Starting iteration %d\n", iters );
+
+ /* Allocate some uncollectable As and disguise their pointers.
+ Later we'll check to see if the objects are still there. We're
+ checking to make sure these objects really are uncollectable. */
+ long as[ 1000 ];
+ long bs[ 1000 ];
+ for (i = 0; i < 1000; i++) {
+ as[ i ] = Disguise( new (NoGC) A( i ) );
+ bs[ i ] = Disguise( new (NoGC) B( i ) );}
+
+ /* Allocate a fair number of finalizable Cs, Ds, and Fs.
+ Later we'll check to make sure they've gone away. */
+ for (i = 0; i < 1000; i++) {
+ C* c = new C( 2 );
+ C c1( 2 ); /* stack allocation should work too */
+ D* d = ::new (GC, D::CleanUp, (void*) i) D( i );
+ F* f = new F;
+ if (0 == i % 10) delete c;}
+
+ /* Allocate a very large number of collectable As and Bs and
+ drop the references to them immediately, forcing many
+ collections. */
+ for (i = 0; i < 1000000; i++) {
+ A* a = new (GC) A( i );
+ B* b = new B( i );
+ b = new (GC) B( i );
+ if (0 == i % 10) {
+ B::Deleting( 1 );
+ delete b;
+ B::Deleting( 0 );}}
+
+ /* Make sure the uncollectable As and Bs are still there. */
+ for (i = 0; i < 1000; i++) {
+ A* a = (A*) Undisguise( as[ i ] );
+ B* b = (B*) Undisguise( bs[ i ] );
+ a->Test( i );
+ delete a;
+ b->Test( i );
+ B::Deleting( 1 );
+ delete b;
+ B::Deleting( 0 );}
+
+ /* Make sure most of the finalizable Cs, Ds, and Fs have
+ gone away. */
+ C::Test();
+ D::Test();
+ F::Test();}
+
+ GC_printf0( "The test appears to have succeeded.\n" );
+ return( 0 );}
+
+
diff --git a/typd_mlc.c b/typd_mlc.c
index b04cbbeb..72fd4217 100644
--- a/typd_mlc.c
+++ b/typd_mlc.c
@@ -11,7 +11,7 @@
* modified is included with the above copyright notice.
*
*/
-/* Boehm, May 19, 1994 2:06 pm PDT */
+/* Boehm, July 31, 1995 5:02 pm PDT */
/*
@@ -343,10 +343,6 @@ ptr_t * GC_eobjfreelist;
ptr_t * GC_arobjfreelist;
-struct hblk ** GC_ereclaim_list;
-
-struct hblk ** GC_arreclaim_list;
-
mse * GC_typed_mark_proc();
mse * GC_array_mark_proc();
@@ -377,14 +373,9 @@ void GC_init_explicit_typing()
GC_generic_malloc_inner((MAXOBJSZ+1)*sizeof(ptr_t), PTRFREE);
if (GC_eobjfreelist == 0) ABORT("Couldn't allocate GC_eobjfreelist");
BZERO(GC_eobjfreelist, (MAXOBJSZ+1)*sizeof(ptr_t));
- GC_ereclaim_list = (struct hblk **)
- GC_generic_malloc_inner((MAXOBJSZ+1)*sizeof(struct hblk *), PTRFREE);
- if (GC_ereclaim_list == 0)
- ABORT("Couldn't allocate GC_ereclaim_list");
- BZERO(GC_ereclaim_list, (MAXOBJSZ+1)*sizeof(struct hblk *));
GC_explicit_kind = GC_n_kinds++;
GC_obj_kinds[GC_explicit_kind].ok_freelist = GC_eobjfreelist;
- GC_obj_kinds[GC_explicit_kind].ok_reclaim_list = GC_ereclaim_list;
+ GC_obj_kinds[GC_explicit_kind].ok_reclaim_list = 0;
GC_obj_kinds[GC_explicit_kind].ok_descriptor =
(((word)WORDS_TO_BYTES(-1)) | DS_PER_OBJECT);
GC_obj_kinds[GC_explicit_kind].ok_relocate_descr = TRUE;
@@ -399,11 +390,6 @@ void GC_init_explicit_typing()
GC_generic_malloc_inner((MAXOBJSZ+1)*sizeof(ptr_t), PTRFREE);
if (GC_arobjfreelist == 0) ABORT("Couldn't allocate GC_arobjfreelist");
BZERO(GC_arobjfreelist, (MAXOBJSZ+1)*sizeof(ptr_t));
- GC_arreclaim_list = (struct hblk **)
- GC_generic_malloc_inner((MAXOBJSZ+1)*sizeof(struct hblk *), PTRFREE);
- if (GC_arreclaim_list == 0) ABORT("Couldn't allocate GC_arreclaim_list");
- BZERO(GC_arreclaim_list, (MAXOBJSZ+1)*sizeof(struct hblk *));
- if (GC_arreclaim_list == 0) ABORT("Couldn't allocate GC_arreclaim_list");
if (GC_n_mark_procs >= MAX_MARK_PROCS)
ABORT("No slot for array mark proc");
GC_array_mark_proc_index = GC_n_mark_procs++;
@@ -411,7 +397,7 @@ void GC_init_explicit_typing()
ABORT("No kind available for array objects");
GC_array_kind = GC_n_kinds++;
GC_obj_kinds[GC_array_kind].ok_freelist = GC_arobjfreelist;
- GC_obj_kinds[GC_array_kind].ok_reclaim_list = GC_arreclaim_list;
+ GC_obj_kinds[GC_array_kind].ok_reclaim_list = 0;
GC_obj_kinds[GC_array_kind].ok_descriptor =
MAKE_PROC(GC_array_mark_proc_index, 0);;
GC_obj_kinds[GC_array_kind].ok_relocate_descr = FALSE;
@@ -642,7 +628,7 @@ word env;
ptr_t GC_clear_stack();
#define GENERAL_MALLOC(lb,k) \
- (extern_ptr_t)GC_clear_stack(GC_generic_malloc((word)lb, k))
+ (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
#if defined(__STDC__) || defined(__cplusplus)
extern void * GC_malloc_explicitly_typed(size_t lb, GC_descr d)
@@ -662,7 +648,7 @@ DCL_LOCK_STATE;
# ifdef MERGE_SIZES
lw = GC_size_map[lb];
# else
- lw = ROUNDED_UP_WORDS(lb);
+ lw = ALIGNED_WORDS(lb);
# endif
opp = &(GC_eobjfreelist[lw]);
FASTLOCK();
@@ -682,7 +668,7 @@ DCL_LOCK_STATE;
lw = BYTES_TO_WORDS(GC_size(op));
}
((word *)op)[lw - 1] = d;
- return((extern_ptr_t) op);
+ return((GC_PTR) op);
}
#if defined(__STDC__) || defined(__cplusplus)
@@ -723,7 +709,7 @@ DCL_LOCK_STATE;
# ifdef MERGE_SIZES
lw = GC_size_map[lb];
# else
- lw = ROUNDED_UP_WORDS(lb);
+ lw = ALIGNED_WORDS(lb);
# endif
opp = &(GC_arobjfreelist[lw]);
FASTLOCK();
@@ -762,9 +748,9 @@ DCL_LOCK_STATE;
/* Make sure the descriptor is cleared once there is any danger */
/* it may have been collected. */
(void)
- GC_general_register_disappearing_link((extern_ptr_t *)
+ GC_general_register_disappearing_link((GC_PTR *)
((word *)op+lw-1),
- (extern_ptr_t) op);
+ (GC_PTR) op);
if (ff != GC_finalization_failures) {
/* We may have failed to register op due to lack of memory. */
/* We were out of memory very recently, so we can safely */
@@ -773,5 +759,5 @@ DCL_LOCK_STATE;
return(0);
}
}
- return((extern_ptr_t) op);
+ return((GC_PTR) op);
}
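
The typed-allocation changes above drop the separate reclaim lists and switch the public casts to GC_PTR. GC_malloc_explicitly_typed itself appears in the hunks; the descriptor it consumes is normally built with helpers from gc_typed.h (GC_WORDSZ, GC_set_bit, GC_make_descriptor), which are assumed rather than shown in the sketch below:

    /* Sketch of typed allocation.  GC_set_bit and GC_make_descriptor are */
    /* assumed to come from gc_typed.h; only GC_malloc_explicitly_typed   */
    /* is visible in the diff above.                                      */
    #include "gc.h"
    #include "gc_typed.h"

    struct pair {
        GC_word tag;          /* word 0: not a pointer                      */
        struct pair *next;    /* word 1: a pointer the collector must trace */
    };

    struct pair *alloc_pair(void)
    {
        static GC_descr pair_descr = 0;

        if (pair_descr == 0) {
            GC_word bitmap[1] = { 0 };
            GC_set_bit(bitmap, 1);                      /* word 1 holds a pointer */
            pair_descr = GC_make_descriptor(bitmap, 2); /* 2 words per object     */
        }
        return (struct pair *)GC_malloc_explicitly_typed(sizeof(struct pair),
                                                         pair_descr);
    }
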
diff --git a/weakpointer.h b/weakpointer.h
new file mode 100644
index 00000000..84906b00
--- /dev/null
+++ b/weakpointer.h
@@ -0,0 +1,221 @@
+#ifndef _weakpointer_h_
+#define _weakpointer_h_
+
+/****************************************************************************
+
+WeakPointer and CleanUp
+
+ Copyright (c) 1991 by Xerox Corporation. All rights reserved.
+
+ THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+
+ Permission is hereby granted to copy this code for any purpose,
+ provided the above notices are retained on all copies.
+
+ Last modified on Mon Jul 17 18:16:01 PDT 1995 by ellis
+
+****************************************************************************/
+
+/****************************************************************************
+
+WeakPointer
+
+A weak pointer is a pointer to a heap-allocated object that doesn't
+prevent the object from being garbage collected. Weak pointers can be
+used to track which objects haven't yet been reclaimed by the
+collector. A weak pointer is deactivated when the collector discovers
+its referent object is unreachable by normal pointers (reachability
+and deactivation are defined more precisely below). A deactivated weak
+pointer remains deactivated forever.
+
+****************************************************************************/
+
+
+template< class T > class WeakPointer {
+public:
+
+WeakPointer( T* t = 0 )
+ /* Constructs a weak pointer for *t. t may be null. It is an error
+ if t is non-null and *t is not a collected object. */
+ {impl = _WeakPointer_New( t );}
+
+T* Pointer()
+ /* wp.Pointer() returns a pointer to the referent object of wp or
+ null if wp has been deactivated (because its referent object
+ has been discovered unreachable by the collector). */
+ {return (T*) _WeakPointer_Pointer( this->impl );}
+
+int operator==( WeakPointer< T > wp2 )
+ /* Given weak pointers wp1 and wp2, if wp1 == wp2, then wp1 and
+ wp2 refer to the same object. If wp1 != wp2, then either wp1
+ and wp2 don't refer to the same object, or if they do, one or
+ both of them has been deactivated. (Note: If objects t1 and t2
+ are never made reachable by their clean-up functions, then
+ WeakPointer<T>(t1) == WeakPointer<T>(t2) if and only if t1 == t2.) */
+ {return _WeakPointer_Equal( this->impl, wp2.impl );}
+
+int Hash()
+ /* Returns a hash code suitable for use by multiplicative- and
+ division-based hash tables. If wp1 == wp2, then wp1.Hash() ==
+ wp2.Hash(). */
+ {return _WeakPointer_Hash( this->impl );}
+
+private:
+void* impl;
+};
+
+/*****************************************************************************
+
+CleanUp
+
+A garbage-collected object can have an associated clean-up function
+that will be invoked some time after the collector discovers the
+object is unreachable via normal pointers. Clean-up functions can be
+used to release resources such as open-file handles or window handles
+when their containing objects become unreachable. If a C++ object has
+a non-empty explicit destructor (i.e. it contains programmer-written
+code), the destructor will be automatically registered as the object's
+initial clean-up function.
+
+There is no guarantee that the collector will detect every unreachable
+object (though it will find almost all of them). Clients should not
+rely on clean-up to cause some action to occur immediately -- clean-up
+is only a mechanism for improving resource usage.
+
+Every object with a clean-up function also has a clean-up queue. When
+the collector finds the object is unreachable, it enqueues it on its
+queue. The clean-up function is applied when the object is removed
+from the queue. By default, objects are enqueued on the garbage
+collector's queue, and the collector removes all objects from its
+queue after each collection. If a client supplies another queue for
+objects, it is his responsibility to remove objects (and cause their
+functions to be called) by polling it periodically.
+
+Clean-up queues allow clean-up functions accessing global data to
+synchronize with the main program. Garbage collection can occur at any
+time, and clean-ups invoked by the collector might access data in an
+inconsistent state. A client can control this by defining an explicit
+queue for objects and polling it at safe points.
+
+The following definitions are used by the specification below:
+
+Given a pointer t to a collected object, the base object BO(t) is the
+value returned by new when it created the object. (Because of multiple
+inheritance, t and BO(t) may not be the same address.)
+
+A weak pointer wp references an object *t if BO(wp.Pointer()) ==
+BO(t).
+
+***************************************************************************/
+
+template< class T, class Data > class CleanUp {
+public:
+
+static void Set( T* t, void c( Data* d, T* t ), Data* d = 0 )
+ /* Sets the clean-up function of object BO(t) to be <c, d>,
+ replacing any previously defined clean-up function for BO(t); c
+ and d can be null, but t cannot. Sets the clean-up queue for
+ BO(t) to be the collector's queue. When t is removed from its
+ clean-up queue, its clean-up will be applied by calling c(d,
+ t). It is an error if *t is not a collected object. */
+ {_CleanUp_Set( t, c, d );}
+
+static void Call( T* t )
+ /* Sets the new clean-up function for BO(t) to be null and, if the
+ old one is non-null, calls it immediately, even if BO(t) is
+ still reachable. Deactivates any weak pointers to BO(t). */
+ {_CleanUp_Call( t );}
+
+class Queue {public:
+ Queue()
+ /* Constructs a new queue. */
+ {this->head = _CleanUp_Queue_NewHead();}
+
+ void Set( T* t )
+ /* q.Set(t) sets the clean-up queue of BO(t) to be q. */
+ {_CleanUp_Queue_Set( this->head, t );}
+
+ int Call()
+ /* If q is non-empty, q.Call() removes the first object and
+ calls its clean-up function; does nothing if q is
+ empty. Returns true if there are more objects in the
+ queue. */
+ {return _CleanUp_Queue_Call( this->head );}
+
+ private:
+ void* head;
+ };
+};
+
+/**********************************************************************
+
+Reachability and Clean-up
+
+An object O is reachable if it can be reached via a non-empty path of
+normal pointers from the registers, stacks, global variables, or an
+object with a non-null clean-up function (including O itself),
+ignoring pointers from an object to itself.
+
+This definition of reachability ensures that if object B is accessible
+from object A (and not vice versa) and if both A and B have clean-up
+functions, then A will always be cleaned up before B. Note that as
+long as an object with a clean-up function is contained in a cycle of
+pointers, it will always be reachable and will never be cleaned up or
+collected.
+
+When the collector finds an unreachable object with a null clean-up
+function, it atomically deactivates all weak pointers referencing the
+object and recycles its storage. If object B is accessible from object
+A via a path of normal pointers, A will be discovered unreachable no
+later than B, and a weak pointer to A will be deactivated no later
+than a weak pointer to B.
+
+When the collector finds an unreachable object with a non-null
+clean-up function, the collector atomically deactivates all weak
+pointers referencing the object, redefines its clean-up function to be
+null, and enqueues it on its clean-up queue. The object then becomes
+reachable again and remains reachable at least until its clean-up
+function executes.
+
+The clean-up function is assured that its argument is the only
+accessible pointer to the object. Nothing prevents the function from
+redefining the object's clean-up function or making the object
+reachable again (for example, by storing the pointer in a global
+variable).
+
+If the clean-up function does not make its object reachable again and
+does not redefine its clean-up function, then the object will be
+collected by a subsequent collection (because the object remains
+unreachable and now has a null clean-up function). If the clean-up
+function does make its object reachable again and a clean-up function
+is subsequently redefined for the object, then the new clean-up
+function will be invoked the next time the collector finds the object
+unreachable.
+
+Note that a destructor for a collected object cannot safely redefine a
+clean-up function for its object, since after the destructor executes,
+the object has been destroyed into "raw memory". (In most
+implementations, destroying an object mutates its vtbl.)
+
+Finally, note that calling delete t on a collected object first
+deactivates any weak pointers to t and then invokes its clean-up
+function (destructor).
+
+**********************************************************************/
+
+extern "C" {
+ void* _WeakPointer_New( void* t );
+ void* _WeakPointer_Pointer( void* wp );
+ int _WeakPointer_Equal( void* wp1, void* wp2 );
+ int _WeakPointer_Hash( void* wp );
+ void _CleanUp_Set( void* t, void (*c)( void* d, void* t ), void* d );
+ void _CleanUp_Call( void* t );
+ void* _CleanUp_Queue_NewHead ();
+ void _CleanUp_Queue_Set( void* h, void* t );
+ int _CleanUp_Queue_Call( void* h );
+}
+
+#endif /* _weakpointer_h_ */
+
+
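
weakpointer.h above is interface-only; the work happens behind the extern "C" primitives at the end of the file. The following hypothetical sketch exercises the documented interface; the Conn class and connQueue are invented for illustration, and only the WeakPointer, CleanUp and gc_cleanup members declared in weakpointer.h and gc_cpp.h are relied on.

    // Hypothetical sketch of the WeakPointer / CleanUp interface above.
    #include "gc_cpp.h"
    #include "weakpointer.h"
    #include <stdio.h>

    class Conn: public gc_cleanup {public:
        /* gc_cleanup registers ~Conn as the initial clean-up function. */
        Conn( int fdArg ): fd( fdArg ) {}
        ~Conn() { printf( "closing fd %d\n", fd ); }
        int fd;};

    CleanUp< Conn, char >::Queue connQueue;

    WeakPointer< Conn > remember( Conn* c ) {
        connQueue.Set( c );               /* clean up on our queue, not the collector's */
        return WeakPointer< Conn >( c );} /* weak: does not keep c reachable */

    void at_safe_point( WeakPointer< Conn > wp ) {
        while (connQueue.Call()) {}       /* run any pending ~Conn calls here */
        if (wp.Pointer() == 0) {
            /* c was found unreachable; this weak pointer stays null forever. */
        }}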