/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2008-2022 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#ifndef GC_PRIVATE_H
#define GC_PRIVATE_H

#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#if !defined(GC_BUILD) && !defined(NOT_GCBUILD)
# define GC_BUILD
#endif

#if (defined(__linux__) || defined(__GLIBC__) || defined(__GNU__) \
     || defined(__CYGWIN__) || defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID) \
     || defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID_AND_ARG) \
     || defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)) && !defined(_GNU_SOURCE)
  /* Can't test LINUX, since this must be defined before other includes. */
# define _GNU_SOURCE 1
#endif

#if defined(__INTERIX) && !defined(_ALL_SOURCE)
# define _ALL_SOURCE 1
#endif

#if (defined(DGUX) && defined(GC_THREADS) || defined(DGUX386_THREADS) \
     || defined(GC_DGUX386_THREADS)) && !defined(_USING_POSIX4A_DRAFT10)
# define _USING_POSIX4A_DRAFT10 1
#endif

#if defined(__MINGW32__) && !defined(__MINGW_EXCPT_DEFINE_PSDK) \
    && defined(__i386__) && defined(GC_EXTERN) /* defined in gc.c */
  /* See the description in mark.c. */
# define __MINGW_EXCPT_DEFINE_PSDK 1
#endif

#if defined(NO_DEBUGGING) && !defined(GC_ASSERTIONS) && !defined(NDEBUG)
  /* To turn off assertion checking (in atomic_ops.h). */
# define NDEBUG 1
#endif

#ifndef GC_H
# include "gc/gc.h"
#endif

#include <stdlib.h>
#if !defined(sony_news)
# include <stddef.h>
#endif

#ifdef DGUX
# include <sys/types.h>
# include <sys/time.h>
# include <sys/resource.h>
#endif /* DGUX */

#ifdef BSD_TIME
# include <sys/types.h>
# include <sys/time.h>
# include <sys/resource.h>
#endif /* BSD_TIME */

#ifdef PARALLEL_MARK
# define AO_REQUIRE_CAS
# if !defined(__GNUC__) && !defined(AO_ASSUME_WINDOWS98)
#   define AO_ASSUME_WINDOWS98
# endif
#endif

#include "gc/gc_tiny_fl.h"
#include "gc/gc_mark.h"

typedef GC_word word;
typedef GC_signed_word signed_word;
typedef unsigned int unsigned32;

typedef int GC_bool;
#define TRUE 1
#define FALSE 0

#ifndef PTR_T_DEFINED
  typedef char * ptr_t; /* A generic pointer to which we can add       */
                        /* byte displacements and which can be used    */
                        /* for address comparisons.                    */
# define PTR_T_DEFINED
#endif

#ifndef SIZE_MAX
# include <limits.h>
#endif
#if defined(SIZE_MAX) && !defined(CPPCHECK)
# define GC_SIZE_MAX ((size_t)SIZE_MAX)
            /* Extra cast to workaround some buggy SIZE_MAX definitions. */
#else
# define GC_SIZE_MAX (~(size_t)0)
#endif

#if GC_GNUC_PREREQ(3, 0) && !defined(LINT2)
# define EXPECT(expr, outcome) __builtin_expect(expr, outcome)
  /* Equivalent to (expr), but predict that usually (expr) == outcome. */
#else
# define EXPECT(expr, outcome) (expr)
#endif /* __GNUC__ */

/* Saturated addition of size_t values.  Used to avoid value wrap      */
/* around on overflow.  The arguments should have no side effects.     */
#define SIZET_SAT_ADD(a, b) \
            (EXPECT((a) < GC_SIZE_MAX - (b), TRUE) ? (a) + (b) : GC_SIZE_MAX)

#include "gcconfig.h"
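/* Worked example (illustrative): SIZET_SAT_ADD clamps instead of      */
/* wrapping.  With a 64-bit size_t, SIZET_SAT_ADD((size_t)10, 20) is   */
/* 30, while SIZET_SAT_ADD(GC_SIZE_MAX - 4, 8) yields GC_SIZE_MAX      */
/* rather than a small wrapped value; the EXPECT hint marks the        */
/* non-overflow path as the typical one.                               */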
#if !defined(GC_ATOMIC_UNCOLLECTABLE) && defined(ATOMIC_UNCOLLECTABLE)
  /* For compatibility with old-style naming. */
# define GC_ATOMIC_UNCOLLECTABLE
#endif

#ifndef GC_INNER
  /* This tagging macro must be used at the start of every variable    */
  /* definition which is declared with GC_EXTERN.  Should be also used */
  /* for the GC-scope function definitions and prototypes.  Must not   */
  /* be used in gcconfig.h.  Shouldn't be used for the debugging-only  */
  /* functions.  Currently, not used for the functions declared in or  */
  /* called from the "dated" source files (located in "extra" folder). */
# if defined(GC_DLL) && defined(__GNUC__) && !defined(MSWIN32) \
     && !defined(MSWINCE) && !defined(CYGWIN32)
#   if GC_GNUC_PREREQ(4, 0) && !defined(GC_NO_VISIBILITY)
      /* See the corresponding GC_API definition. */
#     define GC_INNER __attribute__((__visibility__("hidden")))
#   else
      /* The attribute is unsupported. */
#     define GC_INNER /* empty */
#   endif
# else
#   define GC_INNER /* empty */
# endif

# define GC_EXTERN extern GC_INNER
  /* Used only for the GC-scope variables (prefixed with "GC_")        */
  /* declared in the header files.  Must not be used for thread-local  */
  /* variables.  Must not be used in gcconfig.h.  Shouldn't be used    */
  /* for the debugging-only or profiling-only variables.  Currently,   */
  /* not used for the variables accessed from the "dated" source files */
  /* (specific.c/h, and in the "extra" folder).                        */
  /* The corresponding variable definition must start with GC_INNER.   */
#endif /* !GC_INNER */

#ifdef __cplusplus
  /* Register storage specifier is deprecated in C++11. */
# define REGISTER /* empty */
#else
  /* Used only for several local variables in the performance-critical */
  /* functions.  Should not be used for new code.                      */
# define REGISTER register
#endif

#if defined(CPPCHECK)
# define MACRO_BLKSTMT_BEGIN {
# define MACRO_BLKSTMT_END   }
# define LOCAL_VAR_INIT_OK =0 /* to avoid "uninit var" false positive */
#else
# define MACRO_BLKSTMT_BEGIN do {
# define MACRO_BLKSTMT_END   } while (0)
# define LOCAL_VAR_INIT_OK /* empty */
#endif

#if defined(M68K) && defined(__GNUC__)
  /* By default, __alignof__(word) is 2 on m68k.  Use this attribute   */
  /* to have proper word alignment (i.e. 4-byte on a 32-bit arch).     */
# define GC_ATTR_WORD_ALIGNED __attribute__((__aligned__(sizeof(word))))
#else
# define GC_ATTR_WORD_ALIGNED /* empty */
#endif

#ifndef HEADERS_H
# include "gc_hdrs.h"
#endif

#ifndef GC_ATTR_NO_SANITIZE_ADDR
# ifndef ADDRESS_SANITIZER
#   define GC_ATTR_NO_SANITIZE_ADDR /* empty */
# elif GC_CLANG_PREREQ(3, 8)
#   define GC_ATTR_NO_SANITIZE_ADDR __attribute__((no_sanitize("address")))
# else
#   define GC_ATTR_NO_SANITIZE_ADDR __attribute__((no_sanitize_address))
# endif
#endif /* !GC_ATTR_NO_SANITIZE_ADDR */

#ifndef GC_ATTR_NO_SANITIZE_MEMORY
# ifndef MEMORY_SANITIZER
#   define GC_ATTR_NO_SANITIZE_MEMORY /* empty */
# elif GC_CLANG_PREREQ(3, 8)
#   define GC_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize("memory")))
# else
#   define GC_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
# endif
#endif /* !GC_ATTR_NO_SANITIZE_MEMORY */

#ifndef GC_ATTR_NO_SANITIZE_THREAD
# ifndef THREAD_SANITIZER
#   define GC_ATTR_NO_SANITIZE_THREAD /* empty */
# elif GC_CLANG_PREREQ(3, 8)
#   define GC_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread")))
# else
    /* It seems that no_sanitize_thread attribute has no effect if the */
    /* function is inlined (as of gcc 11.1.0, at least).               */
#   define GC_ATTR_NO_SANITIZE_THREAD \
                GC_ATTR_NOINLINE __attribute__((no_sanitize_thread))
# endif
#endif /* !GC_ATTR_NO_SANITIZE_THREAD */

#ifndef UNUSED_ARG
# define UNUSED_ARG(arg) ((void)(arg))
#endif

#ifdef HAVE_CONFIG_H
  /* The "inline" keyword is determined by Autoconf AC_C_INLINE. */
# define GC_INLINE static inline
#elif defined(_MSC_VER) || defined(__INTEL_COMPILER) || defined(__DMC__) \
      || (GC_GNUC_PREREQ(3, 0) && defined(__STRICT_ANSI__)) \
      || defined(__BORLANDC__) || defined(__WATCOMC__)
# define GC_INLINE static __inline
#elif GC_GNUC_PREREQ(3, 0) || defined(__sun)
# define GC_INLINE static inline
#else
# define GC_INLINE static
#endif

#ifndef GC_ATTR_NOINLINE
# if GC_GNUC_PREREQ(4, 0)
#   define GC_ATTR_NOINLINE __attribute__((__noinline__))
# elif _MSC_VER >= 1400
#   define GC_ATTR_NOINLINE __declspec(noinline)
# else
#   define GC_ATTR_NOINLINE /* empty */
# endif
#endif

#ifndef GC_API_OSCALL
  /* This is used to identify GC routines called by name from OS. */
# if defined(__GNUC__)
#   if GC_GNUC_PREREQ(4, 0) && !defined(GC_NO_VISIBILITY)
      /* Same as GC_API if GC_DLL. */
#     define GC_API_OSCALL extern __attribute__((__visibility__("default")))
#   else
      /* The attribute is unsupported. */
#     define GC_API_OSCALL extern
#   endif
# else
#   define GC_API_OSCALL GC_API
# endif
#endif

#ifndef GC_API_PRIV
# define GC_API_PRIV GC_API
#endif

#if defined(THREADS) && !defined(NN_PLATFORM_CTR)
# include "gc_atomic_ops.h"
# ifndef AO_HAVE_compiler_barrier
#   define AO_HAVE_compiler_barrier 1
# endif
#endif

#if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
# ifndef WIN32_LEAN_AND_MEAN
#   define WIN32_LEAN_AND_MEAN 1
# endif
# define NOSERVICE
# include <windows.h>
# include <winbase.h>
#endif

#include "gc_locks.h"

#define GC_WORD_MAX (~(word)0)

#ifdef STACK_GROWS_DOWN
# define COOLER_THAN >
# define HOTTER_THAN <
# define MAKE_COOLER(x,y) if ((word)((x) + (y)) > (word)(x)) {(x) += (y);} \
                          else (x) = (ptr_t)GC_WORD_MAX
# define MAKE_HOTTER(x,y) (x) -= (y)
#else
# define COOLER_THAN <
# define HOTTER_THAN >
# define MAKE_COOLER(x,y) if ((word)((x) - (y)) < (word)(x)) {(x) -= (y);} \
                          else (x) = 0
# define MAKE_HOTTER(x,y) (x) += (y)
#endif

#if defined(AMIGA) && defined(__SASC)
# define GC_FAR __far
#else
# define GC_FAR
#endif

#ifdef GC_ASSERTIONS
# define GC_ASSERT(expr) \
        do { \
          if (EXPECT(!(expr), FALSE)) { \
            GC_err_printf("Assertion failure: %s:%d\n", __FILE__, __LINE__); \
            ABORT("assertion failure"); \
          } \
        } while (0)
#else
# define GC_ASSERT(expr)
#endif

#include "gc/gc_inline.h"
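/* Worked example (illustrative): "hotter" means closer to the stack   */
/* top.  With STACK_GROWS_DOWN, a hotter address is a numerically      */
/* smaller one, so for a ptr_t p, MAKE_HOTTER(p, 4) is p -= 4, while   */
/* MAKE_COOLER(p, 4) adds 4 but saturates at (ptr_t)GC_WORD_MAX        */
/* instead of wrapping around the top of the address space.            */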
/*********************************/
/*                               */
/* Definitions for conservative  */
/* collector                     */
/*                               */
/*********************************/

/*********************************/
/*                               */
/* Easily changeable parameters  */
/*                               */
/*********************************/

/* #define ALL_INTERIOR_POINTERS */
        /* Forces all pointers into the interior of an object to be    */
        /* considered valid.  Also causes the sizes of all objects to  */
        /* be inflated by at least one byte.  This should suffice to   */
        /* guarantee that in the presence of a compiler that does not  */
        /* perform garbage-collector-unsafe optimizations, all         */
        /* portable, strictly ANSI conforming C programs should be     */
        /* safely usable with malloc replaced by GC_malloc and free    */
        /* calls removed.  There are several disadvantages:            */
        /* 1. There are probably no interesting, portable, strictly    */
        /*    ANSI conforming C programs.                              */
        /* 2. This option makes it hard for the collector to allocate  */
        /*    space that is not "pointed to" by integers, etc.  Under  */
        /*    SunOS 4.X with a statically linked libc, we empirically  */
        /*    observed that it would be difficult to allocate          */
        /*    individual objects > 100 KB.  Even if only smaller       */
        /*    objects are allocated, more swap space is likely to be   */
        /*    needed.  Fortunately, much of this will never be         */
        /*    touched.                                                 */
        /* If you can easily avoid using this option, do.  If not, try */
        /* to keep individual objects small.                           */
        /* This is now really controlled at startup, through           */
        /* GC_all_interior_pointers.                                   */

EXTERN_C_BEGIN

#ifndef GC_NO_FINALIZATION
# define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
  GC_INNER void GC_notify_or_invoke_finalizers(void);
        /* If GC_finalize_on_demand is not set, invoke eligible        */
        /* finalizers.  Otherwise: call *GC_finalizer_notifier if      */
        /* there are finalizers to be run, and we haven't called this  */
        /* procedure yet this GC cycle.                                */

  GC_INNER void GC_finalize(void);
        /* Perform all indicated finalization actions on unmarked      */
        /* objects.  Unreachable finalizable objects are enqueued for  */
        /* processing by GC_invoke_finalizers.                         */
        /* Invoked with lock.                                          */

# ifndef GC_TOGGLE_REFS_NOT_NEEDED
    GC_INNER void GC_process_togglerefs(void);
        /* Process the toggle-refs before GC starts. */
# endif
# ifndef SMALL_CONFIG
    GC_INNER void GC_print_finalization_stats(void);
# endif
#else
# define GC_INVOKE_FINALIZERS() (void)0
#endif /* GC_NO_FINALIZATION */

#if !defined(DONT_ADD_BYTE_AT_END)
# ifdef LINT2
    /* Explicitly instruct the code analysis tool that                 */
    /* GC_all_interior_pointers is assumed to have only 0 or 1 value.  */
#   define EXTRA_BYTES ((size_t)(GC_all_interior_pointers? 1 : 0))
# else
#   define EXTRA_BYTES (size_t)GC_all_interior_pointers
# endif
# define MAX_EXTRA_BYTES 1
#else
# define EXTRA_BYTES 0
# define MAX_EXTRA_BYTES 0
#endif

#ifndef LARGE_CONFIG
# define MINHINCR 16    /* Minimum heap increment, in blocks of        */
                        /* HBLKSIZE.  Must be multiple of largest page */
                        /* size.                                       */
# define MAXHINCR 2048  /* Maximum heap increment, in blocks.          */
#else
# define MINHINCR 64
# define MAXHINCR 4096
#endif

#define BL_LIMIT GC_black_list_spacing
        /* If we need a block of N bytes, and we have a block of       */
        /* N + BL_LIMIT bytes available, and N > BL_LIMIT, but all     */
        /* possible positions in it are blacklisted, we just use it    */
        /* anyway (and print a warning, if warnings are enabled).      */
        /* This risks subsequently leaking the block due to a false    */
        /* reference.  But not using the block risks unreasonable      */
        /* immediate heap growth.                                      */
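/* Worked example (illustrative): with GC_all_interior_pointers set,   */
/* EXTRA_BYTES is 1, so a GC_malloc(8) request is treated internally   */
/* as 9 bytes before granule rounding.  The extra byte ensures that a  */
/* pointer just past the end of an object (a common C idiom) still     */
/* falls within it and keeps the object live.                          */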
/*********************************/
/*                               */
/* Stack saving for debugging    */
/*                               */
/*********************************/

#ifdef NEED_CALLINFO
  struct callinfo {
    word ci_pc;             /* Caller, not callee, pc. */
#   if NARGS > 0
      word ci_arg[NARGS];   /* bit-wise complement to avoid retention */
#   endif
#   if (NFRAMES * (NARGS + 1)) % 2 == 1
      /* Likely alignment problem. */
      word ci_dummy;
#   endif
  };
#endif

#ifdef SAVE_CALL_CHAIN
  /* Fill in the pc and argument information for up to NFRAMES of my   */
  /* callers.  Ignore my frame and my callers frame.                   */
  GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]);
  GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]);
#endif

EXTERN_C_END

/*********************************/
/*                               */
/* OS interface routines         */
/*                               */
/*********************************/

#ifndef NO_CLOCK
#ifdef BSD_TIME
# undef CLOCK_TYPE
# undef GET_TIME
# undef MS_TIME_DIFF
# define CLOCK_TYPE struct timeval
# define CLOCK_TYPE_INITIALIZER { 0, 0 }
# define GET_TIME(x) \
                do { \
                  struct rusage rusage; \
                  getrusage(RUSAGE_SELF, &rusage); \
                  x = rusage.ru_utime; \
                } while (0)
# define MS_TIME_DIFF(a,b) ((unsigned long)((long)(a.tv_sec-b.tv_sec) * 1000 \
                + (long)(a.tv_usec - b.tv_usec) / 1000 \
                - (a.tv_usec < b.tv_usec \
                   && (long)(a.tv_usec - b.tv_usec) % 1000 != 0 ? 1 : 0)))
                        /* "a" time is expected to be not earlier than */
                        /* "b" one; the result has unsigned long type. */
# define NS_FRAC_TIME_DIFF(a, b) ((unsigned long) \
                ((a.tv_usec < b.tv_usec \
                  && (long)(a.tv_usec - b.tv_usec) % 1000 != 0 ? 1000L : 0) \
                 + (long)(a.tv_usec - b.tv_usec) % 1000) * 1000)
                /* The total time difference could be computed as      */
                /* MS_TIME_DIFF(a,b)*1000000 + NS_FRAC_TIME_DIFF(a,b). */
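/* Worked example (illustrative): for a = { 2 s, 300 us } and          */
/* b = { 1 s, 800 us }, the true difference is 999500 us, so           */
/* MS_TIME_DIFF(a, b) is 999 (the conditional term subtracts the       */
/* millisecond borrowed by tv_usec) and NS_FRAC_TIME_DIFF(a, b) is     */
/* 500000, matching MS_TIME_DIFF*1000000 + NS_FRAC_TIME_DIFF exactly.  */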
#elif defined(MSWIN32) || defined(MSWINCE) || defined(WINXP_USE_PERF_COUNTER)
# if defined(MSWINRT_FLAVOR) || defined(WINXP_USE_PERF_COUNTER)
#   define CLOCK_TYPE ULONGLONG
#   define GET_TIME(x) \
                do { \
                  LARGE_INTEGER freq, tc; \
                  if (!QueryPerformanceFrequency(&freq)) \
                    ABORT("QueryPerformanceFrequency requires WinXP+"); \
                  /* Note: two standalone if statements are needed to  */ \
                  /* avoid MS VC false warning about potentially       */ \
                  /* uninitialized tc variable.                        */ \
                  if (!QueryPerformanceCounter(&tc)) \
                    ABORT("QueryPerformanceCounter failed"); \
                  x = (CLOCK_TYPE)((double)tc.QuadPart/freq.QuadPart * 1e9); \
                } while (0)
    /* TODO: Call QueryPerformanceFrequency once at GC init. */
#   define MS_TIME_DIFF(a, b) ((unsigned long)(((a) - (b)) / 1000000UL))
#   define NS_FRAC_TIME_DIFF(a, b) ((unsigned long)(((a) - (b)) % 1000000UL))
# else
#   define CLOCK_TYPE DWORD
#   define GET_TIME(x) (void)(x = GetTickCount())
#   define MS_TIME_DIFF(a, b) ((unsigned long)((a) - (b)))
#   define NS_FRAC_TIME_DIFF(a, b) 0UL
# endif /* !WINXP_USE_PERF_COUNTER */

#elif defined(NN_PLATFORM_CTR)
# define CLOCK_TYPE long long
  EXTERN_C_BEGIN
  CLOCK_TYPE n3ds_get_system_tick(void);
  CLOCK_TYPE n3ds_convert_tick_to_ms(CLOCK_TYPE tick);
  EXTERN_C_END
# define GET_TIME(x) (void)(x = n3ds_get_system_tick())
# define MS_TIME_DIFF(a,b) ((unsigned long)n3ds_convert_tick_to_ms((a)-(b)))
# define NS_FRAC_TIME_DIFF(a, b) 0UL /* TODO: implement it */

#elif defined(HAVE_CLOCK_GETTIME)
# include <time.h>
# define CLOCK_TYPE struct timespec
# define CLOCK_TYPE_INITIALIZER { 0, 0 }
# if defined(_POSIX_MONOTONIC_CLOCK) && !defined(NINTENDO_SWITCH)
#   define GET_TIME(x) \
                do { \
                  if (clock_gettime(CLOCK_MONOTONIC, &x) == -1) \
                    ABORT("clock_gettime failed"); \
                } while (0)
# else
#   define GET_TIME(x) \
                do { \
                  if (clock_gettime(CLOCK_REALTIME, &x) == -1) \
                    ABORT("clock_gettime failed"); \
                } while (0)
# endif
# define MS_TIME_DIFF(a, b) \
    /* a.tv_nsec - b.tv_nsec is in range -1e9 to 1e9 exclusively */ \
    ((unsigned long)((a).tv_nsec + (1000000L*1000 - (b).tv_nsec)) / 1000000UL \
     + ((unsigned long)((a).tv_sec - (b).tv_sec) * 1000UL) - 1000UL)
# define NS_FRAC_TIME_DIFF(a, b) \
    ((unsigned long)((a).tv_nsec + (1000000L*1000 - (b).tv_nsec)) % 1000000UL)

#else /* !BSD_TIME && !LINUX && !NN_PLATFORM_CTR && !MSWIN32 */
# include <time.h>
# if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
#   include <machine/limits.h>
#   define CLOCKS_PER_SEC CLK_TCK
# endif
# if !defined(CLOCKS_PER_SEC)
#   define CLOCKS_PER_SEC 1000000
    /* This is technically a bug in the implementation.                */
    /* ANSI requires that CLOCKS_PER_SEC be defined.  But at least     */
    /* under SunOS 4.1.1, it isn't.  Also note that the combination of */
    /* ANSI C and POSIX is incredibly gross here.  The type clock_t is */
    /* used by both clock() and times().  But on some machines these   */
    /* use different notions of a clock tick; CLOCKS_PER_SEC seems to  */
    /* apply only to clock().  Hence we use it here.  On many          */
    /* machines, including SunOS, clock() actually uses units of       */
    /* microseconds (which are not really clock ticks).                */
# endif
# define CLOCK_TYPE clock_t
# define GET_TIME(x) (void)(x = clock())
# define MS_TIME_DIFF(a,b) (CLOCKS_PER_SEC % 1000 == 0 ? \
        (unsigned long)((a) - (b)) / (unsigned long)(CLOCKS_PER_SEC / 1000) \
        : ((unsigned long)((a) - (b)) * 1000) / (unsigned long)CLOCKS_PER_SEC)
        /* Avoid using double type since some targets (like ARM) might */
        /* require -lm option for double-to-long conversion.           */
# define NS_FRAC_TIME_DIFF(a, b) (CLOCKS_PER_SEC <= 1000 ? 0UL \
        : (unsigned long)(CLOCKS_PER_SEC <= (clock_t)1000000UL \
            ? (((a) - (b)) * ((clock_t)1000000UL / CLOCKS_PER_SEC) % 1000) \
              * 1000 \
            : (CLOCKS_PER_SEC <= (clock_t)1000000UL * 1000 \
                ? ((a) - (b)) * ((clock_t)1000000UL * 1000 / CLOCKS_PER_SEC) \
                : (((a) - (b)) * (clock_t)1000000UL * 1000) / CLOCKS_PER_SEC) \
          % (clock_t)1000000UL))
#endif /* !BSD_TIME && !MSWIN32 */

#ifndef CLOCK_TYPE_INITIALIZER
  /* This is used to initialize CLOCK_TYPE variables (to some value)   */
  /* to avoid "variable might be uninitialized" compiler warnings.     */
# define CLOCK_TYPE_INITIALIZER 0
#endif
#endif /* !NO_CLOCK */
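/* Usage sketch (illustrative, not compiled): the pattern for timing a */
/* phase with the macros above, independent of what CLOCK_TYPE is on   */
/* the current platform.                                               */
#if 0
  static void example_time_phase(void)
  {
    CLOCK_TYPE start_time = CLOCK_TYPE_INITIALIZER;
    CLOCK_TYPE current_time = CLOCK_TYPE_INITIALIZER;
    unsigned long elapsed_ms;

    GET_TIME(start_time);
    /* ... the work being timed ... */
    GET_TIME(current_time);
    elapsed_ms = MS_TIME_DIFF(current_time, start_time);
    /* Sub-millisecond remainder, in nanoseconds. */
    (void)NS_FRAC_TIME_DIFF(current_time, start_time);
    (void)elapsed_ms;
  }
#endif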
/* We use bzero and bcopy internally.  They may not be available.      */
#if defined(SPARC) && defined(SUNOS4) \
    || (defined(M68K) && defined(NEXT)) || defined(VAX)
# define BCOPY_EXISTS
#elif defined(AMIGA) || defined(DARWIN)
# include <string.h>
# define BCOPY_EXISTS
#elif defined(MACOS) && defined(POWERPC)
# include <MacMemory.h>
# define bcopy(x,y,n) BlockMoveData(x, y, n)
# define bzero(x,n) BlockZero(x, n)
# define BCOPY_EXISTS
#endif

#if !defined(BCOPY_EXISTS) || defined(CPPCHECK)
# include <string.h>
# define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
# define BZERO(x,n) memset(x, 0, (size_t)(n))
#else
# define BCOPY(x,y,n) bcopy((void *)(x),(void *)(y),(size_t)(n))
# define BZERO(x,n) bzero((void *)(x),(size_t)(n))
#endif
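/* Note (illustrative): BCOPY keeps the historic bcopy(source, dest,   */
/* count) argument order, the reverse of memcpy(dest, source, count);  */
/* the memcpy-based fallback above swaps the arguments accordingly.    */
/* E.g., BCOPY(p, q, 16) copies 16 bytes from p to q.                  */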
#ifdef PCR
# include "th/PCR_ThCtl.h"
#endif

EXTERN_C_BEGIN

#if defined(CPPCHECK) \
    && (defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32))
# undef TEXT
# ifdef UNICODE
#   define TEXT(s) L##s
# else
#   define TEXT(s) s
# endif
#endif /* CPPCHECK */

/* Stop and restart mutator threads. */
#ifdef PCR
# define STOP_WORLD() \
        PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
                                   PCR_allSigsBlocked, \
                                   PCR_waitForever)
# define START_WORLD() \
        PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
                                   PCR_allSigsBlocked, \
                                   PCR_waitForever)
#else
# if defined(NN_PLATFORM_CTR) || defined(NINTENDO_SWITCH) \
     || defined(GC_WIN32_THREADS) || defined(GC_PTHREADS)
    GC_INNER void GC_stop_world(void);
    GC_INNER void GC_start_world(void);
#   define STOP_WORLD() GC_stop_world()
#   define START_WORLD() GC_start_world()
# else
    /* Just do a sanity check: we are not inside GC_do_blocking(). */
#   define STOP_WORLD() GC_ASSERT(GC_blocked_sp == NULL)
#   define START_WORLD()
# endif
#endif

#ifdef THREADS
  GC_EXTERN GC_on_thread_event_proc GC_on_thread_event;
#endif

/* Abandon ship. */
#if defined(SMALL_CONFIG) || defined(PCR)
# define GC_on_abort(msg) (void)0 /* be silent on abort */
#else
  GC_API_PRIV GC_abort_func GC_on_abort;
#endif
#if defined(CPPCHECK)
# define ABORT(msg) { GC_on_abort(msg); abort(); }
#elif defined(PCR)
# define ABORT(s) PCR_Base_Panic(s)
#else
# if defined(MSWIN_XBOX1) && !defined(DebugBreak)
#   define DebugBreak() __debugbreak()
# elif defined(MSWINCE) && !defined(DebugBreak) \
       && (!defined(UNDER_CE) || (defined(__MINGW32CE__) && !defined(ARM32)))
    /* This simplifies linking for WinCE (and, probably, doesn't hurt  */
    /* debugging much); use -DDebugBreak=DebugBreak to override this   */
    /* behavior if really needed.  This is also a workaround for the   */
    /* x86mingw32ce toolchain (if it is still declaring DebugBreak()   */
    /* instead of defining it as a macro).                             */
#   define DebugBreak() _exit(-1) /* there is no abort() in WinCE */
# endif
# if defined(MSWIN32) && (defined(NO_DEBUGGING) || defined(LINT2))
    /* A more user-friendly abort after showing fatal message. */
#   define ABORT(msg) (GC_on_abort(msg), _exit(-1))
                /* Exit on error without running "at-exit" callbacks. */
# elif defined(MSWINCE) && defined(NO_DEBUGGING)
#   define ABORT(msg) (GC_on_abort(msg), ExitProcess(-1))
# elif defined(MSWIN32) || defined(MSWINCE)
#   if defined(_CrtDbgBreak) && defined(_DEBUG) && defined(_MSC_VER)
#     define ABORT(msg) { GC_on_abort(msg); \
                          _CrtDbgBreak() /* __debugbreak() */; }
#   else
#     define ABORT(msg) { GC_on_abort(msg); DebugBreak(); }
                /* Note that on a WinCE box, this could be silently    */
                /* ignored (i.e., the program is not aborted);         */
                /* DebugBreak is a statement in some toolchains.       */
#   endif
# else
#   define ABORT(msg) (GC_on_abort(msg), abort())
# endif /* !MSWIN32 */
#endif /* !PCR */

/* For abort message with 1-3 arguments.  C_msg and C_fmt should be    */
/* literals.  C_msg should not contain format specifiers.  Arguments   */
/* should match their format specifiers.                               */
#define ABORT_ARG1(C_msg, C_fmt, arg1) \
                MACRO_BLKSTMT_BEGIN \
                  GC_ERRINFO_PRINTF(C_msg /* + */ C_fmt "\n", arg1); \
                  ABORT(C_msg); \
                MACRO_BLKSTMT_END
#define ABORT_ARG2(C_msg, C_fmt, arg1, arg2) \
                MACRO_BLKSTMT_BEGIN \
                  GC_ERRINFO_PRINTF(C_msg /* + */ C_fmt "\n", arg1, arg2); \
                  ABORT(C_msg); \
                MACRO_BLKSTMT_END
#define ABORT_ARG3(C_msg, C_fmt, arg1, arg2, arg3) \
                MACRO_BLKSTMT_BEGIN \
                  GC_ERRINFO_PRINTF(C_msg /* + */ C_fmt "\n", \
                                    arg1, arg2, arg3); \
                  ABORT(C_msg); \
                MACRO_BLKSTMT_END

/* Same as ABORT but does not have 'no-return' attribute.              */
/* ABORT on a dummy condition (which is always true).                  */
#define ABORT_RET(msg) \
              if ((signed_word)GC_current_warn_proc == -1) {} else ABORT(msg)

/* Exit abnormally, but without making a mess (e.g. out of memory). */
#ifdef PCR
# define EXIT() PCR_Base_Exit(1,PCR_waitForever)
#else
# define EXIT() (GC_on_abort(NULL), exit(1 /* EXIT_FAILURE */))
#endif

/* Print warning message, e.g. almost out of memory.                   */
/* The argument (if any) format specifier should be:                   */
/* "%s", "%p", "%"WARN_PRIdPTR or "%"WARN_PRIuPTR.                     */
#define WARN(msg, arg) \
    (*GC_current_warn_proc)((/* no const */ char *)("GC Warning: " msg), \
                            (word)(arg))
GC_EXTERN GC_warn_proc GC_current_warn_proc;

/* Print format type macro for decimal signed_word value passed WARN().*/
/* This could be redefined for Win64 or LLP64, but typically should    */
/* not be done as the WARN format string is, possibly, processed on    */
/* the client side, so non-standard print type modifiers (like MS      */
/* "I64d") should be avoided here if possible.                         */
#ifndef WARN_PRIdPTR
  /* Assume sizeof(void *) == sizeof(long) or a little-endian machine. */
# define WARN_PRIdPTR "ld"
# define WARN_PRIuPTR "lu"
#endif

/* A tagging macro (for a code static analyzer) to indicate that the   */
/* string obtained from an untrusted source (e.g., argv[], getenv) is  */
/* safe to use in a vulnerable operation (e.g., open, exec).           */
#define TRUSTED_STRING(s) (char*)COVERT_DATAFLOW(s)

/* Get environment entry. */
#ifdef GC_READ_ENV_FILE
  GC_INNER char * GC_envfile_getenv(const char *name);
# define GETENV(name) GC_envfile_getenv(name)
#elif defined(NO_GETENV) && !defined(CPPCHECK)
# define GETENV(name) NULL
#elif defined(EMPTY_GETENV_RESULTS)
  /* Workaround for a reputed Wine bug. */
  GC_INLINE char * fixed_getenv(const char *name)
  {
    char *value = getenv(name);
    return value != NULL && *value != '\0' ? value : NULL;
  }
# define GETENV(name) fixed_getenv(name)
#else
# define GETENV(name) getenv(name)
#endif

EXTERN_C_END
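/* Usage sketch (illustrative): a typical warning call is              */
/* WARN("Out of Memory! Returning NULL!\n", 0); the single argument is */
/* widened to word, which is why only the pointer-sized format macros  */
/* above may appear in the message.                                    */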
#if defined(DARWIN)
# include <mach/thread_status.h>
# ifndef MAC_OS_X_VERSION_MAX_ALLOWED
#   include <AvailabilityMacros.h>
                /* Include this header just to import the above macro. */
# endif
# if defined(POWERPC)
#   if CPP_WORDSZ == 32
#     define GC_THREAD_STATE_T ppc_thread_state_t
#   else
#     define GC_THREAD_STATE_T ppc_thread_state64_t
#     define GC_MACH_THREAD_STATE PPC_THREAD_STATE64
#     define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE64_COUNT
#   endif
# elif defined(I386) || defined(X86_64)
#   if CPP_WORDSZ == 32
#     if defined(i386_THREAD_STATE_COUNT) \
         && !defined(x86_THREAD_STATE32_COUNT)
        /* Use old naming convention for 32-bit x86. */
#       define GC_THREAD_STATE_T i386_thread_state_t
#       define GC_MACH_THREAD_STATE i386_THREAD_STATE
#       define GC_MACH_THREAD_STATE_COUNT i386_THREAD_STATE_COUNT
#     else
#       define GC_THREAD_STATE_T x86_thread_state32_t
#       define GC_MACH_THREAD_STATE x86_THREAD_STATE32
#       define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT
#     endif
#   else
#     define GC_THREAD_STATE_T x86_thread_state64_t
#     define GC_MACH_THREAD_STATE x86_THREAD_STATE64
#     define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
#   endif
# elif defined(ARM32) && defined(ARM_UNIFIED_THREAD_STATE) \
       && !defined(CPPCHECK)
#   define GC_THREAD_STATE_T arm_unified_thread_state_t
#   define GC_MACH_THREAD_STATE ARM_UNIFIED_THREAD_STATE
#   define GC_MACH_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT
# elif defined(ARM32)
#   define GC_THREAD_STATE_T arm_thread_state_t
#   ifdef ARM_MACHINE_THREAD_STATE_COUNT
#     define GC_MACH_THREAD_STATE ARM_MACHINE_THREAD_STATE
#     define GC_MACH_THREAD_STATE_COUNT ARM_MACHINE_THREAD_STATE_COUNT
#   endif
# elif defined(AARCH64)
#   define GC_THREAD_STATE_T arm_thread_state64_t
#   define GC_MACH_THREAD_STATE ARM_THREAD_STATE64
#   define GC_MACH_THREAD_STATE_COUNT ARM_THREAD_STATE64_COUNT
# elif !defined(CPPCHECK)
#   error define GC_THREAD_STATE_T
# endif
# ifndef GC_MACH_THREAD_STATE
#   define GC_MACH_THREAD_STATE MACHINE_THREAD_STATE
#   define GC_MACH_THREAD_STATE_COUNT MACHINE_THREAD_STATE_COUNT
# endif

# if CPP_WORDSZ == 32
#   define GC_MACH_HEADER mach_header
#   define GC_MACH_SECTION section
#   define GC_GETSECTBYNAME getsectbynamefromheader
# else
#   define GC_MACH_HEADER mach_header_64
#   define GC_MACH_SECTION section_64
#   define GC_GETSECTBYNAME getsectbynamefromheader_64
# endif

  /* Try to work out the right way to access thread state structure    */
  /* members.  The structure has changed its definition in different   */
  /* Darwin versions.  This now defaults to the (older) names without  */
  /* __, thus hopefully, not breaking any existing Makefile.direct     */
  /* builds.                                                           */
# if __DARWIN_UNIX03
#   define THREAD_FLD_NAME(x) __ ## x
# else
#   define THREAD_FLD_NAME(x) x
# endif
# if defined(ARM32) && defined(ARM_UNIFIED_THREAD_STATE)
#   define THREAD_FLD(x) ts_32.THREAD_FLD_NAME(x)
# else
#   define THREAD_FLD(x) THREAD_FLD_NAME(x)
# endif
#endif /* DARWIN */

#ifndef WASI
# include <setjmp.h>
#endif

#include <stdio.h>

#if __STDC_VERSION__ >= 201112L
# include <assert.h> /* for static_assert */
#endif

EXTERN_C_BEGIN

/*********************************/
/*                               */
/* Word-size-dependent defines   */
/*                               */
/*********************************/

#if CPP_WORDSZ == 32
# define WORDS_TO_BYTES(x) ((x)<<2)
# define BYTES_TO_WORDS(x) ((x)>>2)
# define LOGWL ((word)5)            /* log[2] of CPP_WORDSZ    */
# define modWORDSZ(n) ((n) & 0x1f)  /* n mod size of word      */
# if ALIGNMENT != 4
#   define UNALIGNED_PTRS
# endif
#endif

#if CPP_WORDSZ == 64
# define WORDS_TO_BYTES(x) ((x)<<3)
# define BYTES_TO_WORDS(x) ((x)>>3)
# define LOGWL ((word)6)            /* log[2] of CPP_WORDSZ    */
# define modWORDSZ(n) ((n) & 0x3f)  /* n mod size of word      */
# if ALIGNMENT != 8
#   define UNALIGNED_PTRS
# endif
#endif
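/* Worked example (illustrative): on a 64-bit target, LOGWL is 6, so   */
/* WORDS_TO_BYTES(3) is 24, BYTES_TO_WORDS(24) is 3, and modWORDSZ(70) */
/* is 6, i.e. bit 70 of a bit vector falls on bit 6 of its second      */
/* word.  The shift/mask forms rely on CPP_WORDSZ being a power of 2.  */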
/* The first TINY_FREELISTS free lists correspond to the first         */
/* TINY_FREELISTS multiples of GRANULE_BYTES, i.e. we keep separate    */
/* free lists for each multiple of GRANULE_BYTES up to                 */
/* (TINY_FREELISTS-1) * GRANULE_BYTES.  After that they may be spread  */
/* out further.                                                        */
#define GRANULE_BYTES GC_GRANULE_BYTES
#define TINY_FREELISTS GC_TINY_FREELISTS

#define WORDSZ ((word)CPP_WORDSZ)
#define SIGNB  ((word)1 << (WORDSZ-1))
#define BYTES_PER_WORD ((word)(sizeof (word)))
#define divWORDSZ(n) ((n) >> LOGWL) /* divide n by size of word */

#if GRANULE_BYTES == 8
# define BYTES_TO_GRANULES(n) ((n)>>3)
# define GRANULES_TO_BYTES(n) ((n)<<3)
# if CPP_WORDSZ == 64
#   define GRANULES_TO_WORDS(n) (n)
# elif CPP_WORDSZ == 32
#   define GRANULES_TO_WORDS(n) ((n)<<1)
# else
#   define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
# endif
#elif GRANULE_BYTES == 16
# define BYTES_TO_GRANULES(n) ((n)>>4)
# define GRANULES_TO_BYTES(n) ((n)<<4)
# if CPP_WORDSZ == 64
#   define GRANULES_TO_WORDS(n) ((n)<<1)
# elif CPP_WORDSZ == 32
#   define GRANULES_TO_WORDS(n) ((n)<<2)
# else
#   define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
# endif
#else
# error Bad GRANULE_BYTES value
#endif

/*********************/
/*                   */
/* Size Parameters   */
/*                   */
/*********************/

/* Heap block size, bytes.  Should be power of 2.                      */
/* Incremental GC with MPROTECT_VDB currently requires the page size   */
/* to be a multiple of HBLKSIZE.  Since most modern architectures      */
/* support variable page sizes down to 4 KB, and x86 is generally      */
/* 4 KB, we now default to 4 KB, except for Alpha: seems to be used    */
/* with 8 KB pages.                                                    */
/* SMALL_CONFIG: want less block-level fragmentation.                  */
#ifndef HBLKSIZE
# if defined(LARGE_CONFIG) || !defined(SMALL_CONFIG)
#   ifdef ALPHA
#     define CPP_LOG_HBLKSIZE 13
#   else
#     define CPP_LOG_HBLKSIZE 12
#   endif
# else
#   define CPP_LOG_HBLKSIZE 10
# endif
#else
# if HBLKSIZE == 512
#   define CPP_LOG_HBLKSIZE 9
# elif HBLKSIZE == 1024
#   define CPP_LOG_HBLKSIZE 10
# elif HBLKSIZE == 2048
#   define CPP_LOG_HBLKSIZE 11
# elif HBLKSIZE == 4096
#   define CPP_LOG_HBLKSIZE 12
# elif HBLKSIZE == 8192
#   define CPP_LOG_HBLKSIZE 13
# elif HBLKSIZE == 16384
#   define CPP_LOG_HBLKSIZE 14
# elif HBLKSIZE == 32768
#   define CPP_LOG_HBLKSIZE 15
# elif HBLKSIZE == 65536
#   define CPP_LOG_HBLKSIZE 16
# elif !defined(CPPCHECK)
#   error Bad HBLKSIZE value
# endif
# undef HBLKSIZE
#endif

#define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
#define LOG_HBLKSIZE ((size_t)CPP_LOG_HBLKSIZE)
#define HBLKSIZE ((size_t)CPP_HBLKSIZE)

#define GC_SQRT_SIZE_MAX ((((size_t)1) << (WORDSZ / 2)) - 1)

/* Max size objects supported by freelist (larger objects are          */
/* allocated directly with allchblk(), by rounding to the next         */
/* multiple of HBLKSIZE).                                              */
#define CPP_MAXOBJBYTES (CPP_HBLKSIZE/2)
#define MAXOBJBYTES ((size_t)CPP_MAXOBJBYTES)
#define CPP_MAXOBJWORDS BYTES_TO_WORDS(CPP_MAXOBJBYTES)
#define MAXOBJWORDS ((size_t)CPP_MAXOBJWORDS)
#define CPP_MAXOBJGRANULES BYTES_TO_GRANULES(CPP_MAXOBJBYTES)
#define MAXOBJGRANULES ((size_t)CPP_MAXOBJGRANULES)

#define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)

#define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
        /* Equivalent to subtracting 2 hblk pointers.  We do it this   */
        /* way because a compiler should find it hard to use an        */
        /* integer division instead of a shift.  The bundled SunOS 4.1 */
        /* o.w. sometimes pessimizes the subtraction to involve a call */
        /* to .div.                                                    */

#define modHBLKSZ(n) ((n) & (HBLKSIZE-1))

#define HBLKPTR(objptr) ((struct hblk *)(((word)(objptr)) \
                          & ~(word)(HBLKSIZE-1)))
#define HBLKDISPL(objptr) modHBLKSZ((size_t)(objptr))
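/* Worked example (illustrative): with the default 4 KB HBLKSIZE, an   */
/* object pointer 0x12345678 lies in the heap block                    */
/* HBLKPTR(0x12345678) == 0x12345000 at displacement                   */
/* HBLKDISPL(0x12345678) == 0x678; the masking works because heap      */
/* blocks are HBLKSIZE-aligned.                                        */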
/* Round up allocation size (in bytes) to a multiple of a granule. */
#define ROUNDUP_GRANULE_SIZE(lb) /* lb should have no side-effect */ \
            (SIZET_SAT_ADD(lb, GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1))

/* Round up byte allocation request (after adding EXTRA_BYTES) to      */
/* a multiple of a granule, then convert it to granules.               */
#define ALLOC_REQUEST_GRANS(lb) /* lb should have no side-effect */ \
        BYTES_TO_GRANULES(SIZET_SAT_ADD(lb, GRANULE_BYTES - 1 + EXTRA_BYTES))

#if MAX_EXTRA_BYTES == 0
# define ADD_EXTRA_BYTES(lb) (lb)
# define SMALL_OBJ(bytes) EXPECT((bytes) <= MAXOBJBYTES, TRUE)
#else
# define ADD_EXTRA_BYTES(lb) /* lb should have no side-effect */ \
            SIZET_SAT_ADD(lb, EXTRA_BYTES)
# define SMALL_OBJ(bytes) /* bytes argument should have no side-effect */ \
            (EXPECT((bytes) <= MAXOBJBYTES - MAX_EXTRA_BYTES, TRUE) \
             || (bytes) <= MAXOBJBYTES - EXTRA_BYTES)
        /* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES.  */
        /* But we try to avoid looking up EXTRA_BYTES.                 */
#endif

/* Hash table representation of sets of pages.  Implements a map from  */
/* aligned HBLKSIZE chunks of the address space to one bit each.       */
/* This assumes it is OK to spuriously set bits, e.g. because multiple */
/* addresses are represented by a single location.  Used by            */
/* black-listing code, and perhaps by dirty bit maintenance code.      */
#ifndef LOG_PHT_ENTRIES
# ifdef LARGE_CONFIG
#   if CPP_WORDSZ == 32
#     define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks,     */
                                /* which is >= 4 GB.  Each table takes */
                                /* 128 KB, some of which may never be  */
                                /* touched.                            */
#   else
#     define LOG_PHT_ENTRIES 21 /* Collisions likely at 2M blocks,     */
                                /* which is >= 8 GB.  Each table takes */
                                /* 256 KB, some of which may never be  */
                                /* touched.                            */
#   endif
# elif !defined(SMALL_CONFIG)
#   define LOG_PHT_ENTRIES 18   /* Collisions are likely if heap grows */
                                /* to more than 256K hblks >= 1 GB.    */
                                /* Each hash table occupies 32 KB.     */
                                /* Even for somewhat smaller heaps,    */
                                /* say half that, collisions may be an */
                                /* issue because we blacklist          */
                                /* addresses outside the heap.         */
# else
#   define LOG_PHT_ENTRIES 15   /* Collisions are likely if heap grows */
                                /* to more than 32K hblks (128 MB).    */
                                /* Each hash table occupies 4 KB.      */
# endif
#endif /* !LOG_PHT_ENTRIES */

#define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
#define PHT_SIZE (PHT_ENTRIES >> LOGWL)
typedef word page_hash_table[PHT_SIZE];

#define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))

#define get_pht_entry_from_index(bl, index) \
                (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
#define set_pht_entry_from_index(bl, index) \
                (void)((bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index))

#if defined(THREADS) && defined(AO_HAVE_or)
  /* And, one more version for GC_add_to_black_list_normal/stack       */
  /* (invoked indirectly by GC_do_local_mark) and                      */
  /* async_set_pht_entry_from_index (invoked by GC_dirty or the write  */
  /* fault handler).                                                   */
# define set_pht_entry_from_index_concurrent(bl, index) \
                AO_or((volatile AO_t *)&(bl)[divWORDSZ(index)], \
                      (AO_t)((word)1 << modWORDSZ(index)))
#else
# define set_pht_entry_from_index_concurrent(bl, index) \
                set_pht_entry_from_index(bl, index)
#endif
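/* Usage sketch (illustrative): to blacklist the page containing an    */
/* address p, one does set_pht_entry_from_index(bl, PHT_HASH(p)); a    */
/* later get_pht_entry_from_index(bl, PHT_HASH(q)) then answers 1 for  */
/* any q in the same HBLKSIZE chunk and, by design, possibly for       */
/* unrelated chunks hashing to the same bit.                           */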
/********************************************/
/*                                          */
/*           H e a p   B l o c k s          */
/*                                          */
/********************************************/

#define MARK_BITS_PER_HBLK (HBLKSIZE/GRANULE_BYTES) /* upper bound */
        /* We allocate 1 bit per allocation granule.  If               */
        /* MARK_BIT_PER_GRANULE is defined, we use every nth bit,      */
        /* where n is the number of allocation granules per object.    */
        /* If MARK_BIT_PER_OBJ is defined, we only use the initial     */
        /* group of mark bits, and it is safe to allocate smaller      */
        /* header for large objects.                                   */

union word_ptr_ao_u {
  word w;
  signed_word sw;
  void *vp;
# ifdef PARALLEL_MARK
    volatile AO_t ao;
# endif
};

/* We maintain layout maps for heap blocks containing objects of a     */
/* given size.  Each entry in this map describes a byte offset and has */
/* the following type.                                                 */
struct hblkhdr {
  struct hblk * hb_next;    /* Link field for hblk free list and for   */
                            /* lists of chunks waiting to be           */
                            /* reclaimed.                              */
  struct hblk * hb_prev;    /* Backwards link for free list.           */
  struct hblk * hb_block;   /* The corresponding block.                */
  unsigned char hb_obj_kind;
                /* Kind of objects in the block.  Each kind identifies */
                /* a mark procedure and a set of list headers.         */
                /* Sometimes called regions.                           */
  unsigned char hb_flags;
#   define IGNORE_OFF_PAGE 1  /* Ignore pointers that do not point to  */
                              /* the first hblk of this object.        */
#   define WAS_UNMAPPED 2     /* This is a free block, which has been  */
                              /* unmapped from the address space.      */
                              /* GC_remap must be invoked on it before */
                              /* it can be reallocated.                */
                              /* Only set with USE_MUNMAP.             */
#   define FREE_BLK 4         /* Block is free, i.e. not in use.       */
#   ifdef ENABLE_DISCLAIM
#     define HAS_DISCLAIM 8   /* This kind has a callback on reclaim.  */
#     define MARK_UNCONDITIONALLY 0x10
                              /* Mark from all objects, marked or not. */
                              /* Used to mark objects needed by        */
                              /* reclaim notifier.                     */
#   endif
#   ifdef MARK_BIT_PER_GRANULE
#     define LARGE_BLOCK 0x20
#   endif
  unsigned short hb_last_reclaimed;
                /* Value of GC_gc_no when block was last allocated or  */
                /* swept.  May wrap.  For a free block, this is        */
                /* maintained only for USE_MUNMAP, and indicates when  */
                /* the header was allocated, or when the size of the   */
                /* block last changed.                                 */
# ifdef MARK_BIT_PER_OBJ
    unsigned32 hb_inv_sz;     /* A good upper bound for 2**32/hb_sz.   */
                              /* For large objects, we use             */
                              /* LARGE_INV_SZ.                         */
#   define LARGE_INV_SZ (1 << 16)
# endif
  word hb_sz;   /* If in use, size in bytes, of objects in the block.  */
                /* If free, the size in bytes of the whole block.      */
                /* We assume that this is convertible to signed_word   */
                /* without generating a negative result.  We avoid     */
                /* generating free blocks larger than that.            */
  word hb_descr;              /* Object descriptor for marking.  See   */
                              /* gc_mark.h.                            */
# ifdef MARK_BIT_PER_GRANULE
    unsigned short * hb_map;  /* Essentially a table of remainders     */
                              /* mod BYTES_TO_GRANULES(hb_sz), except  */
                              /* for large blocks.  See GC_obj_map.    */
# endif
# ifdef PARALLEL_MARK
    volatile AO_t hb_n_marks; /* Number of set mark bits, excluding    */
                              /* the one always set at the end.        */
                              /* Currently it is concurrently updated  */
                              /* and hence only approximate.  But a    */
                              /* zero value does guarantee that the    */
                              /* block contains no marked objects.     */
                              /* Ensuring this property means that we  */
                              /* never decrement it to zero during a   */
                              /* collection, and hence the count may   */
                              /* be one too high.  Due to concurrent   */
                              /* updates, an arbitrary number of       */
                              /* increments, but not all of them (!)   */
                              /* may be lost, hence it may in theory   */
                              /* be much too low.                      */
                              /* The count may also be too high if     */
                              /* multiple mark threads mark the same   */
                              /* object due to a race.                 */
# else
    size_t hb_n_marks;        /* Without parallel marking, the count   */
                              /* is accurate.                          */
# endif
# ifdef USE_MARK_BYTES
#   define MARK_BITS_SZ (MARK_BITS_PER_HBLK + 1)
        /* Unlike the other case, this is in units of bytes.  Since we */
        /* force double-word alignment, we need at most one mark bit   */
        /* per 2 words.  But we do allocate and set one extra mark bit */
        /* to avoid an explicit check for the partial object at the    */
        /* end of each block.                                          */
    union {
      char _hb_marks[MARK_BITS_SZ];
                /* The i'th byte is 1 if the object starting at        */
                /* granule i or object i is marked, 0 otherwise.       */
                /* The mark bit for the "one past the end" object is   */
                /* always set to avoid a special case test in the      */
                /* marker.                                             */
      word dummy; /* Force word alignment of mark bytes. */
    } _mark_byte_union;
#   define hb_marks _mark_byte_union._hb_marks
# else
#   define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ + 1)
    word hb_marks[MARK_BITS_SZ];
# endif /* !USE_MARK_BYTES */
};

#define ANY_INDEX 23 /* "Random" mark bit index for assertions */

/* Heap block body. */
#define HBLK_WORDS (HBLKSIZE/sizeof(word))
#define HBLK_GRANULES (HBLKSIZE/GRANULE_BYTES)

/* The number of objects in a block dedicated to a certain size.       */
/* May erroneously yield zero (instead of one) for large objects.      */
#define HBLK_OBJS(sz_in_bytes) (HBLKSIZE/(sz_in_bytes))

struct hblk {
  char hb_body[HBLKSIZE];
};

#define HBLK_IS_FREE(hdr) (((hdr) -> hb_flags & FREE_BLK) != 0)

#define OBJ_SZ_TO_BLOCKS(lb) divHBLKSZ((lb) + HBLKSIZE-1)
#define OBJ_SZ_TO_BLOCKS_CHECKED(lb) /* lb should have no side-effect */ \
                                divHBLKSZ(SIZET_SAT_ADD(lb, HBLKSIZE-1))
        /* Size of block (in units of HBLKSIZE) needed to hold objects */
        /* of given lb (in bytes).  The checked variant prevents wrap  */
        /* around.                                                     */

/* Object free list link. */
#define obj_link(p) (*(void **)(p))

#define LOG_MAX_MARK_PROCS 6
#define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)

/* Root sets.  Logically private to mark_rts.c.  But we don't want the */
/* tables scanned, so we put them here.                                */

/* MAX_ROOT_SETS is the maximum number of ranges that can be           */
/* registered as static roots.                                         */
#ifdef LARGE_CONFIG
# define MAX_ROOT_SETS 8192
#elif !defined(SMALL_CONFIG)
# define MAX_ROOT_SETS 2048
#else
# define MAX_ROOT_SETS 512
#endif

#define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
        /* Maximum number of segments that can be excluded from root   */
        /* sets.                                                       */

/* Data structure for excluded static roots. */
struct exclusion {
  ptr_t e_start;
  ptr_t e_end;
};

/* Data structure for list of root sets.                               */
/* We keep a hash table, so that we can filter out duplicate           */
/* additions.  Under Win32, we need to do a better job of filtering    */
/* overlaps, so we resort to sequential search, and pay the price.     */
struct roots {
  ptr_t r_start; /* multiple of word size */
  ptr_t r_end;   /* multiple of word size and greater than r_start */
# if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
    struct roots * r_next;
# endif
  GC_bool r_tmp; /* Delete before registering new dynamic libraries. */
};
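/* Worked example (illustrative): with a 4 KB HBLKSIZE,                */
/* OBJ_SZ_TO_BLOCKS(5000) is divHBLKSZ(5000 + 4095) == 2 blocks, and   */
/* HBLK_OBJS(512) is 8 objects per block; for sz_in_bytes above        */
/* HBLKSIZE the quotient truncates to 0, which is the erroneous-zero   */
/* case noted above.                                                   */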
#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
  /* Size of hash table index to roots. */
# define LOG_RT_SIZE 6
# define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
#endif

#if (!defined(MAX_HEAP_SECTS) || defined(CPPCHECK)) \
    && (defined(CYGWIN32) || defined(MSWIN32) || defined(MSWINCE) \
        || defined(USE_PROC_FOR_LIBRARIES))
# ifdef LARGE_CONFIG
#   if CPP_WORDSZ > 32
#     define MAX_HEAP_SECTS 81920
#   else
#     define MAX_HEAP_SECTS 7680
#   endif
# elif defined(SMALL_CONFIG) && !defined(USE_PROC_FOR_LIBRARIES)
#   if defined(PARALLEL_MARK) && (defined(MSWIN32) || defined(CYGWIN32))
#     define MAX_HEAP_SECTS 384
#   else
#     define MAX_HEAP_SECTS 128 /* Roughly 256 MB (128*2048*1024) */
#   endif
# elif CPP_WORDSZ > 32
#   define MAX_HEAP_SECTS 1024  /* Roughly 8 GB */
# else
#   define MAX_HEAP_SECTS 512   /* Roughly 4 GB */
# endif
#endif /* !MAX_HEAP_SECTS */

typedef struct GC_ms_entry {
  ptr_t mse_start;  /* First word of object, word aligned. */
  union word_ptr_ao_u mse_descr;
                    /* Descriptor; low order two bits are tags, as     */
                    /* described in gc_mark.h.                         */
} mse;

typedef int mark_state_t;   /* Current state of marking.  Used to      */
                            /* remember where we are during concurrent */
                            /* marking.                                */

struct disappearing_link;
struct finalizable_object;

struct dl_hashtbl_s {
  struct disappearing_link **head;
  word entries;
  unsigned log_size;
};

struct fnlz_roots_s {
  struct finalizable_object **fo_head;
  /* List of objects that should be finalized now: */
  struct finalizable_object *finalize_now;
};

union toggle_ref_u {
  /* The lowest bit is used to distinguish between choices. */
  void *strong_ref;
  GC_hidden_pointer weak_ref;
};

/* Extended descriptors.  GC_typed_mark_proc understands these.  These */
/* are used for simple objects that are larger than what can be        */
/* described by a BITMAP_BITS sized bitmap.                            */
typedef struct {
  word ed_bitmap;       /* lsb corresponds to first word. */
  GC_bool ed_continued; /* next entry is continuation.    */
} typed_ext_descr_t;

struct HeapSect {
  ptr_t hs_start;
  size_t hs_bytes;
};

/* Lists of all heap blocks and free lists as well as other random     */
/* data structures that should not be scanned by the collector.        */
/* These are grouped together in a struct so that they can be easily   */
/* skipped by the GC_mark routine.  The ordering is weird to make      */
/* GC_malloc faster by keeping the important fields sufficiently close */
/* together that a single load of a base register will do.  Scalars    */
/* that could easily appear to be pointers are also put here.  The     */
/* main fields should precede any conditionally included fields, so    */
/* that gc_inline.h will work even if a different set of macros is     */
/* defined when the client is compiled.                                */
struct _GC_arrays {
  word _heapsize;           /* Heap size in bytes (value never goes    */
                            /* down).                                  */
  word _requested_heapsize; /* Heap size due to explicit expansion.    */
# define GC_heapsize_on_gc_disable GC_arrays._heapsize_on_gc_disable
  word _heapsize_on_gc_disable;
  ptr_t _last_heap_addr;
  word _large_free_bytes;
        /* Total bytes contained in blocks on large object free list. */
  word _large_allocd_bytes;
        /* Total number of bytes in allocated large objects blocks.    */
        /* For the purposes of this counter and the next one only, a   */
        /* large object is one that occupies a block of at least       */
        /* 2*HBLKSIZE.                                                 */
  word _max_large_allocd_bytes;
        /* Maximum number of bytes that were ever allocated in large   */
        /* object blocks.  This is used to help decide when it is safe */
        /* to split up a large block.                                  */
  word _bytes_allocd_before_gc;
        /* Number of bytes allocated before this collection cycle. */
# define GC_our_mem_bytes GC_arrays._our_mem_bytes
  word _our_mem_bytes;
# ifndef SEPARATE_GLOBALS
#   define GC_bytes_allocd GC_arrays._bytes_allocd
    word _bytes_allocd;
        /* Number of bytes allocated during this collection cycle. */
# endif
  word _bytes_dropped;
        /* Number of black-listed bytes dropped during GC cycle as a   */
        /* result of repeated scanning during allocation attempts.     */
        /* These are treated largely as allocated, even though they    */
        /* are not useful to the client.                               */
  word _bytes_finalized;
        /* Approximate number of bytes in objects (and headers) that   */
        /* became ready for finalization in the last collection.       */
  word _bytes_freed;
        /* Number of explicitly deallocated bytes of memory since last */
        /* collection.                                                 */
  word _finalizer_bytes_freed;
        /* Bytes of memory explicitly deallocated while finalizers     */
        /* were running.  Used to approximate memory explicitly        */
        /* deallocated by finalizers.                                  */
  bottom_index *_all_bottom_indices;
        /* Pointer to the first (lowest address) bottom_index; assumes */
        /* the lock is held.                                           */
  bottom_index *_all_bottom_indices_end;
        /* Pointer to the last (highest address) bottom_index; assumes */
        /* the lock is held.                                           */
  ptr_t _scratch_free_ptr;
  hdr *_hdr_free_list;
  ptr_t _scratch_end_ptr;
        /* GC_scratch_end_ptr is end point of the current scratch      */
        /* area.                                                       */
# if defined(IRIX5) || (defined(USE_PROC_FOR_LIBRARIES) && !defined(LINUX))
#   define USE_SCRATCH_LAST_END_PTR
#   define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr
    ptr_t _scratch_last_end_ptr;
        /* GC_scratch_last_end_ptr is the end point of the last        */
        /* obtained scratch area.                                      */
        /* Used by GC_register_dynamic_libraries().                    */
# endif
# if defined(GC_ASSERTIONS) || (defined(KEEP_BACK_PTRS) && ALIGNMENT == 1)
#   define SET_REAL_HEAP_BOUNDS
#   define GC_least_real_heap_addr GC_arrays._least_real_heap_addr
#   define GC_greatest_real_heap_addr GC_arrays._greatest_real_heap_addr
    word _least_real_heap_addr;
    word _greatest_real_heap_addr;
        /* Similar to GC_least/greatest_plausible_heap_addr but do not */
        /* include future (potential) heap expansion.  Both variables  */
        /* are zero initially.                                         */
# endif
  mse *_mark_stack;
        /* Limits of stack for GC_mark routine.  All ranges between    */
        /* GC_mark_stack (incl.) and GC_mark_stack_top (incl.) still   */
        /* need to be marked from.                                     */
  mse *_mark_stack_limit;
# ifdef PARALLEL_MARK
    mse *volatile _mark_stack_top;
        /* Updated only with mark lock held, but read asynchronously.  */
        /* TODO: Use union to avoid casts to AO_t */
# else
    mse *_mark_stack_top;
# endif
  word _composite_in_use;   /* Number of bytes in the accessible       */
                            /* composite objects.                      */
  word _atomic_in_use;      /* Number of bytes in the accessible       */
                            /* atomic objects.                         */
# define GC_last_heap_growth_gc_no GC_arrays._last_heap_growth_gc_no
  word _last_heap_growth_gc_no;
        /* GC number of latest successful GC_expand_hp_inner call. */
# ifdef USE_MUNMAP
#   define GC_unmapped_bytes GC_arrays._unmapped_bytes
    word _unmapped_bytes;
#   ifdef COUNT_UNMAPPED_REGIONS
#     define GC_num_unmapped_regions GC_arrays._num_unmapped_regions
      signed_word _num_unmapped_regions;
#   endif
# else
#   define GC_unmapped_bytes 0
# endif
  bottom_index * _all_nils;
# define GC_scan_ptr GC_arrays._scan_ptr
  struct hblk * _scan_ptr;
# ifdef PARALLEL_MARK
#   define GC_main_local_mark_stack GC_arrays._main_local_mark_stack
    mse *_main_local_mark_stack;
#   define GC_first_nonempty GC_arrays._first_nonempty
    volatile AO_t _first_nonempty;
        /* Lowest entry on mark stack that may be nonempty.  Updated   */
        /* only by initiating thread.                                  */
# endif
# define GC_mark_stack_size GC_arrays._mark_stack_size
  size_t _mark_stack_size;
# define GC_mark_state GC_arrays._mark_state
  mark_state_t _mark_state; /* Initialized to MS_NONE (0). */
# define GC_mark_stack_too_small GC_arrays._mark_stack_too_small
  GC_bool _mark_stack_too_small;
        /* We need a larger mark stack.  May be set by client supplied */
        /* mark routines.                                              */
# define GC_objects_are_marked GC_arrays._objects_are_marked
  GC_bool _objects_are_marked;
        /* Are there collectible marked objects in the heap? */
# ifdef ENABLE_TRACE
#   define GC_trace_addr GC_arrays._trace_addr
    ptr_t _trace_addr;
# endif
# define GC_capacity_heap_sects GC_arrays._capacity_heap_sects
  size_t _capacity_heap_sects;
# define GC_n_heap_sects GC_arrays._n_heap_sects
  word _n_heap_sects;   /* Number of separately added heap sections. */
# if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
#   define GC_n_heap_bases GC_arrays._n_heap_bases
    word _n_heap_bases; /* See GC_heap_bases. */
# endif
# ifdef USE_PROC_FOR_LIBRARIES
#   define GC_n_memory GC_arrays._n_memory
    word _n_memory;     /* Number of GET_MEM allocated memory sections. */
# endif
# ifdef GC_GCJ_SUPPORT
#   define GC_gcjobjfreelist GC_arrays._gcjobjfreelist
    ptr_t *_gcjobjfreelist;
# endif
# define GC_fo_entries GC_arrays._fo_entries
  word _fo_entries;
# ifndef GC_NO_FINALIZATION
#   define GC_dl_hashtbl GC_arrays._dl_hashtbl
#   define GC_fnlz_roots GC_arrays._fnlz_roots
#   define GC_log_fo_table_size GC_arrays._log_fo_table_size
#   ifndef GC_LONG_REFS_NOT_NEEDED
#     define GC_ll_hashtbl GC_arrays._ll_hashtbl
      struct dl_hashtbl_s _ll_hashtbl;
#   endif
    struct dl_hashtbl_s _dl_hashtbl;
    struct fnlz_roots_s _fnlz_roots;
    unsigned _log_fo_table_size;
#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
#     define GC_toggleref_arr GC_arrays._toggleref_arr
#     define GC_toggleref_array_size GC_arrays._toggleref_array_size
#     define GC_toggleref_array_capacity GC_arrays._toggleref_array_capacity
      union toggle_ref_u *_toggleref_arr;
      size_t _toggleref_array_size;
      size_t _toggleref_array_capacity;
#   endif
# endif
# ifdef TRACE_BUF
#   define GC_trace_buf_ptr GC_arrays._trace_buf_ptr
    int _trace_buf_ptr;
# endif
# ifdef ENABLE_DISCLAIM
#   define GC_finalized_kind GC_arrays._finalized_kind
    int _finalized_kind;
# endif
# define n_root_sets GC_arrays._n_root_sets
# define GC_excl_table_entries GC_arrays._excl_table_entries
  int _n_root_sets;
        /* GC_static_roots[0..n_root_sets) contains the valid root     */
        /* sets.                                                       */
  size_t _excl_table_entries;   /* Number of entries in use. */
# ifdef THREADS
#   define GC_roots_were_cleared GC_arrays._roots_were_cleared
    GC_bool _roots_were_cleared;
# endif
# define GC_explicit_typing_initialized GC_arrays._explicit_typing_initialized
# define GC_ed_size GC_arrays._ed_size
# define GC_avail_descr GC_arrays._avail_descr
# define GC_ext_descriptors GC_arrays._ext_descriptors
# ifdef AO_HAVE_load_acquire
    volatile AO_t _explicit_typing_initialized;
# else
    GC_bool _explicit_typing_initialized;
# endif
  size_t _ed_size;      /* Current size of above arrays. */
  size_t _avail_descr;  /* Next available slot.          */
  typed_ext_descr_t *_ext_descriptors;
        /* Points to array of extended descriptors. */
  GC_mark_proc _mark_procs[MAX_MARK_PROCS];
        /* Table of user-defined mark procedures.  There is a small    */
        /* number of these, which can be referenced by DS_PROC mark    */
        /* descriptors.  See gc_mark.h.                                */
  char _modws_valid_offsets[sizeof(word)];
        /* GC_valid_offsets[i] ==>                                     */
        /* GC_modws_valid_offsets[i%sizeof(word)]                      */
# if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
#   define GC_root_index GC_arrays._root_index
    struct roots * _root_index[RT_SIZE];
# endif
# ifdef SAVE_CALL_CHAIN
#   define GC_last_stack GC_arrays._last_stack
    struct callinfo _last_stack[NFRAMES];
        /* Stack at last garbage collection.  Useful for debugging     */
        /* mysterious object disappearances.  In the multi-threaded    */
        /* case, we currently only save the calling stack.             */
# endif
# ifndef SEPARATE_GLOBALS
#   define GC_objfreelist GC_arrays._objfreelist
    void *_objfreelist[MAXOBJGRANULES+1];
                        /* free list for objects */
#   define GC_aobjfreelist GC_arrays._aobjfreelist
    void *_aobjfreelist[MAXOBJGRANULES+1];
                        /* free list for atomic objects */
# endif
  void *_uobjfreelist[MAXOBJGRANULES+1];
                        /* Uncollectible but traced objects.  Objects  */
                        /* on this and _auobjfreelist are always       */
                        /* marked, except during garbage collections.  */
# ifdef GC_ATOMIC_UNCOLLECTABLE
#   define GC_auobjfreelist GC_arrays._auobjfreelist
    void *_auobjfreelist[MAXOBJGRANULES+1];
                /* Atomic uncollectible but traced objects. */
# endif
  size_t _size_map[MAXOBJBYTES+1];
        /* Number of granules to allocate when asked for a certain     */
        /* number of bytes (plus EXTRA_BYTES).  Should be accessed     */
        /* with the allocation lock held.                              */
# ifdef MARK_BIT_PER_GRANULE
#   define GC_obj_map GC_arrays._obj_map
    unsigned short * _obj_map[MAXOBJGRANULES + 1];
        /* If not NULL, then a pointer to a map of valid object        */
        /* addresses.  GC_obj_map[sz_in_granules][i] is                */
        /* i % sz_in_granules.  This is now used purely to replace a   */
        /* division in the marker by a table lookup.  _obj_map[0] is   */
        /* used for large objects and contains all nonzero entries.    */
        /* This gets us out of the marker fast path without an extra   */
        /* test.                                                       */
#   define OBJ_MAP_LEN BYTES_TO_GRANULES(HBLKSIZE)
# endif
# define VALID_OFFSET_SZ HBLKSIZE
  char _valid_offsets[VALID_OFFSET_SZ];
        /* GC_valid_offsets[i] == TRUE ==> i is registered as a        */
        /* displacement.                                               */
# ifndef GC_DISABLE_INCREMENTAL
#   define GC_grungy_pages GC_arrays._grungy_pages
    page_hash_table _grungy_pages;
        /* Pages that were dirty at last GC_read_dirty. */
#   define GC_dirty_pages GC_arrays._dirty_pages
    volatile page_hash_table _dirty_pages;
        /* Pages dirtied since last GC_read_dirty. */
# endif
# if (defined(CHECKSUMS) && (defined(GWW_VDB) || defined(SOFT_VDB))) \
     || defined(PROC_VDB)
#   define GC_written_pages GC_arrays._written_pages
    page_hash_table _written_pages; /* Pages ever dirtied */
# endif
# define GC_heap_sects GC_arrays._heap_sects
  struct HeapSect *_heap_sects;
        /* Heap segments potentially containing client objects. */
# if defined(USE_PROC_FOR_LIBRARIES)
#   define GC_our_memory GC_arrays._our_memory
    struct HeapSect _our_memory[MAX_HEAP_SECTS];
        /* All GET_MEM allocated memory.  Includes block headers and   */
        /* the like.                                                   */
# endif
# if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
#   define GC_heap_bases GC_arrays._heap_bases
    ptr_t _heap_bases[MAX_HEAP_SECTS];
        /* Start address of memory regions obtained from kernel. */
# endif
# ifdef MSWINCE
#   define GC_heap_lengths GC_arrays._heap_lengths
    word _heap_lengths[MAX_HEAP_SECTS];
        /* Committed lengths of memory regions obtained from kernel. */
# endif
  struct roots _static_roots[MAX_ROOT_SETS];
  struct exclusion _excl_table[MAX_EXCLUSIONS];
  /* Block header index; see gc_headers.h. */
  bottom_index * _top_index[TOP_SZ];
};

GC_API_PRIV GC_FAR struct _GC_arrays GC_arrays;

#define GC_all_nils GC_arrays._all_nils
#define GC_atomic_in_use GC_arrays._atomic_in_use
#define GC_bytes_allocd_before_gc GC_arrays._bytes_allocd_before_gc
#define GC_bytes_dropped GC_arrays._bytes_dropped
#define GC_bytes_finalized GC_arrays._bytes_finalized
#define GC_bytes_freed GC_arrays._bytes_freed
#define GC_composite_in_use GC_arrays._composite_in_use
#define GC_excl_table GC_arrays._excl_table
#define GC_finalizer_bytes_freed GC_arrays._finalizer_bytes_freed
#define GC_heapsize GC_arrays._heapsize
#define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
#define GC_large_free_bytes GC_arrays._large_free_bytes
#define GC_last_heap_addr GC_arrays._last_heap_addr
#define GC_mark_stack GC_arrays._mark_stack
#define GC_mark_stack_limit GC_arrays._mark_stack_limit
#define GC_mark_stack_top GC_arrays._mark_stack_top
#define GC_mark_procs GC_arrays._mark_procs
#define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
#define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
#define GC_requested_heapsize GC_arrays._requested_heapsize
#define GC_all_bottom_indices GC_arrays._all_bottom_indices
#define GC_all_bottom_indices_end GC_arrays._all_bottom_indices_end
#define GC_scratch_free_ptr GC_arrays._scratch_free_ptr
#define GC_hdr_free_list GC_arrays._hdr_free_list
#define GC_scratch_end_ptr GC_arrays._scratch_end_ptr
#define GC_size_map GC_arrays._size_map
#define GC_static_roots GC_arrays._static_roots
#define GC_top_index GC_arrays._top_index
#define GC_uobjfreelist GC_arrays._uobjfreelist
#define GC_valid_offsets GC_arrays._valid_offsets

#define beginGC_arrays ((ptr_t)(&GC_arrays))
#define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))

/* Object kinds: */
#ifndef MAXOBJKINDS
# define MAXOBJKINDS 16
#endif
GC_EXTERN struct obj_kind {
  void **ok_freelist;
        /* Array of free list headers for this kind of object.  Point  */
        /* either to GC_arrays or to storage allocated with            */
        /* GC_scratch_alloc.                                           */
  struct hblk **ok_reclaim_list;
        /* List headers for lists of blocks waiting to be swept.       */
        /* Indexed by object size in granules.                         */
  word ok_descriptor;
        /* Descriptor template for objects in this block. */
  GC_bool ok_relocate_descr;
        /* Add object size in bytes to descriptor template to obtain   */
        /* descriptor.  Otherwise template is used as is.              */
  GC_bool ok_init;
        /* Clear objects before putting them on the free list. */
# ifdef ENABLE_DISCLAIM
    GC_bool ok_mark_unconditionally;
        /* Mark from all, including unmarked, objects in block.  Used  */
        /* to protect objects reachable from reclaim notifiers.        */
    int (GC_CALLBACK *ok_disclaim_proc)(void * /*obj*/);
        /* The disclaim procedure is called before obj is reclaimed,   */
        /* but must also tolerate being called with object from        */
        /* freelist.  Non-zero exit prevents object from being         */
        /* reclaimed.                                                  */
#   define OK_DISCLAIM_INITZ /* comma */, FALSE, 0
# else
#   define OK_DISCLAIM_INITZ /* empty */
# endif /* !ENABLE_DISCLAIM */
} GC_obj_kinds[MAXOBJKINDS];

#define beginGC_obj_kinds ((ptr_t)(&GC_obj_kinds))
#define endGC_obj_kinds (beginGC_obj_kinds + (sizeof GC_obj_kinds))
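/* Usage sketch (illustrative): given a block header hhdr, the         */
/* properties of its objects are reached through                       */
/* GC_obj_kinds[hhdr -> hb_obj_kind], e.g. the matching free lists     */
/* via GC_obj_kinds[hhdr -> hb_obj_kind].ok_freelist.                  */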
If they were in GC_arrays, the inlined */ /* allocation code would include GC_arrays offsets (as it did), which */ /* introduce maintenance problems. */ #ifdef SEPARATE_GLOBALS extern word GC_bytes_allocd; /* Number of bytes allocated during this collection cycle. */ extern ptr_t GC_objfreelist[MAXOBJGRANULES+1]; /* free list for NORMAL objects */ # define beginGC_objfreelist ((ptr_t)(&GC_objfreelist)) # define endGC_objfreelist (beginGC_objfreelist + sizeof(GC_objfreelist)) extern ptr_t GC_aobjfreelist[MAXOBJGRANULES+1]; /* free list for atomic (PTRFREE) objects */ # define beginGC_aobjfreelist ((ptr_t)(&GC_aobjfreelist)) # define endGC_aobjfreelist (beginGC_aobjfreelist + sizeof(GC_aobjfreelist)) #endif /* SEPARATE_GLOBALS */ /* Predefined kinds: */ #define PTRFREE GC_I_PTRFREE #define NORMAL GC_I_NORMAL #define UNCOLLECTABLE 2 #ifdef GC_ATOMIC_UNCOLLECTABLE # define AUNCOLLECTABLE 3 # define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE) # define GC_N_KINDS_INITIAL_VALUE 4 #else # define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE) # define GC_N_KINDS_INITIAL_VALUE 3 #endif GC_EXTERN unsigned GC_n_kinds; GC_EXTERN size_t GC_page_size; /* May mean the allocation granularity size, not page size. */ #ifdef REAL_PAGESIZE_NEEDED GC_EXTERN size_t GC_real_page_size; #else # define GC_real_page_size GC_page_size #endif /* Round up allocation size to a multiple of a page size. */ /* GC_setpagesize() is assumed to be already invoked. */ #define ROUNDUP_PAGESIZE(lb) /* lb should have no side-effect */ \ (SIZET_SAT_ADD(lb, GC_page_size - 1) & ~(GC_page_size - 1)) /* Same as above but used to make GET_MEM() argument safe. */ #ifdef MMAP_SUPPORTED # define ROUNDUP_PAGESIZE_IF_MMAP(lb) ROUNDUP_PAGESIZE(lb) #else # define ROUNDUP_PAGESIZE_IF_MMAP(lb) (lb) #endif #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32) GC_EXTERN SYSTEM_INFO GC_sysinfo; GC_INNER GC_bool GC_is_heap_base(const void *p); #endif GC_EXTERN word GC_black_list_spacing; /* Average number of bytes between blacklisted */ /* blocks. Approximate. */ /* Counts only blocks that are */ /* "stack-blacklisted", i.e. that are */ /* problematic in the interior of an object. */ #ifdef GC_GCJ_SUPPORT extern struct hblk * GC_hblkfreelist[]; extern word GC_free_bytes[]; /* Both remain visible to GNU GCJ. */ #endif GC_EXTERN word GC_root_size; /* Total size of registered root sections. */ GC_EXTERN GC_bool GC_debugging_started; /* GC_debug_malloc has been called. */ /* This is used by GC_do_blocking[_inner](). */ struct blocking_data { GC_fn_type fn; void * client_data; /* and result */ }; /* This is used by GC_call_with_gc_active(), GC_push_all_stack_sections(). */ struct GC_traced_stack_sect_s { ptr_t saved_stack_ptr; # ifdef IA64 ptr_t saved_backing_store_ptr; ptr_t backing_store_end; # endif struct GC_traced_stack_sect_s *prev; }; #ifdef THREADS /* Process all "traced stack sections" - scan entire stack except for */ /* frames belonging to the user functions invoked by GC_do_blocking. */ GC_INNER void GC_push_all_stack_sections(ptr_t lo, ptr_t hi, struct GC_traced_stack_sect_s *traced_stack_sect); GC_EXTERN word GC_total_stacksize; /* updated on every push_all_stacks */ #else GC_EXTERN ptr_t GC_blocked_sp; GC_EXTERN struct GC_traced_stack_sect_s *GC_traced_stack_sect; /* Points to the "frame" data held in stack by */ /* the innermost GC_call_with_gc_active(). */ /* NULL if no such "frame" active. 
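*/

/* Illustrative sketch (not compiled into the collector): using        */
/* ROUNDUP_PAGESIZE() defined above to page-align a request size;      */
/* example_aligned_request() is invented for illustration.             */
#if 0
  static size_t example_aligned_request(size_t lb)
  {
    GC_ASSERT(GC_page_size != 0); /* GC_setpagesize() was invoked */
    /* With a 4 KiB page size, lb == 5000 yields 8192; the saturated */
    /* addition inside the macro avoids wrap-around for huge lb.     */
    return ROUNDUP_PAGESIZE(lb);
  }
#endif

/*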
*/ #endif /* !THREADS */ #ifdef IA64 GC_EXTERN ptr_t GC_register_stackbottom; #endif #if defined(E2K) || defined(IA64) /* Similar to GC_push_all_stack_sections() but for IA-64 registers store. */ GC_INNER void GC_push_all_register_sections(ptr_t bs_lo, ptr_t bs_hi, int eager, struct GC_traced_stack_sect_s *traced_stack_sect); #endif /* Marks are in a reserved area in */ /* each heap block. Each word has one mark bit associated */ /* with it. Only those corresponding to the beginning of an */ /* object are used. */ /* Mark bit operations */ /* * Retrieve, set, clear the nth mark bit in a given heap block. * * (Recall that bit n corresponds to nth object or allocation granule * relative to the beginning of the block, including unused words) */ #ifdef USE_MARK_BYTES # define mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n]) # define set_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n] = 1) # define clear_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n] = 0) #else /* Set mark bit correctly, even if mark bits may be concurrently */ /* accessed. */ # if defined(PARALLEL_MARK) || (defined(THREAD_SANITIZER) && defined(THREADS)) /* Workaround TSan false positive: there is no race between */ /* mark_bit_from_hdr and set_mark_bit_from_hdr when n is different */ /* (alternatively, USE_MARK_BYTES could be used). If TSan is off, */ /* AO_or() is used only if we set USE_MARK_BITS explicitly. */ # define OR_WORD(addr, bits) AO_or((volatile AO_t *)(addr), (AO_t)(bits)) # else # define OR_WORD(addr, bits) (void)(*(addr) |= (bits)) # endif # define mark_bit_from_hdr(hhdr,n) \ (((hhdr)->hb_marks[divWORDSZ(n)] >> modWORDSZ(n)) & (word)1) # define set_mark_bit_from_hdr(hhdr,n) \ OR_WORD((hhdr)->hb_marks+divWORDSZ(n), (word)1 << modWORDSZ(n)) # define clear_mark_bit_from_hdr(hhdr,n) \ ((hhdr)->hb_marks[divWORDSZ(n)] &= ~((word)1 << modWORDSZ(n))) #endif /* !USE_MARK_BYTES */ #ifdef MARK_BIT_PER_OBJ # define MARK_BIT_NO(offset, sz) (((word)(offset))/(sz)) /* Get the mark bit index corresponding to the given byte */ /* offset and size (in bytes). */ # define MARK_BIT_OFFSET(sz) 1 /* Spacing between useful mark bits. */ # define IF_PER_OBJ(x) x # define FINAL_MARK_BIT(sz) ((sz) > MAXOBJBYTES? 1 : HBLK_OBJS(sz)) /* Position of final, always set, mark bit. */ #else /* MARK_BIT_PER_GRANULE */ # define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES((word)(offset)) # define MARK_BIT_OFFSET(sz) BYTES_TO_GRANULES(sz) # define IF_PER_OBJ(x) # define FINAL_MARK_BIT(sz) \ ((sz) > MAXOBJBYTES ? MARK_BITS_PER_HBLK \ : BYTES_TO_GRANULES((sz) * HBLK_OBJS(sz))) #endif /* Important internal collector routines */ GC_INNER ptr_t GC_approx_sp(void); GC_INNER GC_bool GC_should_collect(void); GC_INNER struct hblk * GC_next_block(struct hblk *h, GC_bool allow_free); /* Get the next block whose address is at least */ /* h. Returned block is managed by GC. The */ /* block must be in use unless allow_free is */ /* true. Return 0 if there is no such block. */ GC_INNER struct hblk * GC_prev_block(struct hblk * h); /* Get the last (highest address) block whose */ /* address is at most h. Returned block is */ /* managed by GC, but may or may not be in use. */ /* Return 0 if there is no such block. */ GC_INNER void GC_mark_init(void); GC_INNER void GC_clear_marks(void); /* Clear mark bits for all heap objects. */ GC_INNER void GC_invalidate_mark_state(void); /* Tell the marker that marked */ /* objects may point to unmarked */ /* ones, and roots may point to */ /* unmarked objects. Reset mark stack. 
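*/

/* Illustrative sketch (not compiled into the collector): testing and  */
/* setting the mark bit of the object at byte displacement "offset"    */
/* within the block described by hhdr, using the macros above; the     */
/* variable names are invented for illustration.                       */
#if 0
  {
    word bit_no = MARK_BIT_NO(offset, sz); /* sz is the object size in bytes */

    if (!mark_bit_from_hdr(hhdr, bit_no)) {
      set_mark_bit_from_hdr(hhdr, bit_no);
                /* Uses AO_or() if mark bits may be accessed concurrently. */
    }
  }
#endif

/*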
*/ GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame); /* Perform about one page's worth of marking */ /* work of whatever kind is needed. Returns */ /* quickly if no collection is in progress. */ /* Return TRUE if mark phase finished. */ GC_INNER void GC_initiate_gc(void); /* Initiate a collection. */ /* If the mark state is invalid, this */ /* becomes a full collection. Otherwise */ /* it's partial. */ GC_INNER GC_bool GC_collection_in_progress(void); /* Collection is in progress, or was abandoned. */ #define GC_PUSH_ALL_SYM(sym) \ GC_push_all((/* no volatile */ void *)&(sym), \ (/* no volatile */ void *)(&(sym) + 1)) GC_INNER void GC_push_all_stack(ptr_t b, ptr_t t); /* As GC_push_all but consider */ /* interior pointers as valid. */ #ifdef NO_VDB_FOR_STATIC_ROOTS # define GC_push_conditional_static(b, t, all) \ ((void)(all), GC_push_all(b, t)) #else /* Same as GC_push_conditional (does either of GC_push_all or */ /* GC_push_selected depending on the third argument) but the caller */ /* guarantees the region belongs to the registered static roots. */ GC_INNER void GC_push_conditional_static(void *b, void *t, GC_bool all); #endif #if defined(WRAP_MARK_SOME) && defined(PARALLEL_MARK) /* GC_mark_local does not handle memory protection faults yet. So, */ /* the static data regions are scanned immediately by GC_push_roots. */ GC_INNER void GC_push_conditional_eager(void *bottom, void *top, GC_bool all); #endif /* In the threads case, we push part of the current thread stack */ /* with GC_push_all_eager when we push the registers. This gets the */ /* callee-save registers that may disappear. The remainder of the */ /* stacks are scheduled for scanning in *GC_push_other_roots, which */ /* is thread-package-specific. */ GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame); /* Push all or dirty roots. */ GC_API_PRIV GC_push_other_roots_proc GC_push_other_roots; /* Push system or application specific roots */ /* onto the mark stack. In some environments */ /* (e.g. threads environments) this is */ /* predefined to be non-zero. A client */ /* supplied replacement should also call the */ /* original function. Remains externally */ /* visible as used by some well-known 3rd-party */ /* software (e.g., ECL) currently. */ #ifdef THREADS void GC_push_thread_structures(void); #endif GC_EXTERN void (*GC_push_typed_structures)(void); /* A pointer such that we can avoid linking in */ /* the typed allocation support if unused. */ GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *), volatile ptr_t arg); #if defined(E2K) || defined(IA64) || defined(SPARC) /* Cause all stacked registers to be saved in memory. Return a */ /* pointer to the top of the corresponding memory stack. */ ptr_t GC_save_regs_in_stack(void); #endif #ifdef E2K /* Copy the full procedure stack to the provided buffer (with the */ /* given capacity). Returns either the required buffer size if it */ /* is bigger than capacity, otherwise the amount of copied bytes. */ /* May be called from a signal handler. */ GC_INNER size_t GC_get_procedure_stack(ptr_t, size_t); # if defined(CPPCHECK) # define PS_ALLOCA_BUF(sz) NULL # define ALLOCA_SAFE_LIMIT 0 # else # define PS_ALLOCA_BUF(sz) alloca(sz) /* cannot return NULL */ # ifndef ALLOCA_SAFE_LIMIT # define ALLOCA_SAFE_LIMIT (HBLKSIZE*256) # endif # endif /* !CPPCHECK */ /* Copy procedure (register) stack to a stack-allocated or */ /* memory-mapped buffer. Usable from a signal handler.
*/ /* FREE_PROCEDURE_STACK_LOCAL() must be called with the same */ /* *pbuf and *psz values before the caller function returns */ /* (thus, the buffer is valid only within the function). */ # define GET_PROCEDURE_STACK_LOCAL(pbuf, psz) \ do { \ size_t capacity = 0; \ GC_ASSERT(GC_page_size != 0); \ for (*(pbuf) = NULL; ; capacity = *(psz)) { \ *(psz) = GC_get_procedure_stack(*(pbuf), capacity); \ if (*(psz) <= capacity) break; \ if (*(psz) > ALLOCA_SAFE_LIMIT \ || EXPECT(capacity != 0, FALSE)) { \ /* Deallocate old buffer if any. */ \ if (EXPECT(capacity > ALLOCA_SAFE_LIMIT, FALSE)) \ GC_unmap_procedure_stack_buf(*(pbuf),capacity); \ *(psz) = ROUNDUP_PAGESIZE(*(psz)); \ *(pbuf) = GC_mmap_procedure_stack_buf(*(psz)); \ } else { \ /* Allocate buffer on the stack if not large. */ \ *(pbuf) = PS_ALLOCA_BUF(*(psz)); \ } \ } \ if (capacity > ALLOCA_SAFE_LIMIT \ && EXPECT(((capacity - *(psz)) \ & ~(GC_page_size-1)) != 0, FALSE)) { \ /* Ensure sz value passed to munmap() later */ \ /* matches that passed to mmap() above. */ \ *(psz) = capacity - (GC_page_size - 1); \ } \ } while (0) /* Indicate that the buffer with copied procedure stack is not needed. */ # define FREE_PROCEDURE_STACK_LOCAL(buf, sz) \ (void)((sz) > ALLOCA_SAFE_LIMIT \ ? (GC_unmap_procedure_stack_buf(buf, sz), 0) : 0) GC_INNER ptr_t GC_mmap_procedure_stack_buf(size_t); GC_INNER void GC_unmap_procedure_stack_buf(ptr_t, size_t); # ifdef THREADS /* Allocate a buffer in the GC heap (as an atomic object) and copy */ /* procedure stack there. Assumes the GC allocation lock is held. */ /* May trigger a collection (thus, cannot be used in GC_push_roots */ /* or in a signal handler). The buffer should be freed with */ /* GC_INTERNAL_FREE later when not needed (or, alternatively, it */ /* could be just garbage-collected). */ /* Similar to GET_PROCEDURE_STACK_LOCAL in other aspects. */ GC_INNER size_t GC_alloc_and_get_procedure_stack(ptr_t *pbuf); # endif #endif /* E2K */ #if defined(E2K) && defined(USE_PTR_HWTAG) /* Load value and get tag of the target memory. */ # if defined(__ptr64__) # define LOAD_TAGGED_VALUE(v, tag, p) \ do { \ word val; \ __asm__ __volatile__ ( \ "ldd, sm %[adr], 0x0, %[val]\n\t" \ "gettagd %[val], %[tag]\n" \ : [val] "=r" (val), \ [tag] "=r" (tag) \ : [adr] "r" (p)); \ v = val; \ } while (0) # elif !defined(CPPCHECK) # error Unsupported -march for e2k target # endif # define LOAD_WORD_OR_CONTINUE(v, p) \ { \ int tag LOCAL_VAR_INIT_OK; \ LOAD_TAGGED_VALUE(v, tag, p); \ if (tag != 0) continue; \ } #else # define LOAD_WORD_OR_CONTINUE(v, p) (void)(v = *(word *)(p)) #endif /* !E2K */ #if defined(AMIGA) || defined(MACOS) || defined(GC_DARWIN_THREADS) void GC_push_one(word p); /* If p points to an object, mark it */ /* and push contents on the mark stack */ /* Pointer recognition test always */ /* accepts interior pointers, i.e. this */ /* is appropriate for pointers found on */ /* stack. */ #endif #ifdef GC_WIN32_THREADS /* Same as GC_push_one but for a sequence of registers. */ GC_INNER void GC_push_many_regs(const word *regs, unsigned count); #endif #if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS) GC_INNER void GC_mark_and_push_stack(ptr_t p, ptr_t source); /* Ditto, omits plausibility test */ #else GC_INNER void GC_mark_and_push_stack(ptr_t p); #endif GC_INNER void GC_clear_hdr_marks(hdr * hhdr); /* Clear the mark bits in a header */ GC_INNER void GC_set_hdr_marks(hdr * hhdr); /* Set the mark bits in a header */ GC_INNER void GC_set_fl_marks(ptr_t p); /* Set all mark bits associated with */ /* a free list. 
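*/

/* Illustrative sketch (not compiled; E2K-specific): honoring the      */
/* GET_PROCEDURE_STACK_LOCAL()/FREE_PROCEDURE_STACK_LOCAL() contract   */
/* described above; bs_buf and bs_sz are invented names.               */
#if 0
  {
    ptr_t bs_buf;
    size_t bs_sz;

    GET_PROCEDURE_STACK_LOCAL(&bs_buf, &bs_sz);
    /* ... scan [bs_buf, bs_buf + bs_sz) for pointers ... */
    FREE_PROCEDURE_STACK_LOCAL(bs_buf, bs_sz);
    /* The buffer must not be used after this point, and both calls */
    /* must happen within the same function.                         */
  }
#endif

/*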
*/ #if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC) void GC_check_fl_marks(void **); /* Check that all mark bits */ /* associated with a free list are */ /* set. Abort if not. */ #endif #ifndef AMIGA GC_INNER #endif void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp); #ifdef USE_PROC_FOR_LIBRARIES GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e); #endif GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish); #if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \ || defined(CYGWIN32) || defined(PCR) GC_INNER void GC_register_dynamic_libraries(void); /* Add dynamic library data sections to the root set. */ #endif GC_INNER void GC_cond_register_dynamic_libraries(void); /* Remove and reregister dynamic libraries if we're */ /* configured to do that at each GC. */ /* Machine dependent startup routines */ ptr_t GC_get_main_stack_base(void); /* Cold end of stack. */ #ifdef IA64 GC_INNER ptr_t GC_get_register_stack_base(void); /* Cold end of register stack. */ #endif void GC_register_data_segments(void); #ifdef THREADS /* Both are invoked from GC_init only. */ GC_INNER void GC_thr_init(void); GC_INNER void GC_init_parallel(void); # ifndef DONT_USE_ATEXIT GC_INNER GC_bool GC_is_main_thread(void); # endif #else GC_INNER GC_bool GC_is_static_root(void *p); /* Is the address p in one of the registered static */ /* root sections? */ # ifdef TRACE_BUF void GC_add_trace_entry(char *kind, word arg1, word arg2); # endif #endif /* !THREADS */ /* Black listing: */ #ifdef PRINT_BLACK_LIST GC_INNER void GC_add_to_black_list_normal(word p, ptr_t source); /* Register bits as a possible future false */ /* reference from the heap or static data. */ # define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \ if (GC_all_interior_pointers) { \ GC_add_to_black_list_stack((word)(bits), (source)); \ } else \ GC_add_to_black_list_normal((word)(bits), (source)) GC_INNER void GC_add_to_black_list_stack(word p, ptr_t source); # define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \ GC_add_to_black_list_stack((word)(bits), (source)) #else GC_INNER void GC_add_to_black_list_normal(word p); # define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \ if (GC_all_interior_pointers) { \ GC_add_to_black_list_stack((word)(bits)); \ } else \ GC_add_to_black_list_normal((word)(bits)) GC_INNER void GC_add_to_black_list_stack(word p); # define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \ GC_add_to_black_list_stack((word)(bits)) #endif /* PRINT_BLACK_LIST */ GC_INNER void GC_promote_black_lists(void); /* Declare an end to a black listing phase. */ GC_INNER void GC_unpromote_black_lists(void); /* Approximately undo the effect of the above. */ /* This actually loses some information, but */ /* only in a reasonably safe way. */ GC_INNER ptr_t GC_scratch_alloc(size_t bytes); /* GC internal memory allocation for */ /* small objects. Deallocation is not */ /* possible. May return NULL. */ #ifdef GWW_VDB /* GC_scratch_recycle_no_gww() not used. */ #else # define GC_scratch_recycle_no_gww GC_scratch_recycle_inner #endif GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes); /* Return the memory region to the heap for reuse. */ /* Heap block layout maps: */ #ifdef MARK_BIT_PER_GRANULE GC_INNER GC_bool GC_add_map_entry(size_t sz); /* Add a heap block map for objects of */ /* size sz to obj_map. */ /* Return FALSE on failure. */ #endif GC_INNER void GC_register_displacement_inner(size_t offset); /* Version of GC_register_displacement */ /* that assumes lock is already held.
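*/

/* Illustrative sketch (not compiled into the collector): both         */
/* variants of the black-listing macros above take a source argument,  */
/* so call sites are written uniformly; when PRINT_BLACK_LIST is off,  */
/* the source argument is simply discarded.  The names q and source    */
/* are invented for illustration.                                      */
#if 0
  /* A word q found on a stack looks like a pointer but does not     */
  /* refer to a valid object: record it as a future false reference. */
  GC_ADD_TO_BLACK_LIST_STACK(q, source);
#endif

/*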
*/ /* hblk allocation: */ GC_INNER void GC_new_hblk(size_t size_in_granules, int kind); /* Allocate a new heap block, and build */ /* a free list in it. */ GC_INNER ptr_t GC_build_fl(struct hblk *h, size_t words, GC_bool clear, ptr_t list); /* Build a free list for objects of */ /* size words in block h. Append list */ /* to end of the free lists. Possibly */ /* clear objects on the list. Normally */ /* called by GC_new_hblk, but also */ /* called explicitly without GC lock. */ GC_INNER struct hblk * GC_allochblk(size_t size_in_bytes, int kind, unsigned flags, size_t align_m1); /* Allocate (and return pointer to) */ /* a heap block for objects of the */ /* given size and alignment (in bytes), */ /* searching over the appropriate free */ /* block lists; inform the marker */ /* that the found block is valid for */ /* objects of the indicated size. */ /* The client is responsible for */ /* clearing the block, if necessary. */ /* Note: we set obj_map field in the */ /* header correctly; the caller is */ /* responsible for building an object */ /* freelist in the block. */ GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags, size_t align_m1); /* Allocate a large block of size lb bytes with */ /* the requested alignment (align_m1 plus one). */ /* The block is not cleared. Assumes that */ /* EXTRA_BYTES value is already added to lb. */ /* The flags argument should be IGNORE_OFF_PAGE */ /* or 0. Calls GC_allochblk() to do the actual */ /* allocation, but also triggers GC and/or heap */ /* expansion as appropriate. Updates value of */ /* GC_bytes_allocd; does also other accounting. */ GC_INNER void GC_freehblk(struct hblk * p); /* Deallocate a heap block and mark it */ /* as invalid. */ /* Miscellaneous GC routines. */ GC_INNER GC_bool GC_expand_hp_inner(word n); GC_INNER void GC_start_reclaim(GC_bool abort_if_found); /* Restore unmarked objects to free */ /* lists, or (if abort_if_found is */ /* TRUE) report them. */ /* Sweeping of small object pages is */ /* largely deferred. */ GC_INNER void GC_continue_reclaim(word sz, int kind); /* Sweep pages of the given size and */ /* kind, as long as possible, and */ /* as long as the corresponding free */ /* list is empty. sz is in granules. */ GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old); /* Reclaim all blocks. Abort (in a */ /* consistent state) if stop_func */ /* returns TRUE. */ GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz, GC_bool init, ptr_t list, signed_word *count); /* Rebuild free list in hbp with */ /* header hhdr, with objects of size sz */ /* bytes. Add list to the end of the */ /* free list. Add the number of */ /* reclaimed bytes to *count. */ GC_INNER GC_bool GC_block_empty(hdr * hhdr); /* Block completely unmarked? */ GC_INNER int GC_CALLBACK GC_never_stop_func(void); /* Always returns 0 (FALSE). */ GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func f); /* Collect; caller must have acquired */ /* lock. Collection is aborted if f */ /* returns TRUE. Returns TRUE if it */ /* completes successfully. */ #define GC_gcollect_inner() \ (void)GC_try_to_collect_inner(GC_never_stop_func) #ifdef THREADS GC_EXTERN GC_bool GC_in_thread_creation; /* We may currently be in thread creation or destruction. */ /* Only set to TRUE while allocation lock is held. */ /* When set, it is OK to run GC from unknown thread. */ #endif GC_EXTERN GC_bool GC_is_initialized; /* GC_init() has been run.
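*/

/* Illustrative sketch (not compiled into the collector): aborting a   */
/* collection with a custom stop function, per the contract of         */
/* GC_try_to_collect_inner() above; example_budget_exceeded() is       */
/* invented for illustration.                                          */
#if 0
  static int GC_CALLBACK example_stop_func(void)
  {
    return example_budget_exceeded() ? TRUE : FALSE;
  }

  /* With the allocation lock held: */
  if (!GC_try_to_collect_inner(example_stop_func)) {
    /* Aborted by the stop function; the heap is left consistent. */
  }
  /* GC_gcollect_inner() is the unconditional form, passing */
  /* GC_never_stop_func.                                     */
#endif

/*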
*/ GC_INNER void GC_collect_a_little_inner(int n); /* Do n units worth of garbage */ /* collection work, if appropriate. */ /* A unit is an amount appropriate for */ /* HBLKSIZE bytes of allocation. */ GC_INNER void * GC_generic_malloc_aligned(size_t lb, int k, unsigned flags, size_t align_m1); GC_INNER void * GC_generic_malloc_inner(size_t lb, int k, unsigned flags); /* Allocate an object of the given */ /* kind but assuming lock already held. */ /* Should not be used to directly */ /* allocate objects requiring special */ /* handling on allocation. The flags */ /* argument should be IGNORE_OFF_PAGE */ /* or 0. In the first case the client */ /* guarantees that there will always be */ /* a pointer to the beginning (i.e. */ /* within the first hblk) of the object */ /* while it is live. */ GC_INNER GC_bool GC_collect_or_expand(word needed_blocks, unsigned flags, GC_bool retry); GC_INNER ptr_t GC_allocobj(size_t gran, int kind); /* Make the indicated free list */ /* nonempty, and return its head. */ /* The size (gran) is in granules. */ #ifdef GC_ADD_CALLER /* GC_DBG_EXTRAS is used by GC debug API functions (unlike GC_EXTRAS */ /* used by GC debug API macros) thus GC_RETURN_ADDR_PARENT (pointing */ /* to client caller) should be used if possible. */ # ifdef GC_HAVE_RETURN_ADDR_PARENT # define GC_DBG_EXTRAS GC_RETURN_ADDR_PARENT, NULL, 0 # else # define GC_DBG_EXTRAS GC_RETURN_ADDR, NULL, 0 # endif #else # define GC_DBG_EXTRAS "unknown", 0 #endif /* !GC_ADD_CALLER */ #ifdef GC_COLLECT_AT_MALLOC extern size_t GC_dbg_collect_at_malloc_min_lb; /* variable visible outside for debugging */ # define GC_DBG_COLLECT_AT_MALLOC(lb) \ (void)((lb) >= GC_dbg_collect_at_malloc_min_lb ? \ (GC_gcollect(), 0) : 0) #else # define GC_DBG_COLLECT_AT_MALLOC(lb) (void)0 #endif /* !GC_COLLECT_AT_MALLOC */ /* Allocation routines that bypass the thread local cache. */ #if defined(THREAD_LOCAL_ALLOC) && defined(GC_GCJ_SUPPORT) GC_INNER void *GC_core_gcj_malloc(size_t lb, void *, unsigned flags); #endif GC_INNER void GC_init_headers(void); GC_INNER struct hblkhdr * GC_install_header(struct hblk *h); /* Install a header for block h. */ /* Return 0 on failure, or the header */ /* otherwise. */ GC_INNER GC_bool GC_install_counts(struct hblk * h, size_t sz); /* Set up forwarding counts for block */ /* h of size sz. */ /* Return FALSE on failure. */ GC_INNER void GC_remove_header(struct hblk * h); /* Remove the header for block h. */ GC_INNER void GC_remove_counts(struct hblk * h, size_t sz); /* Remove forwarding counts for h. */ GC_INNER hdr * GC_find_header(ptr_t h); #ifdef USE_PROC_FOR_LIBRARIES GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes); /* Add a chunk to GC_our_memory. */ #else # define GC_add_to_our_memory(p, bytes) \ (GC_our_mem_bytes += (bytes), (void)(p)) #endif GC_INNER void GC_print_all_errors(void); /* Print smashed and leaked objects, if any. */ /* Clear the lists of such objects. */ GC_EXTERN void (*GC_check_heap)(void); /* Check that all objects in the heap with */ /* debugging info are intact. */ /* Add any that are not to GC_smashed list. */ GC_EXTERN void (*GC_print_all_smashed)(void); /* Print GC_smashed if it's not empty. */ /* Clear GC_smashed list. */ GC_EXTERN void (*GC_print_heap_obj)(ptr_t p); /* If possible print (using GC_err_printf) */ /* a more detailed description (terminated with */ /* "\n") of the object referred to by p. */ #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG) void GC_print_address_map(void); /* Print an address map of the process. 
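*/

/* Illustrative sketch (not compiled into the collector): an internal  */
/* allocation path built on GC_generic_malloc_inner() declared above.  */
/* LOCK()/UNLOCK() stand for the allocation-lock primitives defined    */
/* in the GC's other private headers.                                  */
#if 0
  {
    void *op;

    GC_DBG_COLLECT_AT_MALLOC(lb); /* no-op unless GC_COLLECT_AT_MALLOC */
    LOCK();
    op = GC_generic_malloc_inner(lb, NORMAL, 0 /* flags */);
    UNLOCK();
    if (NULL == op) {
      /* ... report out-of-memory ... */
    }
  }
#endif

/*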
*/ #endif #ifndef SHORT_DBG_HDRS GC_EXTERN GC_bool GC_findleak_delay_free; /* Do not immediately deallocate object on */ /* free() in the leak-finding mode, just mark */ /* it as freed (and deallocate it after GC). */ GC_INNER GC_bool GC_check_leaked(ptr_t base); /* from dbg_mlc.c */ #endif #ifdef AO_HAVE_store GC_EXTERN volatile AO_t GC_have_errors; # define GC_SET_HAVE_ERRORS() AO_store(&GC_have_errors, (AO_t)TRUE) # define get_have_errors() ((GC_bool)AO_load(&GC_have_errors)) /* The barriers are not needed. */ #else GC_EXTERN GC_bool GC_have_errors; # define GC_SET_HAVE_ERRORS() (void)(GC_have_errors = TRUE) # define get_have_errors() GC_have_errors #endif /* We saw a smashed or leaked object. */ /* Call error printing routine */ /* occasionally. It is OK to read it */ /* without acquiring the lock. */ /* If set to true, it is never cleared. */ #define VERBOSE 2 #if !defined(NO_CLOCK) || !defined(SMALL_CONFIG) GC_EXTERN int GC_print_stats; /* Value 1 generates basic GC log; */ /* VERBOSE generates additional messages. */ #else /* SMALL_CONFIG */ # define GC_print_stats 0 /* Will this remove the message character strings from the executable? */ /* With a particular level of optimizations, it should... */ #endif #ifdef KEEP_BACK_PTRS GC_EXTERN long GC_backtraces; #endif #if defined(THREADS) || defined(LINT2) /* A trivial (linear congruential) pseudo-random number generator, */ /* safe for concurrent usage. */ # define GC_RAND_MAX ((int)(~0U >> 1)) # if defined(AO_HAVE_store) && defined(THREAD_SANITIZER) # define GC_RAND_STATE_T volatile AO_t # define GC_RAND_NEXT(pseed) GC_rand_next(pseed) GC_INLINE int GC_rand_next(GC_RAND_STATE_T *pseed) { AO_t next = (AO_t)((AO_load(pseed) * 1103515245U + 12345) & (unsigned)GC_RAND_MAX); AO_store(pseed, next); return (int)next; } # else # define GC_RAND_STATE_T unsigned # define GC_RAND_NEXT(pseed) /* overflow and race are OK */ \ (int)(*(pseed) = (*(pseed) * 1103515245U + 12345) \ & (unsigned)GC_RAND_MAX) # endif #endif /* THREADS || LINT2 */ GC_EXTERN GC_bool GC_print_back_height; #ifdef MAKE_BACK_GRAPH void GC_print_back_graph_stats(void); #endif #ifdef THREADS /* Explicitly deallocate the object when we already hold lock. */ /* Only used for internally allocated objects. */ GC_INNER void GC_free_inner(void * p); #endif
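/* Illustrative sketch (not compiled into the collector): seeding and  */
/* stepping the linear-congruential generator defined above; the       */
/* variable names are invented for illustration.                       */
#if 0
  static GC_RAND_STATE_T example_seed = 42;

  /* Each call advances the state and yields a value in */
  /* the [0, GC_RAND_MAX] range.                        */
  int r = GC_RAND_NEXT(&example_seed);
#endif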
/* Macros used for collector internal allocation. */ /* These assume the collector lock is held. */ #ifdef DBG_HDRS_ALL GC_INNER void * GC_debug_generic_malloc_inner(size_t lb, int k, unsigned flags); # define GC_INTERNAL_MALLOC(lb, k) GC_debug_generic_malloc_inner(lb, k, 0) # define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(lb, k) \ GC_debug_generic_malloc_inner(lb, k, IGNORE_OFF_PAGE) # ifdef THREADS GC_INNER void GC_debug_free_inner(void * p); # define GC_INTERNAL_FREE GC_debug_free_inner # else # define GC_INTERNAL_FREE GC_debug_free # endif #else # define GC_INTERNAL_MALLOC(lb, k) GC_generic_malloc_inner(lb, k, 0) # define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(lb, k) \ GC_generic_malloc_inner(lb, k, IGNORE_OFF_PAGE) # ifdef THREADS # define GC_INTERNAL_FREE GC_free_inner # else # define GC_INTERNAL_FREE GC_free # endif #endif /* !DBG_HDRS_ALL */ #ifdef USE_MUNMAP /* Memory unmapping: */ GC_INNER void GC_unmap_old(unsigned threshold); GC_INNER void GC_merge_unmapped(void); GC_INNER void GC_unmap(ptr_t start, size_t bytes); GC_INNER void GC_remap(ptr_t start, size_t bytes); GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2, size_t bytes2); # ifndef NOT_GCBUILD /* Compute end address for an unmap operation on the indicated block. */ GC_INLINE ptr_t GC_unmap_end(ptr_t start, size_t bytes) { return (ptr_t)((word)(start + bytes) & ~(GC_page_size - 1)); } # endif #endif /* USE_MUNMAP */ #ifdef CAN_HANDLE_FORK GC_EXTERN int GC_handle_fork; /* Fork-handling mode: */ /* 0 means no fork handling requested (but client could */ /* anyway call fork() provided it is surrounded with */ /* GC_atfork_prepare/parent/child calls); */ /* -1 means GC tries to use pthread_atfork if it is */ /* available (if it succeeds then GC_handle_fork value */ /* is changed to 1), client should nonetheless surround */ /* fork() with GC_atfork_prepare/parent/child (for the */ /* case of pthread_atfork failure or absence); */ /* 1 (or other values) means client fully relies on */ /* pthread_atfork (so if it is missing or failed then */ /* abort occurs in GC_init), GC_atfork_prepare and the */ /* accompanying routines are no-op in such a case. */ #endif #ifdef GC_DISABLE_INCREMENTAL # define GC_incremental FALSE # define GC_auto_incremental FALSE # define GC_manual_vdb FALSE # define GC_dirty(p) (void)(p) # define REACHABLE_AFTER_DIRTY(p) (void)(p) #else /* !GC_DISABLE_INCREMENTAL */ GC_EXTERN GC_bool GC_incremental; /* Using incremental/generational collection. */ /* Assumes dirty bits are being maintained. */ /* Virtual dirty bit implementation: */ /* Each implementation exports the following: */ GC_INNER void GC_read_dirty(GC_bool output_unneeded); /* Retrieve dirty bits. Set output_unneeded to */ /* indicate that reading of the retrieved dirty */ /* bits is not planned till the next retrieval. */ GC_INNER GC_bool GC_page_was_dirty(struct hblk *h); /* Read retrieved dirty bits. */ GC_INNER void GC_remove_protection(struct hblk *h, word nblocks, GC_bool pointerfree); /* h is about to be written or allocated. Ensure that */ /* it is not write protected by the virtual dirty bit */ /* implementation. I.e., this is a call that: */ /* - hints that [h, h+nblocks) is about to be written; */ /* - guarantees that protection is removed; */ /* - may speed up some dirty bit implementations; */ /* - may be essential if we need to ensure that */ /* pointer-free system call buffers in the heap are */ /* not protected. */ # if !defined(NO_VDB_FOR_STATIC_ROOTS) && !defined(PROC_VDB) GC_INNER GC_bool GC_is_vdb_for_static_roots(void); /* Is VDB working for static roots? */ # endif # ifdef CAN_HANDLE_FORK # if defined(PROC_VDB) || defined(SOFT_VDB) GC_INNER void GC_dirty_update_child(void); /* Update pid-specific resources (like /proc file */ /* descriptors) needed by the dirty bits implementation */ /* after fork in the child process. */ # else # define GC_dirty_update_child() (void)0 # endif # endif /* CAN_HANDLE_FORK */ GC_INNER GC_bool GC_dirty_init(void); /* Returns true if dirty bits are maintained (otherwise */ /* it is OK to be called again if the client invokes */ /* GC_enable_incremental once more). */ GC_EXTERN GC_bool GC_manual_vdb; /* The incremental collection is in the manual VDB */ /* mode. Assumes GC_incremental is true. Should not */ /* be modified once GC_incremental is set to true. */ # define GC_auto_incremental (GC_incremental && !GC_manual_vdb) GC_INNER void GC_dirty_inner(const void *p); /* does not require locking */ # define GC_dirty(p) (GC_manual_vdb ? GC_dirty_inner(p) : (void)0) # define REACHABLE_AFTER_DIRTY(p) GC_reachable_here(p) #endif /* !GC_DISABLE_INCREMENTAL */
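/* Illustrative sketch (not compiled into the collector): the          */
/* manual-VDB store protocol using the macros defined above; p and q   */
/* are invented names.                                                 */
#if 0
  /* After updating a pointer slot p inside a heap object: */
  *(void **)p = q;
  GC_dirty(p);              /* no-op unless GC_manual_vdb       */
  REACHABLE_AFTER_DIRTY(q); /* prevent q from being missed by a */
                            /* concurrent mark phase            */
#endif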
/* Same as GC_base but accepts and returns a pointer to const object. */ #define GC_base_C(p) ((const void *)GC_base((/* no const */ void *)(p))) /* Debugging print routines: */ void GC_print_block_list(void); void GC_print_hblkfreelist(void); void GC_print_heap_sects(void); void GC_print_static_roots(void); #ifdef KEEP_BACK_PTRS GC_INNER void GC_store_back_pointer(ptr_t source, ptr_t dest); GC_INNER void GC_marked_for_finalization(ptr_t dest); # define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest) # define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest) #else # define GC_STORE_BACK_PTR(source, dest) (void)(source) # define GC_MARKED_FOR_FINALIZATION(dest) #endif /* !KEEP_BACK_PTRS */ /* Make arguments appear live to compiler */ void GC_noop6(word, word, word, word, word, word); #ifndef GC_ATTR_FORMAT_PRINTF # if GC_GNUC_PREREQ(3, 0) # define GC_ATTR_FORMAT_PRINTF(spec_argnum, first_checked) \ __attribute__((__format__(__printf__, spec_argnum, first_checked))) # else # define GC_ATTR_FORMAT_PRINTF(spec_argnum, first_checked) # endif #endif /* Logging and diagnostic output: */ /* GC_printf is used typically on client explicit print requests. */ /* For all GC_X_printf routines, it is recommended to put "\n" at */ /* 'format' string end (for output atomicity). */ GC_API_PRIV void GC_printf(const char * format, ...) GC_ATTR_FORMAT_PRINTF(1, 2); /* A version of printf that doesn't allocate, */ /* 1 KB total output length. */ /* (We use sprintf. Hopefully that doesn't */ /* allocate for long arguments.) */ GC_API_PRIV void GC_err_printf(const char * format, ...) GC_ATTR_FORMAT_PRINTF(1, 2); /* Basic logging routine. Typically, GC_log_printf is called directly */ /* only inside various DEBUG_x blocks. */ GC_API_PRIV void GC_log_printf(const char * format, ...) GC_ATTR_FORMAT_PRINTF(1, 2); #ifndef GC_ANDROID_LOG # define GC_PRINT_STATS_FLAG (GC_print_stats != 0) # define GC_INFOLOG_PRINTF GC_COND_LOG_PRINTF /* GC_verbose_log_printf is called only if GC_print_stats is VERBOSE. */ # define GC_verbose_log_printf GC_log_printf #else extern GC_bool GC_quiet; # define GC_PRINT_STATS_FLAG (!GC_quiet) /* INFO/DBG loggers are enabled even if GC_print_stats is off. */ # ifndef GC_INFOLOG_PRINTF # define GC_INFOLOG_PRINTF if (GC_quiet) {} else GC_info_log_printf # endif GC_INNER void GC_info_log_printf(const char *format, ...) GC_ATTR_FORMAT_PRINTF(1, 2); GC_INNER void GC_verbose_log_printf(const char *format, ...)
GC_ATTR_FORMAT_PRINTF(1, 2); #endif /* GC_ANDROID_LOG */ #if defined(SMALL_CONFIG) || defined(GC_ANDROID_LOG) # define GC_ERRINFO_PRINTF GC_INFOLOG_PRINTF #else # define GC_ERRINFO_PRINTF GC_log_printf #endif /* Convenient macros for GC_[verbose_]log_printf invocation. */ #define GC_COND_LOG_PRINTF \ if (EXPECT(!GC_print_stats, TRUE)) {} else GC_log_printf #define GC_VERBOSE_LOG_PRINTF \ if (EXPECT(GC_print_stats != VERBOSE, TRUE)) {} else GC_verbose_log_printf #ifndef GC_DBGLOG_PRINTF # define GC_DBGLOG_PRINTF if (!GC_PRINT_STATS_FLAG) {} else GC_log_printf #endif void GC_err_puts(const char *s); /* Write s to stderr, don't buffer, don't add */ /* newlines, don't ... */ /* Handy macro for logging size values (of word type) in KiB (rounding */ /* to nearest value). */ #define TO_KiB_UL(v) ((unsigned long)(((v) + ((1 << 9) - 1)) >> 10)) GC_EXTERN unsigned GC_fail_count; /* How many consecutive GC/expansion failures? */ /* Reset by GC_allochblk(); defined in alloc.c. */ GC_EXTERN long GC_large_alloc_warn_interval; /* defined in misc.c */ GC_EXTERN signed_word GC_bytes_found; /* Number of reclaimed bytes after garbage collection; */ /* protected by GC lock; defined in reclaim.c. */ #ifndef GC_GET_HEAP_USAGE_NOT_NEEDED GC_EXTERN word GC_reclaimed_bytes_before_gc; /* Number of bytes reclaimed before this */ /* collection cycle; used for statistics only. */ #endif #ifdef USE_MUNMAP GC_EXTERN unsigned GC_unmap_threshold; /* defined in alloc.c */ GC_EXTERN GC_bool GC_force_unmap_on_gcollect; /* defined in misc.c */ #endif #ifdef MSWIN32 GC_EXTERN GC_bool GC_no_win32_dlls; /* defined in os_dep.c */ GC_EXTERN GC_bool GC_wnt; /* Is Windows NT derivative; */ /* defined and set in os_dep.c. */ #endif #ifdef THREADS # if (defined(MSWIN32) && !defined(CONSOLE_LOG)) || defined(MSWINCE) GC_EXTERN CRITICAL_SECTION GC_write_cs; /* defined in misc.c */ # ifdef GC_ASSERTIONS GC_EXTERN GC_bool GC_write_disabled; /* defined in win32_threads.c; */ /* protected by GC_write_cs. */ # endif # endif /* MSWIN32 || MSWINCE */ # if defined(GC_DISABLE_INCREMENTAL) || defined(HAVE_LOCKFREE_AO_OR) # define GC_acquire_dirty_lock() (void)0 # define GC_release_dirty_lock() (void)0 # else /* Acquire the spin lock we use to update dirty bits. */ /* Threads should not get stopped holding it. But we may */ /* acquire and release it during GC_remove_protection call. */ # define GC_acquire_dirty_lock() \ do { /* empty */ \ } while (AO_test_and_set_acquire(&GC_fault_handler_lock) == AO_TS_SET) # define GC_release_dirty_lock() AO_CLEAR(&GC_fault_handler_lock) GC_EXTERN volatile AO_TS_t GC_fault_handler_lock; /* defined in os_dep.c */ # endif # ifdef MSWINCE GC_EXTERN GC_bool GC_dont_query_stack_min; /* Defined and set in os_dep.c. */ # endif #elif defined(IA64) GC_EXTERN ptr_t GC_save_regs_ret_val; /* defined in mach_dep.c. */ /* Previously set to backing store pointer. */ #endif /* !THREADS */ #ifdef THREAD_LOCAL_ALLOC GC_EXTERN GC_bool GC_world_stopped; /* defined in alloc.c */ GC_INNER void GC_mark_thread_local_free_lists(void); #endif #if defined(GLIBC_2_19_TSX_BUG) && defined(GC_PTHREADS_PARAMARK) /* Parse a string like <major>[.<minor>[<tail>]] and return the major value. */ GC_INNER int GC_parse_version(int *pminor, const char *pverstr); #endif #if defined(MPROTECT_VDB) && defined(GWW_VDB) GC_INNER GC_bool GC_gww_dirty_init(void); /* Returns TRUE if GetWriteWatch is available. */ /* May be called repeatedly. May be called */ /* with or without the GC lock held.
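*/

/* Illustrative sketch (not compiled into the collector): the intended */
/* pairing of the dirty-bit spin lock defined above; threads must not  */
/* be stopped while it is held.                                        */
#if 0
  GC_acquire_dirty_lock();
  /* ... briefly update the virtual-dirty-bit state ... */
  GC_release_dirty_lock();
#endif

/*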
*/ #endif #if defined(CHECKSUMS) || defined(PROC_VDB) GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h); /* Could the page contain valid heap pointers? */ #endif #ifdef CHECKSUMS # if defined(MPROTECT_VDB) && !defined(DARWIN) void GC_record_fault(struct hblk * h); # endif void GC_check_dirty(void); #endif GC_INNER void GC_default_print_heap_obj_proc(ptr_t p); GC_INNER void GC_setpagesize(void); GC_INNER void GC_initialize_offsets(void); /* defined in obj_map.c */ GC_INNER void GC_bl_init(void); GC_INNER void GC_bl_init_no_interiors(void); /* defined in blacklst.c */ GC_INNER void GC_start_debugging_inner(void); /* defined in dbg_mlc.c. */ /* Should not be called if GC_debugging_started. */ /* Store debugging info into p. Return displaced pointer. */ /* Assumes we hold the allocation lock. */ GC_INNER void *GC_store_debug_info_inner(void *p, word sz, const char *str, int linenum); #ifdef REDIRECT_MALLOC # ifdef GC_LINUX_THREADS GC_INNER GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp); /* from os_dep.c */ # endif #elif defined(USE_WINALLOC) GC_INNER void GC_add_current_malloc_heap(void); #endif /* !REDIRECT_MALLOC */ #ifdef MAKE_BACK_GRAPH GC_INNER void GC_build_back_graph(void); GC_INNER void GC_traverse_back_graph(void); #endif #ifdef MSWIN32 GC_INNER void GC_init_win32(void); #endif #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) GC_INNER void * GC_roots_present(ptr_t); /* The type is a lie, since the real type doesn't make sense here, */ /* and we only test for NULL. */ #endif #ifdef GC_WIN32_THREADS GC_INNER void GC_get_next_stack(char *start, char * limit, char **lo, char **hi); # if defined(MPROTECT_VDB) && !defined(CYGWIN32) GC_INNER void GC_set_write_fault_handler(void); # endif # if defined(WRAP_MARK_SOME) && !defined(GC_PTHREADS) GC_INNER GC_bool GC_started_thread_while_stopped(void); /* Did we invalidate mark phase with an unexpected thread start? 
*/ # endif #endif /* GC_WIN32_THREADS */ #if defined(GC_DARWIN_THREADS) && defined(MPROTECT_VDB) GC_INNER void GC_mprotect_stop(void); GC_INNER void GC_mprotect_resume(void); # ifndef GC_NO_THREADS_DISCOVERY GC_INNER void GC_darwin_register_self_mach_handler(void); # endif #endif #ifdef THREADS # ifndef GC_NO_FINALIZATION GC_INNER void GC_reset_finalizer_nested(void); GC_INNER unsigned char *GC_check_finalizer_nested(void); # endif GC_INNER void GC_do_blocking_inner(ptr_t data, void * context); GC_INNER void GC_push_all_stacks(void); # ifdef USE_PROC_FOR_LIBRARIES GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi); # endif # if (defined(HAVE_PTHREAD_ATTR_GET_NP) || defined(HAVE_PTHREAD_GETATTR_NP)) \ && defined(IA64) GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound); # endif #endif /* THREADS */ #ifdef DYNAMIC_LOADING GC_INNER GC_bool GC_register_main_static_data(void); # ifdef DARWIN GC_INNER void GC_init_dyld(void); # endif #endif /* DYNAMIC_LOADING */ #ifdef SEARCH_FOR_DATA_START GC_INNER void GC_init_linux_data_start(void); void * GC_find_limit(void *, int); #endif #ifdef UNIX_LIKE GC_INNER void GC_set_and_save_fault_handler(void (*handler)(int)); #endif #ifdef NEED_PROC_MAPS # if defined(DYNAMIC_LOADING) && defined(USE_PROC_FOR_LIBRARIES) GC_INNER const char *GC_parse_map_entry(const char *maps_ptr, ptr_t *start, ptr_t *end, const char **prot, unsigned *maj_dev, const char **mapping_name); # endif # if defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR) GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr, ptr_t *startp, ptr_t *endp); # endif GC_INNER const char *GC_get_maps(void); #endif /* NEED_PROC_MAPS */ #ifdef GC_ASSERTIONS GC_INNER word GC_compute_large_free_bytes(void); GC_INNER word GC_compute_root_size(void); #endif /* Check an assertion at compile time. */ #if _MSC_VER >= 1700 # define GC_STATIC_ASSERT(expr) \ static_assert(expr, "static assertion failed: " #expr) #elif defined(static_assert) && !defined(CPPCHECK) \ && (__STDC_VERSION__ >= 201112L) # define GC_STATIC_ASSERT(expr) static_assert(expr, #expr) #elif defined(mips) && !defined(__GNUC__) && !defined(CPPCHECK) /* DOB: MIPSPro C gets an internal error taking the sizeof an array type. This code works correctly (ugliness is to avoid "unused var" warnings) */ # define GC_STATIC_ASSERT(expr) \ do { if (0) { char j[(expr)? 1 : -1]; j[0]='\0'; j[0]=j[0]; } } while(0) #else /* The error message for failure is a bit baroque, but ... */ # define GC_STATIC_ASSERT(expr) (void)sizeof(char[(expr)? 1 : -1]) #endif /* Runtime check that an argument declared as non-null is actually not null. */ #if GC_GNUC_PREREQ(4, 0) /* Workaround tautological-pointer-compare Clang warning. */ # define NONNULL_ARG_NOT_NULL(arg) (*(volatile void **)&(arg) != NULL) #else # define NONNULL_ARG_NOT_NULL(arg) (NULL != (arg)) #endif #define COND_DUMP_CHECKS \ do { \ GC_ASSERT(I_HOLD_LOCK()); \ GC_ASSERT(GC_compute_large_free_bytes() == GC_large_free_bytes); \ GC_ASSERT(GC_compute_root_size() == GC_root_size); \ } while (0) #ifndef NO_DEBUGGING GC_EXTERN GC_bool GC_dump_regularly; /* Generate regular debugging dumps. */ # define COND_DUMP if (EXPECT(GC_dump_regularly, FALSE)) { \ GC_dump_named(NULL); \ } else COND_DUMP_CHECKS #else # define COND_DUMP COND_DUMP_CHECKS #endif #if defined(PARALLEL_MARK) /* We need additional synchronization facilities from the thread */ /* support.
We believe these are less performance critical */ /* than the main garbage collector lock; standard pthreads-based */ /* implementations should be sufficient. */ # define GC_markers_m1 GC_parallel /* Number of mark threads we would like to have */ /* excluding the initiating thread. */ GC_EXTERN GC_bool GC_parallel_mark_disabled; /* A flag to temporarily avoid parallel marking. */ /* The mark lock and condition variable. If the GC lock is also */ /* acquired, the GC lock must be acquired first. The mark lock is */ /* used to both protect some variables used by the parallel */ /* marker, and to protect GC_fl_builder_count, below. */ /* GC_notify_all_marker() is called when */ /* the state of the parallel marker changes */ /* in some significant way (see gc_mark.h for details). The */ /* latter set of events includes incrementing GC_mark_no. */ /* GC_notify_all_builder() is called when GC_fl_builder_count */ /* reaches 0. */ GC_INNER void GC_wait_for_markers_init(void); GC_INNER void GC_acquire_mark_lock(void); GC_INNER void GC_release_mark_lock(void); GC_INNER void GC_notify_all_builder(void); GC_INNER void GC_wait_for_reclaim(void); GC_EXTERN signed_word GC_fl_builder_count; /* Protected by mark lock. */ GC_INNER void GC_notify_all_marker(void); GC_INNER void GC_wait_marker(void); GC_EXTERN word GC_mark_no; /* Protected by mark lock. */ GC_INNER void GC_help_marker(word my_mark_no); /* Try to help out parallel marker for mark cycle */ /* my_mark_no. Returns if the mark cycle finishes or */ /* was already done, or there was nothing to do for */ /* some other reason. */ GC_INNER void GC_start_mark_threads_inner(void); #endif /* PARALLEL_MARK */ #if defined(SIGNAL_BASED_STOP_WORLD) && !defined(SIG_SUSPEND) /* We define the thread suspension signal here, so that we can refer */ /* to it in the dirty bit implementation, if necessary. Ideally we */ /* would allocate a (real-time?) signal using the standard mechanism. */ /* Unfortunately, there is no standard mechanism. (There is one */ /* in Linux glibc, but it's not exported.) Thus we continue to use */ /* the same hard-coded signals we've always used. */ # ifdef THREAD_SANITIZER /* Unfortunately, use of an asynchronous signal to suspend threads */ /* leads to the situation when the signal is not delivered (is */ /* stored to pending_signals in TSan runtime actually) while the */ /* destination thread is blocked in pthread_mutex_lock. Thus, we */ /* use some synchronous one instead (which is again unlikely to be */ /* used by clients directly). */ # define SIG_SUSPEND SIGSYS # elif (defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)) \ && !defined(GC_USESIGRT_SIGNALS) # if defined(SPARC) && !defined(SIGPWR) /* SPARC/Linux doesn't properly define SIGPWR in <signal.h>. */ /* It is aliased to SIGLOST in asm/signal.h, though. */ # define SIG_SUSPEND SIGLOST # else /* Linuxthreads itself uses SIGUSR1 and SIGUSR2. */ # define SIG_SUSPEND SIGPWR # endif # elif defined(GC_FREEBSD_THREADS) && defined(__GLIBC__) \ && !defined(GC_USESIGRT_SIGNALS) # define SIG_SUSPEND (32+6) # elif (defined(GC_FREEBSD_THREADS) || defined(HURD) || defined(RTEMS)) \ && !defined(GC_USESIGRT_SIGNALS) # define SIG_SUSPEND SIGUSR1 /* SIGTSTP and SIGCONT could be used alternatively on FreeBSD.
*/ # elif defined(GC_OPENBSD_THREADS) && !defined(GC_USESIGRT_SIGNALS) # define SIG_SUSPEND SIGXFSZ # elif defined(_SIGRTMIN) && !defined(CPPCHECK) # define SIG_SUSPEND _SIGRTMIN + 6 # else # define SIG_SUSPEND SIGRTMIN + 6 # endif #endif /* SIGNAL_BASED_STOP_WORLD && !SIG_SUSPEND */ #if defined(GC_PTHREADS) && !defined(GC_SEM_INIT_PSHARED) # define GC_SEM_INIT_PSHARED 0 #endif /* Some macros for setjmp that work across signal handlers */ /* where possible, and a couple of routines to facilitate */ /* catching accesses to bad addresses when that's */ /* possible/needed. */ #if (defined(UNIX_LIKE) || (defined(NEED_FIND_LIMIT) && defined(CYGWIN32))) \ && !defined(GC_NO_SIGSETJMP) # if defined(SUNOS5SIGS) && !defined(FREEBSD) && !defined(LINUX) EXTERN_C_END # include <sys/siginfo.h> EXTERN_C_BEGIN # endif /* Define SETJMP and friends to be the version that restores */ /* the signal mask. */ # define SETJMP(env) sigsetjmp(env, 1) # define LONGJMP(env, val) siglongjmp(env, val) # define JMP_BUF sigjmp_buf #else # ifdef ECOS # define SETJMP(env) hal_setjmp(env) # else # define SETJMP(env) setjmp(env) # endif # define LONGJMP(env, val) longjmp(env, val) # define JMP_BUF jmp_buf #endif /* !UNIX_LIKE || GC_NO_SIGSETJMP */ #if defined(DATASTART_USES_BSDGETDATASTART) EXTERN_C_END # include <machine/trap.h> EXTERN_C_BEGIN GC_INNER ptr_t GC_FreeBSDGetDataStart(size_t, ptr_t); # define DATASTART_IS_FUNC #endif /* DATASTART_USES_BSDGETDATASTART */ #if defined(NEED_FIND_LIMIT) \ || (defined(WRAP_MARK_SOME) && defined(NO_SEH_AVAILABLE)) \ || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS)) GC_EXTERN JMP_BUF GC_jmp_buf; /* Set up a handler for address faults which will longjmp to */ /* GC_jmp_buf. */ GC_INNER void GC_setup_temporary_fault_handler(void); /* Undo the effect of GC_setup_temporary_fault_handler. */ GC_INNER void GC_reset_fault_handler(void); #endif /* NEED_FIND_LIMIT || USE_PROC_FOR_LIBRARIES || WRAP_MARK_SOME */ /* Some convenience macros for cancellation support. */ #if defined(CANCEL_SAFE) # if defined(GC_ASSERTIONS) \ && (defined(USE_COMPILER_TLS) \ || (defined(LINUX) && !defined(ARM32) && GC_GNUC_PREREQ(3, 3) \ || defined(HPUX) /* and probably others ... */)) extern __thread unsigned char GC_cancel_disable_count; # define NEED_CANCEL_DISABLE_COUNT # define INCR_CANCEL_DISABLE() ++GC_cancel_disable_count # define DECR_CANCEL_DISABLE() --GC_cancel_disable_count # define ASSERT_CANCEL_DISABLED() GC_ASSERT(GC_cancel_disable_count > 0) # else # define INCR_CANCEL_DISABLE() # define DECR_CANCEL_DISABLE() # define ASSERT_CANCEL_DISABLED() (void)0 # endif /* GC_ASSERTIONS & ... */ # define DISABLE_CANCEL(state) \ do { pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state); \ INCR_CANCEL_DISABLE(); } while (0) # define RESTORE_CANCEL(state) \ do { ASSERT_CANCEL_DISABLED(); \ pthread_setcancelstate(state, NULL); \ DECR_CANCEL_DISABLE(); } while (0) #else /* !CANCEL_SAFE */ # define DISABLE_CANCEL(state) (void)0 # define RESTORE_CANCEL(state) (void)0 # define ASSERT_CANCEL_DISABLED() (void)0 #endif /* !CANCEL_SAFE */ EXTERN_C_END #endif /* GC_PRIVATE_H */
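/* Illustrative sketch (not compiled into the collector): the probe    */
/* pattern built on GC_setup_temporary_fault_handler() and the SETJMP  */
/* machinery above, similar to what the limit-finding code does;       */
/* example_is_readable() is invented for illustration.                 */
#if 0
  static GC_bool example_is_readable(ptr_t probe_addr)
  {
    GC_bool result = FALSE;

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
      word dummy = *(volatile word *)probe_addr; /* may fault */

      (void)dummy;
      result = TRUE; /* no fault occurred */
    } /* else: the fault handler did a LONGJMP back here */
    GC_reset_fault_handler();
    return result;
  }
#endif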