path: root/boehm-gc/linux_threads.c
author    tromey <tromey@138bc75d-0d04-0410-961f-82ee72b054a4>  2001-08-18 01:04:43 +0000
committer tromey <tromey@138bc75d-0d04-0410-961f-82ee72b054a4>  2001-08-18 01:04:43 +0000
commit    8fee6c5ba203062646a84ef04770c79ba2be0a6a (patch)
tree      0a038d94cc4acd44c0926f011247f6b7969fa84b /boehm-gc/linux_threads.c
parent    75ae025532a15d2842c5401959ef6775e3ebe550 (diff)
download  gcc-8fee6c5ba203062646a84ef04770c79ba2be0a6a.tar.gz
* Makefile.am, acinclude.m4, configure.in: Imported GC 6.0 and
merged local changes.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@44994 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'boehm-gc/linux_threads.c')
-rw-r--r--  boehm-gc/linux_threads.c  204
1 file changed, 127 insertions(+), 77 deletions(-)
diff --git a/boehm-gc/linux_threads.c b/boehm-gc/linux_threads.c
index 37537bcef3e..8ab140b6b38 100644
--- a/boehm-gc/linux_threads.c
+++ b/boehm-gc/linux_threads.c
@@ -55,6 +55,12 @@
|| defined(GC_OSF1_THREADS) || defined(OSF1_THREADS) \
# include "private/gc_priv.h"
+
+# if defined(HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
+ && !defined(USE_HPUX_TLS)
+# define USE_HPUX_TLS
+# endif
+
# ifdef THREAD_LOCAL_ALLOC
# if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_HPUX_TLS)
# include "private/specific.h"
@@ -161,15 +167,16 @@ typedef struct GC_Thread_Rep {
# ifdef THREAD_LOCAL_ALLOC
# if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)
# define GRANULARITY 16
-# define NFREELISTS 48
+# define NFREELISTS 49
# else
# define GRANULARITY 8
-# define NFREELISTS 64
+# define NFREELISTS 65
# endif
- /* The ith free list corresponds to size (i+1)*GRANULARITY */
-# define INDEX_FROM_BYTES(n) (ADD_SLOP(n) - 1)/GRANULARITY
-# define BYTES_FROM_INDEX(i) (((i) + 1) * GRANULARITY - EXTRA_BYTES)
-# define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= NFREELISTS*GRANULARITY)
+ /* The ith free list corresponds to size i*GRANULARITY */
+# define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
+# define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
+# define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= \
+ (NFREELISTS-1)*GRANULARITY)
ptr_t ptrfree_freelists[NFREELISTS];
ptr_t normal_freelists[NFREELISTS];
# ifdef GC_GCJ_SUPPORT
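
[Note: under the re-indexed scheme above, free list i now holds objects of size i*GRANULARITY, with slot 0 reserved for size-zero requests; that is why NFREELISTS grows by one and the loops later in the patch start at 1. A minimal standalone sketch of the arithmetic, assuming ADD_SLOP(n) == n and EXTRA_BYTES == 0 (the no-debug-header configuration) and the 8-byte granularity; illustration only, not the real headers:]

#include <stdio.h>

/* Simplified copies of the macros above, under the stated assumptions. */
#define GRANULARITY 8
#define NFREELISTS 65
#define EXTRA_BYTES 0
#define ADD_SLOP(n) (n)
#define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
#define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
#define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= (NFREELISTS-1)*GRANULARITY)

int main(void)
{
    unsigned n;
    /* 0 maps to slot 0; 1..8 map to slot 1; 9 maps to slot 2. */
    for (n = 0; n <= 9; ++n)
        printf("request %u -> list %u (object size %u)\n",
               n, (unsigned)INDEX_FROM_BYTES(n),
               (unsigned)BYTES_FROM_INDEX(INDEX_FROM_BYTES(n)));
    /* The small-object limit is (NFREELISTS-1)*GRANULARITY == 512. */
    printf("SMALL_ENOUGH(512)=%d SMALL_ENOUGH(513)=%d\n",
           SMALL_ENOUGH(512), SMALL_ENOUGH(513));
    return 0;
}

[The old scheme mapped size (i+1)*GRANULARITY to list i, so a size-0 request had no natural slot; the shift by one is what makes the statically allocated size-zero object introduced below possible.]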
@@ -194,12 +201,12 @@ typedef struct GC_Thread_Rep {
GC_thread GC_lookup_thread(pthread_t id);
-static GC_bool fully_initialized = FALSE;
+static GC_bool parallel_initialized = FALSE;
# if defined(__GNUC__)
- void GC_full_init() __attribute__ ((constructor));
+ void GC_init_parallel() __attribute__ ((constructor));
# else
- void GC_full_init();
+ void GC_init_parallel();
# endif
# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
@@ -223,8 +230,8 @@ static void return_freelists(ptr_t *fl, ptr_t *gfl)
ptr_t q, *qptr;
size_t nwords;
- for (i = 0; i < NFREELISTS; ++i) {
- nwords = (i + 1) * (GRANULARITY/sizeof(word));
+ for (i = 1; i < NFREELISTS; ++i) {
+ nwords = i * (GRANULARITY/sizeof(word));
qptr = fl + i;
q = *qptr;
if ((word)q < HBLKSIZE) continue;
@@ -243,6 +250,12 @@ static void return_freelists(ptr_t *fl, ptr_t *gfl)
}
}
+/* We statically allocate a single "size 0" object. It is linked to */
+/* itself, and is thus repeatedly reused for all size 0 allocation */
+/* requests. (Size 0 gcj allocation requests are incorrect, and */
+/* we arrange for those to fault asap.) */
+static ptr_t size_zero_object = (ptr_t)(&size_zero_object);
+
/* Each thread structure must be initialized. */
/* This call must be made from the new thread. */
/* Caller holds allocation lock. */
@@ -259,13 +272,19 @@ void GC_init_thread_local(GC_thread p)
if (0 != GC_setspecific(GC_thread_key, p)) {
ABORT("Failed to set thread specific allocation pointers");
}
- for (i = 0; i < NFREELISTS; ++i) {
+ for (i = 1; i < NFREELISTS; ++i) {
p -> ptrfree_freelists[i] = (ptr_t)1;
p -> normal_freelists[i] = (ptr_t)1;
# ifdef GC_GCJ_SUPPORT
p -> gcj_freelists[i] = (ptr_t)1;
# endif
}
+ /* Set up the size 0 free lists. */
+ p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
+ p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
+# ifdef GC_GCJ_SUPPORT
+ p -> gcj_freelists[0] = (ptr_t)(-1);
+# endif
}
#ifdef GC_GCJ_SUPPORT
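
[Note: a single statically allocated object can serve every size-0 request because its link field points back at itself, so popping it from slot 0 leaves slot 0 unchanged and the list never empties. A toy demonstration of just that invariant, with obj_link() written out under the usual first-word-is-the-link convention:]

#include <stdio.h>

typedef char *ptr_t;
#define obj_link(p) (*(ptr_t *)(p))  /* first word of a free object is the link */

static ptr_t size_zero_object = (ptr_t)(&size_zero_object);  /* linked to itself */

int main(void)
{
    ptr_t fl0 = (ptr_t)(&size_zero_object);   /* free list slot 0 */
    int i;
    for (i = 0; i < 3; ++i) {
        ptr_t result = fl0;
        fl0 = obj_link(fl0);          /* "pop": the link points back at the object */
        printf("size-0 allocation %d -> %p\n", i, (void *)result);
    }
    /* The gcj slot instead holds (ptr_t)(-1): following it as a pointer  */
    /* faults immediately, flagging the erroneous size-0 gcj request.     */
    return 0;
}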
@@ -303,7 +322,7 @@ GC_PTR GC_local_malloc(size_t bytes)
/* This can happen if we get called when the world is */
/* being initialized. Whether we can actually complete */
/* the initialization then is unclear. */
- GC_full_init();
+ GC_init_parallel();
k = GC_thread_key;
}
# endif
@@ -326,10 +345,8 @@ GC_PTR GC_local_malloc(size_t bytes)
*my_fl = my_entry + index + 1;
return GC_malloc(bytes);
} else {
- my_entry = GC_generic_malloc_many(BYTES_FROM_INDEX(index),
- NORMAL);
- *my_fl = my_entry;
- if (my_entry == 0) return GC_oom_fn(bytes);
+ GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
+ if (*my_fl == 0) return GC_oom_fn(bytes);
return GC_local_malloc(bytes);
}
}
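
[Note: the other change in this hunk is the calling convention of GC_generic_malloc_many: instead of returning the new list for the caller to store, it now writes through the caller's list-head pointer, so the chain is published where the collector expects a free list at every step. A freestanding sketch of that out-parameter-plus-retry shape, with a hypothetical build_many() standing in for the real allocator:]

#include <stdio.h>
#include <stdlib.h>

typedef char *ptr_t;
#define obj_link(p) (*(ptr_t *)(p))

/* Hypothetical stand-in for GC_generic_malloc_many: build a short      */
/* list of lb-byte objects, publishing it through *result as it grows,  */
/* so *result always holds a well-formed (possibly partial) list.       */
static void build_many(size_t lb, ptr_t *result)
{
    int i;
    *result = 0;
    for (i = 0; i < 4; ++i) {
        ptr_t obj = malloc(lb < sizeof(ptr_t) ? sizeof(ptr_t) : lb);
        if (obj == 0) return;
        obj_link(obj) = *result;
        *result = obj;                  /* publish each object as it is linked */
    }
}

static ptr_t alloc_from(ptr_t *my_fl, size_t lb)
{
    ptr_t my_entry = *my_fl;
    if (my_entry != 0) {
        *my_fl = obj_link(my_entry);    /* fast path: pop the head */
        return my_entry;
    }
    build_many(lb, my_fl);              /* refill in place, not via return value */
    if (*my_fl == 0) return 0;          /* mirrors the GC_oom_fn fallback */
    return alloc_from(my_fl, lb);       /* retry, as GC_local_malloc does */
}

int main(void)
{
    ptr_t fl = 0;
    printf("%p %p\n", (void *)alloc_from(&fl, 16), (void *)alloc_from(&fl, 16));
    return 0;
}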
@@ -352,10 +369,11 @@ GC_PTR GC_local_malloc_atomic(size_t bytes)
*my_fl = my_entry + index + 1;
return GC_malloc_atomic(bytes);
} else {
- my_entry = GC_generic_malloc_many(BYTES_FROM_INDEX(index),
- PTRFREE);
- *my_fl = my_entry;
- if (my_entry == 0) return GC_oom_fn(bytes);
+ GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
+ /* *my_fl is updated while the collector is excluded; */
+ /* the free list is always visible to the collector as */
+ /* such. */
+ if (*my_fl == 0) return GC_oom_fn(bytes);
return GC_local_malloc_atomic(bytes);
}
}
@@ -390,18 +408,22 @@ GC_PTR GC_local_gcj_malloc(size_t bytes,
/* allocation of the next object, but to see this object */
/* still containing a free list pointer. Otherwise the */
/* marker might find a random "mark descriptor". */
- *my_fl = obj_link(my_entry);
- *(void **)result = ptr_to_struct_containing_descr;
+ *(volatile ptr_t *)my_fl = obj_link(my_entry);
+ /* We must update the freelist before we store the pointer. */
+ /* Otherwise a GC at this point would see a corrupted */
+ /* free list. */
+ /* A memory barrier is probably never needed, since the */
+ /* action of stopping this thread will cause prior writes */
+ /* to complete. */
+ *(void * volatile *)result = ptr_to_struct_containing_descr;
return result;
} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
*my_fl = my_entry + index + 1;
return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
} else {
- my_entry = GC_generic_malloc_many(BYTES_FROM_INDEX(index),
- GC_gcj_kind);
- *my_fl = my_entry;
- if (my_entry == 0) return GC_oom_fn(bytes);
- return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
+ GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
+ if (*my_fl == 0) return GC_oom_fn(bytes);
+ return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
}
}
}
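
[Note: the volatile qualifiers added above pin the order of two plain stores: the free-list head must be advanced before the object's first word is overwritten with the descriptor, or a collector stopping the thread between the two stores could misread a descriptor as a link, or vice versa. A compressed sketch of the pattern; as the new comment says, volatile only restrains the compiler here, and the stop-the-world signal is what forces the writes to complete. Error checks are omitted:]

#include <stdio.h>
#include <stdlib.h>

typedef char *ptr_t;
#define obj_link(p) (*(ptr_t *)(p))

/* Pop my_fl's head and stamp it with a mark descriptor, in that order. */
static void *pop_and_stamp(ptr_t *my_fl, void *descr)
{
    ptr_t my_entry = *my_fl;
    void *result = (void *)my_entry;
    *(volatile ptr_t *)my_fl = obj_link(my_entry);  /* 1: unlink first */
    *(void * volatile *)result = descr;             /* 2: then clobber the link word */
    return result;
}

int main(void)
{
    ptr_t a = calloc(1, 32), b = calloc(1, 32);     /* fake two-object free list */
    static int vtable_stub;                         /* stands in for the gcj descriptor */
    ptr_t fl = a;
    void *obj;
    obj_link(a) = b;
    obj_link(b) = 0;
    obj = pop_and_stamp(&fl, &vtable_stub);
    printf("got %p, descr ok: %d, head now %p\n",
           obj, *(void **)obj == (void *)&vtable_stub, (void *)fl);
    return 0;
}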
@@ -415,22 +437,16 @@ GC_PTR GC_local_gcj_malloc(size_t bytes,
# endif /* !THREAD_LOCAL_ALLOC */
/*
- * The only way to suspend threads given the pthread interface is to send
- * signals. We can't use SIGSTOP directly, because we need to get the
- * thread to save its stack pointer in the GC thread table before
- * suspending. So we have to reserve a signal of our own for this.
- * This means we have to intercept client calls to change the signal mask.
- * The linuxthreads package already uses SIGUSR1 and SIGUSR2,
- * so we need to reuse something else. I chose SIGPWR.
- * (Perhaps SIGUNUSED would be a better choice.)
+ * We use signals to stop threads during GC.
+ *
+ * Suspended threads wait in the signal handler for SIG_THR_RESTART.
+ * That's more portable than semaphores or condition variables.
+ * (We do use sem_post from a signal handler, but that should be portable.)
+ *
+ * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
+ * Note that we can't just stop a thread; we need it to save its stack
+ * pointer(s) and acknowledge.
*/
-#ifndef SIG_SUSPEND
-# if defined(HPUX_THREADS) || defined(GC_OSF1_THREADS)
-# define SIG_SUSPEND _SIGRTMIN + 6
-# else
-# define SIG_SUSPEND SIGPWR
-# endif
-#endif
#ifndef SIG_THR_RESTART
# if defined(HPUX_THREADS) || defined(GC_OSF1_THREADS)
@@ -440,12 +456,6 @@ GC_PTR GC_local_gcj_malloc(size_t bytes,
# endif
#endif
-/* SPARC/Linux doesn't properly define SIGPWR in <signal.h>.
- * It is aliased to SIGLOST in asm/signal.h, though. */
-#if defined(SPARC) && !defined(SIGPWR)
-# define SIGPWR SIGLOST
-#endif
-
sem_t GC_suspend_ack_sem;
#if !defined(HPUX_THREADS) && !defined(GC_OSF1_THREADS)
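
[Note: a sketch of the handshake the rewritten comment describes: the stopping thread sends SIG_SUSPEND, and each target's handler records its stack pointer, posts the ack semaphore (sem_post being one of the few async-signal-safe calls), then blocks in sigsuspend() until SIG_THR_RESTART arrives. This is a simplified illustration, not the collector's actual handler, which also deals with registers, masks, and re-entrancy; the signal numbers are assumptions (SIGPWR was the historical Linux choice, SIGXCPU is illustrative). Compile with -pthread:]

#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#define MY_SIG_SUSPEND     SIGPWR    /* assumption: historical Linux default */
#define MY_SIG_THR_RESTART SIGXCPU   /* assumption: illustrative restart signal */

static sem_t ack_sem;
static volatile sig_atomic_t restart_seen;
static char * volatile saved_sp;     /* one slot per thread in the real collector */

static void restart_handler(int sig) { (void)sig; restart_seen = 1; }

static void suspend_handler(int sig)
{
    int dummy;
    sigset_t mask;
    (void)sig;
    saved_sp = (char *)&dummy;       /* record an approximation of the stack pointer */
    restart_seen = 0;                /* clear before acking, so no restart is lost */
    sem_post(&ack_sem);              /* async-signal-safe acknowledgement */
    sigfillset(&mask);
    sigdelset(&mask, MY_SIG_THR_RESTART);
    while (!restart_seen) sigsuspend(&mask);  /* park until restarted */
}

static void *worker(void *arg) { (void)arg; for (;;) pause(); return 0; }

int main(void)
{
    pthread_t t;
    struct sigaction sa;
    sem_init(&ack_sem, 0, 0);
    sigemptyset(&sa.sa_mask); sa.sa_flags = 0;
    sa.sa_handler = suspend_handler; sigaction(MY_SIG_SUSPEND, &sa, 0);
    sa.sa_handler = restart_handler; sigaction(MY_SIG_THR_RESTART, &sa, 0);
    pthread_create(&t, 0, worker, 0);
    pthread_kill(t, MY_SIG_SUSPEND);     /* "stop the world", one thread's worth */
    sem_wait(&ack_sem);                  /* thread has saved its SP and parked */
    printf("worker parked, sp near %p\n", (void *)saved_sp);
    pthread_kill(t, MY_SIG_THR_RESTART); /* let it run again */
    usleep(10000);
    return 0;
}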
@@ -654,6 +664,34 @@ void GC_push_thread_structures GC_PROTO((void))
GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
}
+#ifdef THREAD_LOCAL_ALLOC
+/* We must explicitly mark ptrfree and gcj free lists, since the free */
+/* list links wouldn't otherwise be found. We also set them in the */
+/* normal free lists, since that involves touching less memory than if */
+/* we scanned them normally. */
+void GC_mark_thread_local_free_lists(void)
+{
+ int i, j;
+ GC_thread p;
+ ptr_t q;
+
+ for (i = 0; i < THREAD_TABLE_SZ; ++i) {
+ for (p = GC_threads[i]; 0 != p; p = p -> next) {
+ for (j = 1; j < NFREELISTS; ++j) {
+ q = p -> ptrfree_freelists[j];
+ if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
+ q = p -> normal_freelists[j];
+ if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
+# ifdef GC_GCJ_SUPPORT
+ q = p -> gcj_freelists[j];
+ if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
+# endif /* GC_GCJ_SUPPORT */
+ }
+ }
+ }
+}
+#endif /* THREAD_LOCAL_ALLOC */
+
/* Add a thread to GC_threads. We assume it wasn't already there. */
/* Caller holds allocation lock. */
GC_thread GC_new_thread(pthread_t id)
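
[Note: the new marking pass above only has to chase the link words, since nothing else in a free object is a pointer; conceptually GC_set_fl_marks is a list walk that sets one mark bit per element. A sketch of that shape with a hypothetical mark_object() in place of the real mark-bit code, keeping the same guard that skips small head values, which encode counts rather than pointers:]

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long word;
typedef char *ptr_t;
#define HBLKSIZE 4096                /* typical; the real value is configuration-dependent */
#define obj_link(p) (*(ptr_t *)(p))

static void mark_object(ptr_t p)     /* hypothetical stand-in for setting a mark bit */
{
    printf("mark %p\n", (void *)p);
}

static void set_fl_marks(ptr_t q)    /* shape of GC_set_fl_marks */
{
    while (q != 0) {
        mark_object(q);
        q = obj_link(q);
    }
}

int main(void)
{
    ptr_t a = calloc(1, 16), b = calloc(1, 16);
    ptr_t q;
    obj_link(a) = b;
    obj_link(b) = 0;
    q = a;
    if ((word)q > HBLKSIZE)          /* skip count-valued heads, as the caller does */
        set_fl_marks(q);
    return 0;
}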
@@ -752,7 +790,6 @@ void GC_stop_world()
GC_stopping_thread = my_thread; /* debugging only. */
GC_stopping_pid = getpid(); /* debugging only. */
-
/* Make sure all free list construction has stopped before we start. */
/* No new construction can start, since free list construction is */
/* required to acquire and release the GC lock before it starts, */
@@ -795,6 +832,7 @@ void GC_stop_world()
#if DEBUG_THREADS
GC_printf1("World stopped 0x%x\n", pthread_self());
#endif
+ GC_stopping_thread = 0; /* debugging only */
}
/* Caller holds allocation lock, and has held it continuously since */
@@ -1072,9 +1110,12 @@ void GC_thr_init()
/* may require allocation. */
/* Called as constructor without allocation lock. */
/* Must be called before a second thread is created. */
-void GC_full_init()
+/* Called without allocation lock. */
+void GC_init_parallel()
{
- if (fully_initialized) return;
+ if (parallel_initialized) return;
+ parallel_initialized = TRUE;
+ /* GC_init() calls us back, so set flag first. */
if (!GC_is_initialized) GC_init();
/* If we are using a parallel marker, start the helper threads. */
# ifdef PARALLEL_MARK
@@ -1086,7 +1127,6 @@ void GC_full_init()
GC_init_thread_local(GC_lookup_thread(pthread_self()));
UNLOCK();
# endif
- fully_initialized = TRUE;
}
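
[Note: the flag-before-call ordering that the new comment documents is the standard guard for two initializers that call each other; with __attribute__((constructor)) on GC_init_parallel, either entry point may run first and the recursion still terminates. A minimal sketch of the shape, with hypothetical names:]

#include <stdio.h>

static int core_initialized = 0;
static int parallel_initialized = 0;

void init_parallel(void);            /* could carry __attribute__((constructor)) on GNU C */

void init_core(void)                 /* plays the role of GC_init() */
{
    if (core_initialized) return;
    core_initialized = 1;
    /* ... core setup ... */
    init_parallel();                 /* calls back into the other initializer */
}

void init_parallel(void)             /* plays the role of GC_init_parallel() */
{
    if (parallel_initialized) return;
    parallel_initialized = 1;        /* set FIRST: init_core() calls us back */
    init_core();
    /* ... per-thread setup that needs the core initialized ... */
    puts("parallel init done once");
}

int main(void) { init_parallel(); init_parallel(); return 0; }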
@@ -1327,7 +1367,7 @@ WRAP_FUNC(pthread_create)(pthread_t *new_thread,
LOCK();
si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info), NORMAL);
UNLOCK();
- if (!fully_initialized) GC_full_init();
+ if (!parallel_initialized) GC_init_parallel();
if (0 == si) return(ENOMEM);
sem_init(&(si -> registered), 0, 0);
si -> start_routine = start_routine;
@@ -1539,7 +1579,7 @@ void GC_lock()
#endif /* !USE_SPINLOCK */
-#ifdef PARALLEL_MARK
+#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
#ifdef GC_ASSERTIONS
pthread_t GC_mark_lock_holder = NO_THREAD;
@@ -1559,8 +1599,6 @@ void GC_lock()
static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
-static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
-
static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
void GC_acquire_mark_lock()
@@ -1587,13 +1625,18 @@ void GC_release_mark_lock()
}
}
-void GC_wait_marker()
+/* Collector must wait for freelist builders for 2 reasons: */
+/* 1) Mark bits may still be getting examined without lock. */
+/* 2) Partial free lists referenced only by locals may not be scanned */
+/* correctly, e.g. if they contain "pointer-free" objects, since the */
+/* free-list link may be ignored. */
+void GC_wait_builder()
{
GC_ASSERT(GC_mark_lock_holder == pthread_self());
# ifdef GC_ASSERTIONS
GC_mark_lock_holder = NO_THREAD;
# endif
- if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
+ if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
ABORT("pthread_cond_wait failed");
}
GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
@@ -1602,13 +1645,36 @@ void GC_wait_marker()
# endif
}
-void GC_wait_builder()
+void GC_wait_for_reclaim()
+{
+ GC_acquire_mark_lock();
+ while (GC_fl_builder_count > 0) {
+ GC_wait_builder();
+ }
+ GC_release_mark_lock();
+}
+
+void GC_notify_all_builder()
+{
+ GC_ASSERT(GC_mark_lock_holder == pthread_self());
+ if (pthread_cond_broadcast(&builder_cv) != 0) {
+ ABORT("pthread_cond_broadcast failed");
+ }
+}
+
+#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
+
+#ifdef PARALLEL_MARK
+
+static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
+
+void GC_wait_marker()
{
GC_ASSERT(GC_mark_lock_holder == pthread_self());
# ifdef GC_ASSERTIONS
GC_mark_lock_holder = NO_THREAD;
# endif
- if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
+ if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
ABORT("pthread_cond_wait failed");
}
GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
@@ -1624,22 +1690,6 @@ void GC_notify_all_marker()
}
}
-void GC_notify_all_builder()
-{
- GC_ASSERT(GC_mark_lock_holder == pthread_self());
- if (pthread_cond_broadcast(&builder_cv) != 0) {
- ABORT("pthread_cond_broadcast failed");
- }
-}
-
-void GC_wait_for_reclaim()
-{
- GC_acquire_mark_lock();
- while (GC_fl_builder_count > 0) {
- GC_wait_builder();
- }
- GC_release_mark_lock();
-}
#endif /* PARALLEL_MARK */
# endif /* LINUX_THREADS */
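
[Note: the relocated GC_wait_for_reclaim/GC_notify_all_builder pair implements a "wait until the counter drains" protocol over the mark lock: builders bump GC_fl_builder_count under the lock, decrement and broadcast when done, and the collector loops on the condition variable until the count reaches zero. A freestanding sketch of that protocol in plain pthreads; the names are illustrative, and the registration race noted below is handled by the GC lock in the real collector. Compile with -pthread:]

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
static int fl_builder_count = 0;

static void *builder(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&mark_mutex);
    ++fl_builder_count;              /* announce: a free list is under construction */
    pthread_mutex_unlock(&mark_mutex);

    usleep(1000);                    /* ... build the free list ... */

    pthread_mutex_lock(&mark_mutex);
    if (--fl_builder_count == 0)
        pthread_cond_broadcast(&builder_cv);   /* GC_notify_all_builder's role */
    pthread_mutex_unlock(&mark_mutex);
    return 0;
}

static void wait_for_reclaim(void)   /* GC_wait_for_reclaim's shape */
{
    pthread_mutex_lock(&mark_mutex);
    while (fl_builder_count > 0)
        pthread_cond_wait(&builder_cv, &mark_mutex);
    pthread_mutex_unlock(&mark_mutex);
}

int main(void)
{
    pthread_t t[3];
    int i;
    for (i = 0; i < 3; ++i) pthread_create(&t[i], 0, builder, 0);
    usleep(500);                     /* demo only: give builders time to register */
    wait_for_reclaim();
    printf("all builders done, safe to reclaim\n");
    for (i = 0; i < 3; ++i) pthread_join(t[i], 0);
    return 0;
}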