path: root/pthread_support.c
author     Ivan Maidanski <ivmai@mail.ru>    2017-10-20 00:25:52 +0300
committer  Ivan Maidanski <ivmai@mail.ru>    2017-10-20 00:25:52 +0300
commit     b850e1c9bb460d54d7a5e1131fe78f3474700251 (patch)
tree       f27444823a95c261142249fca0a4ba7f57dbfd68 /pthread_support.c
parent     243ab168e4237aa4e48e74949d1995282616be7b (diff)
download   bdwgc-b850e1c9bb460d54d7a5e1131fe78f3474700251.tar.gz
Workaround Thread Sanitizer (TSan) false positive warnings (partially)
This patch covers only data race false positive warnings reported in
async_set_pht_entry_from_index, GC_clear_stack, GC_invoke_finalizers,
GC_lock, GC_noop1, GC_notify_or_invoke_finalizers, GC_pthread_create,
GC_suspend_handler_inner, I_DONT_HOLD_LOCK.

* finalize.c (GC_should_invoke_finalizers): Add GC_ATTR_NO_SANITIZE_THREAD.
* mark.c (GC_noop1): Likewise.
* os_dep.c [MPROTECT_VDB && THREADS && AO_HAVE_test_and_set_acquire]
(async_set_pht_entry_from_index): Likewise.
* finalize.c (GC_invoke_finalizers, GC_notify_or_invoke_finalizers): Call
GC_should_invoke_finalizers() instead of GC_fnlz_roots.finalize_now!=NULL.
* include/private/gc_locks.h [(GC_WIN32_THREADS && !USE_PTHREAD_LOCKS
|| GC_PTHREADS) && GC_ASSERTIONS && THREAD_SANITIZER] (I_DONT_HOLD_LOCK):
Define to TRUE; add comment.
* include/private/gc_locks.h [!GC_ALWAYS_MULTITHREADED && THREAD_SANITIZER]
(set_need_to_lock): Do not set GC_need_to_lock if already set; add comment.
* include/private/gc_priv.h [!GC_ATTR_NO_SANITIZE_THREAD]
(GC_ATTR_NO_SANITIZE_THREAD): New macro.
* include/private/gcconfig.h [__has_feature && __has_feature(thread_sanitizer)]
(THREAD_SANITIZER): Define.
* misc.c [THREADS] (next_random_no): New static function (with
GC_ATTR_NO_SANITIZE_THREAD).
* pthread_stop_world.c [!GC_OPENBSD_UTHREADS && !NACL]
(update_last_stop_count): Likewise.
* pthread_support.c [THREAD_SANITIZER && (USE_SPIN_LOCK
|| !NO_PTHREAD_TRYLOCK)] (is_collecting): Likewise.
* pthread_support.c [USE_SPIN_LOCK] (set_last_spins_and_high_spin_max,
reset_spin_max): Likewise.
* misc.c [THREADS] (GC_clear_stack): Remove random_no static variable; use
next_random_no() instead of ++random_no%13.
* pthread_stop_world.c [!GC_OPENBSD_UTHREADS && !NACL]
(GC_suspend_handler_inner): Call update_last_stop_count() instead of
me->stop_info.last_stop_count=my_stop_count.
* pthread_support.c [USE_SPIN_LOCK || !NO_PTHREAD_TRYLOCK] (SPIN_MAX):
Define only if not yet.
* pthread_support.c [USE_SPIN_LOCK || !NO_PTHREAD_TRYLOCK] (GC_collecting):
Initialize to FALSE instead of 0.
* pthread_support.c [!(THREAD_SANITIZER && (USE_SPIN_LOCK
|| !NO_PTHREAD_TRYLOCK))] (is_collecting): Define as a macro.
* pthread_support.c [USE_SPIN_LOCK] (low_spin_max, high_spin_max, spin_max,
last_spins): Move definition out of GC_lock().
* pthread_support.c (GC_lock): Use is_collecting(),
set_last_spins_and_high_spin_max() and reset_spin_max().
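For context, the following is a minimal, self-contained sketch of the
suppression pattern this commit relies on: detecting TSan via
__has_feature(thread_sanitizer), expanding GC_ATTR_NO_SANITIZE_THREAD to
Clang's no_sanitize attribute, and wrapping a benign-race read in an
uninstrumented helper. The exact spellings below are illustrative
assumptions, not the verbatim bdwgc headers.

    /* tsan_hint_sketch.c - illustrative only, not part of bdwgc.        */

    /* Detect Clang's Thread Sanitizer; this sketch covers only the      */
    /* Clang __has_feature path, as the changelog entry above does.      */
    #ifndef THREAD_SANITIZER
    # if defined(__has_feature)
    #   if __has_feature(thread_sanitizer)
    #     define THREAD_SANITIZER
    #   endif
    # endif
    #endif

    /* Suppress TSan instrumentation for a given function; expands to    */
    /* nothing when TSan is not in use.                                   */
    #ifdef THREAD_SANITIZER
    # define GC_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread")))
    #else
    # define GC_ATTR_NO_SANITIZE_THREAD /* empty */
    #endif

    /* A hint flag read and written by multiple threads; the race is     */
    /* benign, so the accessor stays unsynchronized but uninstrumented   */
    /* rather than being converted to atomics.                           */
    static volatile int collecting_hint = 0;

    GC_ATTR_NO_SANITIZE_THREAD
    static int is_collecting_hint(void)
    {
      return collecting_hint;   /* racy read, intentionally not flagged */
    }

A caller such as GC_lock() then tests the accessor in its spin loop, as
the diff below shows, so only the racy read is exempted from TSan rather
than the whole locking path.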
Diffstat (limited to 'pthread_support.c')
-rw-r--r--  pthread_support.c  54
1 file changed, 43 insertions, 11 deletions
diff --git a/pthread_support.c b/pthread_support.c
index 9da80142..659d603b 100644
--- a/pthread_support.c
+++ b/pthread_support.c
@@ -1884,10 +1884,12 @@ STATIC void GC_pause(void)
}
#endif
-#define SPIN_MAX 128 /* Maximum number of calls to GC_pause before */
+#ifndef SPIN_MAX
+# define SPIN_MAX 128 /* Maximum number of calls to GC_pause before */
/* give up. */
+#endif
-GC_INNER volatile GC_bool GC_collecting = 0;
+GC_INNER volatile GC_bool GC_collecting = FALSE;
/* A hint that we're in the collector and */
/* holding the allocation lock for an */
/* extended period. */
@@ -1955,6 +1957,18 @@ STATIC void GC_generic_lock(pthread_mutex_t * lock)
#endif /* !USE_SPIN_LOCK || ... */
+#if defined(THREAD_SANITIZER) \
+ && (defined(USE_SPIN_LOCK) || !defined(NO_PTHREAD_TRYLOCK))
+ /* GC_collecting is a hint, a potential data race between */
+ /* GC_lock() and ENTER/EXIT_GC() is OK to ignore. */
+ static GC_bool is_collecting(void) GC_ATTR_NO_SANITIZE_THREAD
+ {
+ return GC_collecting;
+ }
+#else
+# define is_collecting() GC_collecting
+#endif
+
#if defined(USE_SPIN_LOCK)
/* Reasonably fast spin locks. Basically the same implementation */
@@ -1963,13 +1977,30 @@ STATIC void GC_generic_lock(pthread_mutex_t * lock)
GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;
+# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
+# define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
+ static unsigned spin_max = low_spin_max;
+ static unsigned last_spins = 0;
+
+ /* A potential data race between threads invoking GC_lock which reads */
+ /* and updates spin_max and last_spins could be ignored because these */
+ /* variables are hints only. (Atomic getters and setters are avoided */
+ /* here for performance reasons.) */
+ static void set_last_spins_and_high_spin_max(unsigned new_last_spins)
+ GC_ATTR_NO_SANITIZE_THREAD
+ {
+ last_spins = new_last_spins;
+ spin_max = high_spin_max;
+ }
+
+ static void reset_spin_max(void) GC_ATTR_NO_SANITIZE_THREAD
+ {
+ spin_max = low_spin_max;
+ }
+
GC_INNER void GC_lock(void)
{
-# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
-# define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
- static unsigned spin_max = low_spin_max;
unsigned my_spin_max;
- static unsigned last_spins = 0;
unsigned my_last_spins;
unsigned i;
@@ -1979,7 +2010,8 @@ GC_INNER void GC_lock(void)
my_spin_max = spin_max;
my_last_spins = last_spins;
for (i = 0; i < my_spin_max; i++) {
- if (GC_collecting || GC_nprocs == 1) goto yield;
+ if (is_collecting() || GC_nprocs == 1)
+ goto yield;
if (i < my_last_spins/2) {
GC_pause();
continue;
@@ -1991,13 +2023,12 @@ GC_INNER void GC_lock(void)
* against the other process with which we were contending.
* Thus it makes sense to spin longer the next time.
*/
- last_spins = i;
- spin_max = high_spin_max;
+ set_last_spins_and_high_spin_max(i);
return;
}
}
/* We are probably being scheduled against the other process. Sleep. */
- spin_max = low_spin_max;
+ reset_spin_max();
yield:
for (i = 0;; ++i) {
if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
@@ -2026,10 +2057,11 @@ yield:
}
#else /* !USE_SPIN_LOCK */
+
GC_INNER void GC_lock(void)
{
#ifndef NO_PTHREAD_TRYLOCK
- if (1 == GC_nprocs || GC_collecting) {
+ if (1 == GC_nprocs || is_collecting()) {
pthread_mutex_lock(&GC_allocate_ml);
} else {
GC_generic_lock(&GC_allocate_ml);