author    zhxchen17 <zhxchen17@fb.com>    2019-08-14 16:10:09 -0700
committer Qi Wang <interwq@gmail.com>     2019-09-13 09:23:57 -0700
commit    b7c7df24ba7c3b76b4985084de6e20356b26547e (patch)
tree      56a53786ebd337ecda981aadf178e38476edc379
parent    4b76c684bb8d7f0b7960bfac84391e9fd51a234e (diff)
download  jemalloc-b7c7df24ba7c3b76b4985084de6e20356b26547e.tar.gz
Add max_per_bg_thd stats for per background thread mutexes.
Added a new stats row that aggregates the maximum value of the mutex counters across all background threads. Given that the per-bg-thread mutex is not expected to be contended, this counter is mainly for sanity checking / debugging.
-rw-r--r--  include/jemalloc/internal/background_thread_structs.h  1
-rw-r--r--  include/jemalloc/internal/mutex.h                       53
-rw-r--r--  include/jemalloc/internal/mutex_prof.h                  1
-rw-r--r--  src/background_thread.c                                 8
-rw-r--r--  src/ctl.c                                               3
5 files changed, 55 insertions, 11 deletions
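
Because max_per_bg_thd is added to MUTEX_PROF_GLOBAL_MUTEXES, the aggregated maxima should surface through the regular global-mutex stats mallctls. Below is a minimal sketch of reading one counter after this patch, assuming the usual stats.mutexes.<mutex>.<counter> naming convention for global mutexes (the exact mallctl path is an assumption, not something this patch spells out):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void) {
    	/* Refresh the stats snapshot before reading. */
    	uint64_t epoch = 1;
    	size_t sz = sizeof(epoch);
    	mallctl("epoch", &epoch, &sz, &epoch, sz);

    	/*
    	 * Assumption: like the other global mutexes, the new row is
    	 * exposed as "stats.mutexes.max_per_bg_thd.<counter>".
    	 */
    	uint64_t num_ops = 0;
    	sz = sizeof(num_ops);
    	if (mallctl("stats.mutexes.max_per_bg_thd.num_ops", &num_ops, &sz,
    	    NULL, 0) == 0) {
    		printf("max num_ops across bg thread mutexes: %"PRIu64"\n",
    		    num_ops);
    	}
    	return 0;
    }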
diff --git a/include/jemalloc/internal/background_thread_structs.h b/include/jemalloc/internal/background_thread_structs.h
index c02aa434..249115c3 100644
--- a/include/jemalloc/internal/background_thread_structs.h
+++ b/include/jemalloc/internal/background_thread_structs.h
@@ -48,6 +48,7 @@ struct background_thread_stats_s {
size_t num_threads;
uint64_t num_runs;
nstime_t run_interval;
+ mutex_prof_data_t max_counter_per_bg_thd;
};
typedef struct background_thread_stats_s background_thread_stats_t;
diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
index 7c24f072..f5b1163a 100644
--- a/include/jemalloc/internal/mutex.h
+++ b/include/jemalloc/internal/mutex.h
@@ -245,22 +245,25 @@ malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
-/* Copy the prof data from mutex for processing. */
static inline void
-malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
- malloc_mutex_t *mutex) {
- mutex_prof_data_t *source = &mutex->prof_data;
- /* Can only read holding the mutex. */
- malloc_mutex_assert_owner(tsdn, mutex);
-
+malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) {
/*
* Not *really* allowed (we shouldn't be doing non-atomic loads of
* atomic data), but the mutex protection makes this safe, and writing
* a member-for-member copy is tedious for this situation.
*/
- *data = *source;
+ *dst = *source;
/* n_wait_thds is not reported (modified w/o locking). */
- atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
+ atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED);
+}
+
+/* Copy the prof data from mutex for processing. */
+static inline void
+malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
+ malloc_mutex_t *mutex) {
+ /* Can only read holding the mutex. */
+ malloc_mutex_assert_owner(tsdn, mutex);
+ malloc_mutex_prof_copy(data, &mutex->prof_data);
}
static inline void
@@ -285,4 +288,36 @@ malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
data->n_lock_ops += source->n_lock_ops;
}
+/* Compare the prof data and update to the maximum. */
+static inline void
+malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data,
+ malloc_mutex_t *mutex) {
+ mutex_prof_data_t *source = &mutex->prof_data;
+ /* Can only read holding the mutex. */
+ malloc_mutex_assert_owner(tsdn, mutex);
+
+ if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) {
+ nstime_copy(&data->tot_wait_time, &source->tot_wait_time);
+ }
+ if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
+ nstime_copy(&data->max_wait_time, &source->max_wait_time);
+ }
+ if (source->n_wait_times > data->n_wait_times) {
+ data->n_wait_times = source->n_wait_times;
+ }
+ if (source->n_spin_acquired > data->n_spin_acquired) {
+ data->n_spin_acquired = source->n_spin_acquired;
+ }
+ if (source->max_n_thds > data->max_n_thds) {
+ data->max_n_thds = source->max_n_thds;
+ }
+ if (source->n_owner_switches > data->n_owner_switches) {
+ data->n_owner_switches = source->n_owner_switches;
+ }
+ if (source->n_lock_ops > data->n_lock_ops) {
+ data->n_lock_ops = source->n_lock_ops;
+ }
+ /* n_wait_thds is not reported. */
+}
+
#endif /* JEMALLOC_INTERNAL_MUTEX_H */
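
Note that malloc_mutex_prof_max_update takes a per-counter maximum, so the reported row need not correspond to any single background thread's mutex; different counters can come from different threads. A stripped-down, standalone illustration of that aggregation behavior (hypothetical struct and field names, not jemalloc code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Two counters stand in for the full mutex_prof_data_t. */
    typedef struct {
    	uint64_t n_lock_ops;
    	uint64_t n_owner_switches;
    } fake_counters_t;

    static void
    fake_max_update(fake_counters_t *data, const fake_counters_t *source) {
    	if (source->n_lock_ops > data->n_lock_ops) {
    		data->n_lock_ops = source->n_lock_ops;
    	}
    	if (source->n_owner_switches > data->n_owner_switches) {
    		data->n_owner_switches = source->n_owner_switches;
    	}
    }

    int
    main(void) {
    	fake_counters_t agg = {0, 0};
    	fake_counters_t thd0 = {100, 3};
    	fake_counters_t thd1 = {40, 9};
    	fake_max_update(&agg, &thd0);
    	fake_max_update(&agg, &thd1);
    	/* Prints "100 9": each maximum comes from a different thread. */
    	printf("%" PRIu64 " %" PRIu64 "\n", agg.n_lock_ops,
    	    agg.n_owner_switches);
    	return 0;
    }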
diff --git a/include/jemalloc/internal/mutex_prof.h b/include/jemalloc/internal/mutex_prof.h
index 6288ede5..190402e6 100644
--- a/include/jemalloc/internal/mutex_prof.h
+++ b/include/jemalloc/internal/mutex_prof.h
@@ -7,6 +7,7 @@
#define MUTEX_PROF_GLOBAL_MUTEXES \
OP(background_thread) \
+ OP(max_per_bg_thd) \
OP(ctl) \
OP(prof) \
OP(prof_thds_data) \
diff --git a/src/background_thread.c b/src/background_thread.c
index 57b9b256..bea445f2 100644
--- a/src/background_thread.c
+++ b/src/background_thread.c
@@ -794,9 +794,11 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
return true;
}
- stats->num_threads = n_background_threads;
- uint64_t num_runs = 0;
nstime_init(&stats->run_interval, 0);
+ memset(&stats->max_counter_per_bg_thd, 0, sizeof(mutex_prof_data_t));
+
+ uint64_t num_runs = 0;
+ stats->num_threads = n_background_threads;
for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
@@ -809,6 +811,8 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
if (info->state != background_thread_stopped) {
num_runs += info->tot_n_runs;
nstime_add(&stats->run_interval, &info->tot_sleep_time);
+ malloc_mutex_prof_max_update(tsdn,
+ &stats->max_counter_per_bg_thd, &info->mtx);
}
malloc_mutex_unlock(tsdn, &info->mtx);
}
diff --git a/src/ctl.c b/src/ctl.c
index 0beef6e0..3ec6ca24 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -1042,6 +1042,9 @@ ctl_background_thread_stats_read(tsdn_t *tsdn) {
memset(stats, 0, sizeof(background_thread_stats_t));
nstime_init(&stats->run_interval, 0);
}
+ malloc_mutex_prof_copy(
+ &ctl_stats->mutex_prof_data[global_prof_mutex_max_per_bg_thd],
+ &stats->max_counter_per_bg_thd);
}
static void