path: root/src/peak_event.c
author     Qi Wang <interwq@gwu.edu>  2022-05-06 11:28:25 -0700
committer  Qi Wang <interwq@gwu.edu>  2022-05-06 11:28:25 -0700
commit     54eaed1d8b56b1aa528be3bdd1877e59c56fa90c (patch)
tree       e79620e0c00b1f8b6b698fbe74df6bae7d812ae2 /src/peak_event.c
parent     ea6b3e973b477b8061e0076bb257dbd7f3faa756 (diff)
parent     304c919829f9f340669b61fa64867cfe5dba8021 (diff)
Merge branch 'dev' (tag: 5.3.0, master)
Diffstat (limited to 'src/peak_event.c')
-rw-r--r--  src/peak_event.c  82
1 file changed, 82 insertions, 0 deletions
diff --git a/src/peak_event.c b/src/peak_event.c
new file mode 100644
index 00000000..4093fbcc
--- /dev/null
+++ b/src/peak_event.c
@@ -0,0 +1,82 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/peak_event.h"
+
+#include "jemalloc/internal/activity_callback.h"
+#include "jemalloc/internal/peak.h"
+
+/*
+ * Update every 64K by default. We're not exposing this as a configuration
+ * option for now; we don't want to bind ourselves too tightly to any particular
+ * performance requirements for small values, or guarantee that we'll even be
+ * able to provide fine-grained accuracy.
+ */
+#define PEAK_EVENT_WAIT (64 * 1024)
+
+/* Update the peak with current tsd state. */
+void
+peak_event_update(tsd_t *tsd) {
+ uint64_t alloc = tsd_thread_allocated_get(tsd);
+ uint64_t dalloc = tsd_thread_deallocated_get(tsd);
+ peak_t *peak = tsd_peakp_get(tsd);
+ peak_update(peak, alloc, dalloc);
+}
+
+static void
+peak_event_activity_callback(tsd_t *tsd) {
+ activity_callback_thunk_t *thunk = tsd_activity_callback_thunkp_get(
+ tsd);
+ uint64_t alloc = tsd_thread_allocated_get(tsd);
+ uint64_t dalloc = tsd_thread_deallocated_get(tsd);
+ if (thunk->callback != NULL) {
+ thunk->callback(thunk->uctx, alloc, dalloc);
+ }
+}
+
+/* Set current state to zero. */
+void
+peak_event_zero(tsd_t *tsd) {
+ uint64_t alloc = tsd_thread_allocated_get(tsd);
+ uint64_t dalloc = tsd_thread_deallocated_get(tsd);
+ peak_t *peak = tsd_peakp_get(tsd);
+ peak_set_zero(peak, alloc, dalloc);
+}
+
+uint64_t
+peak_event_max(tsd_t *tsd) {
+ peak_t *peak = tsd_peakp_get(tsd);
+ return peak_max(peak);
+}
+
+uint64_t
+peak_alloc_new_event_wait(tsd_t *tsd) {
+ return PEAK_EVENT_WAIT;
+}
+
+uint64_t
+peak_alloc_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+void
+peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ peak_event_update(tsd);
+ peak_event_activity_callback(tsd);
+}
+
+uint64_t
+peak_dalloc_new_event_wait(tsd_t *tsd) {
+ return PEAK_EVENT_WAIT;
+}
+
+uint64_t
+peak_dalloc_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+void
+peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ peak_event_update(tsd);
+ peak_event_activity_callback(tsd);
+}
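
For context on how this new file is consumed (not part of the commit): the sketch below shows one way a caller could read and reset the per-thread peak that peak_event_update() maintains. It assumes the public "thread.peak.read" and "thread.peak.reset" mallctls, which in jemalloc 5.3 are backed by peak_event_max() and peak_event_zero() and require a --enable-stats build; verify both against the build in use. Because peak events fire at PEAK_EVENT_WAIT granularity, the reported peak can lag true usage by up to 64 KiB.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* Generate some allocator activity on this thread. */
	void *p = malloc(1024 * 1024);
	if (p != NULL) {
		memset(p, 0, 1024 * 1024);
		free(p);
	}

	/*
	 * Read the peak of (allocated - deallocated) bytes observed for the
	 * calling thread; updates happen at peak-event granularity.
	 */
	uint64_t peak = 0;
	size_t sz = sizeof(peak);
	if (mallctl("thread.peak.read", &peak, &sz, NULL, 0) == 0) {
		printf("thread peak: %" PRIu64 " bytes\n", peak);
	}

	/* Re-zero the peak tracking for this thread. */
	mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
	return 0;
}

Build with -ljemalloc; if jemalloc was configured with a symbol prefix, the mallctl entry point is prefixed accordingly.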