Diffstat (limited to 'deps/jemalloc/include/jemalloc/internal/rtree.h')
-rw-r--r--  deps/jemalloc/include/jemalloc/internal/rtree.h | 520
1 file changed, 273 insertions(+), 247 deletions(-)
diff --git a/deps/jemalloc/include/jemalloc/internal/rtree.h b/deps/jemalloc/include/jemalloc/internal/rtree.h
index 16ccbebee..a00adb298 100644
--- a/deps/jemalloc/include/jemalloc/internal/rtree.h
+++ b/deps/jemalloc/include/jemalloc/internal/rtree.h
@@ -35,33 +35,52 @@
# define RTREE_LEAF_COMPACT
#endif
-/* Needed for initialization only. */
-#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
-
typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
};
+typedef struct rtree_metadata_s rtree_metadata_t;
+struct rtree_metadata_s {
+ szind_t szind;
+ extent_state_t state; /* Mirrors edata->state. */
+ bool is_head; /* Mirrors edata->is_head. */
+ bool slab;
+};
+
+typedef struct rtree_contents_s rtree_contents_t;
+struct rtree_contents_s {
+ edata_t *edata;
+ rtree_metadata_t metadata;
+};
+
+#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH
+#define RTREE_LEAF_STATE_SHIFT 2
+#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)
+
struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
/*
- * Single pointer-width field containing all three leaf element fields.
+ * Single pointer-width field containing all five leaf element fields.
* For example, on a 64-bit x64 system with 48 significant virtual
- * memory address bits, the index, extent, and slab fields are packed as
+ * memory address bits, the index, edata, state, is_head, and slab
+ * fields are packed as such:
*
* x: index
- * e: extent
+ * e: edata
+ * s: state
+ * h: is_head
* b: slab
*
- * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
+ * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb
*/
atomic_p_t le_bits;
#else
- atomic_p_t le_extent; /* (extent_t *) */
- atomic_u_t le_szind; /* (szind_t) */
- atomic_b_t le_slab; /* (bool) */
+ atomic_p_t le_edata; /* (edata_t *) */
+ /*
+ * From high to low bits: szind (8 bits), state (RTREE_LEAF_STATE_WIDTH
+ * bits), is_head, slab.
+ */
+ atomic_u_t le_metadata;
#endif
};
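
For orientation, the compact encoding above can be modeled in a few lines. The following is a minimal standalone sketch, not jemalloc code: it assumes LG_VADDR == 48, an 8-bit szind, the 3-bit state field at shift 2 shown in the diagram, and 128-byte-aligned edata pointers (matching the diagram's lowest e bit at bit 7); it also ignores the sign-extension of high pointer bits that the real decoder performs.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_LG_VADDR 48 /* Assumed: 48 significant vaddr bits. */

typedef struct {
    uintptr_t edata_addr; /* Assumed 128-byte aligned; bits 7..47. */
    unsigned szind;       /* Bits 48..55. */
    unsigned state;       /* Bits 2..4. */
    bool is_head;         /* Bit 1. */
    bool slab;            /* Bit 0. */
} demo_contents_t;

static uintptr_t
demo_encode(demo_contents_t c) {
    assert(c.edata_addr % 128 == 0);
    uintptr_t bits = c.edata_addr & (((uintptr_t)1 << DEMO_LG_VADDR) - 1);
    bits |= (uintptr_t)c.szind << DEMO_LG_VADDR;
    bits |= (uintptr_t)c.state << 2;
    bits |= (uintptr_t)c.is_head << 1;
    bits |= (uintptr_t)c.slab;
    return bits;
}

static demo_contents_t
demo_decode(uintptr_t bits) {
    demo_contents_t c;
    c.szind = (unsigned)(bits >> DEMO_LG_VADDR);
    c.state = (unsigned)((bits >> 2) & 0x7);
    c.is_head = (bool)(bits & 2);
    c.slab = (bool)(bits & 1);
    /* Keep the pointer bits; drop szind above and metadata below. */
    c.edata_addr = bits & ((((uintptr_t)1 << DEMO_LG_VADDR) - 1)
        & ~(uintptr_t)127);
    return c;
}

int
main(void) {
    demo_contents_t in = { .edata_addr = (uintptr_t)0x7f12345680,
        .szind = 42, .state = 1, .is_head = false, .slab = true };
    demo_contents_t out = demo_decode(demo_encode(in));
    assert(out.edata_addr == in.edata_addr && out.szind == in.szind);
    assert(out.state == in.state && out.is_head == in.is_head &&
        out.slab == in.slab);
    return 0;
}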
@@ -78,6 +97,7 @@ struct rtree_level_s {
typedef struct rtree_s rtree_t;
struct rtree_s {
+ base_t *base;
malloc_mutex_t init_lock;
/* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
@@ -109,42 +129,29 @@ static const rtree_level_t rtree_levels[] = {
#endif
};
-bool rtree_new(rtree_t *rtree, bool zeroed);
-
-typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
-extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
+bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed);
-typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
-extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
-
-typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
-extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
-
-typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
-extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
-#ifdef JEMALLOC_JET
-void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
-#endif
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
-JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_leafkey(uintptr_t key) {
+JEMALLOC_ALWAYS_INLINE unsigned
+rtree_leaf_maskbits(void) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
rtree_levels[RTREE_HEIGHT-1].bits);
- unsigned maskbits = ptrbits - cumbits;
- uintptr_t mask = ~((ZU(1) << maskbits) - 1);
+ return ptrbits - cumbits;
+}
+
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_leafkey(uintptr_t key) {
+ uintptr_t mask = ~((ZU(1) << rtree_leaf_maskbits()) - 1);
return (key & mask);
}
JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
- unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
- unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
- rtree_levels[RTREE_HEIGHT-1].bits);
- unsigned maskbits = ptrbits - cumbits;
- return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
+ return (size_t)((key >> rtree_leaf_maskbits()) &
+ (RTREE_CTX_NCACHE - 1));
}
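
Concretely, a standalone sketch (with assumed geometry, not jemalloc's actual constants): if the last level's bits leave rtree_leaf_maskbits() == 30, so that each leaf covers 1 GiB of address space as noted in a comment further down, then two keys in the same 1 GiB region share a leafkey and hit the same direct-mapped cache slot.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_MASKBITS 30 /* Assumed: each leaf covers 2^30 bytes. */
#define DEMO_NCACHE 16   /* Assumed stand-in for RTREE_CTX_NCACHE. */

static uintptr_t
demo_leafkey(uintptr_t key) {
    return key & ~(((uintptr_t)1 << DEMO_MASKBITS) - 1);
}

static size_t
demo_cache_slot(uintptr_t key) {
    return (size_t)((key >> DEMO_MASKBITS) & (DEMO_NCACHE - 1));
}

int
main(void) {
    uintptr_t a = (uintptr_t)0x7f1234561000;
    uintptr_t b = a + 4096; /* Same 1 GiB leaf as a. */
    assert(demo_leafkey(a) == demo_leafkey(b));
    assert(demo_cache_slot(a) == demo_cache_slot(b));
    return 0;
}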
JEMALLOC_ALWAYS_INLINE uintptr_t
@@ -176,151 +183,174 @@ rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_leaf_elm_bits_encode(rtree_contents_t contents) {
+ assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
+ uintptr_t edata_bits = (uintptr_t)contents.edata
+ & (((uintptr_t)1 << LG_VADDR) - 1);
+
+ uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR;
+ uintptr_t slab_bits = (uintptr_t)contents.metadata.slab;
+ uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1;
+ uintptr_t state_bits = (uintptr_t)contents.metadata.state <<
+ RTREE_LEAF_STATE_SHIFT;
+ uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits |
+ slab_bits;
+ assert((edata_bits & metadata_bits) == 0);
+
+ return edata_bits | metadata_bits;
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_contents_t
+rtree_leaf_elm_bits_decode(uintptr_t bits) {
+ rtree_contents_t contents;
+ /* Do the easy things first. */
+ contents.metadata.szind = bits >> LG_VADDR;
+ contents.metadata.slab = (bool)(bits & 1);
+ contents.metadata.is_head = (bool)(bits & (1 << 1));
+
+ uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >>
+ RTREE_LEAF_STATE_SHIFT;
+ assert(state_bits <= extent_state_max);
+ contents.metadata.state = (extent_state_t)state_bits;
+
+ uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1);
# ifdef __aarch64__
/*
* aarch64 doesn't sign extend the highest virtual address bit to set
- * the higher ones. Instead, the high bits gets zeroed.
+ * the higher ones. Instead, the high bits get zeroed.
*/
uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
- /* Mask off the slab bit. */
- uintptr_t low_bit_mask = ~(uintptr_t)1;
+ /* Mask off metadata. */
uintptr_t mask = high_bit_mask & low_bit_mask;
- return (extent_t *)(bits & mask);
+ contents.edata = (edata_t *)(bits & mask);
# else
- /* Restore sign-extended high bits, mask slab bit. */
- return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
- RTREE_NHIB) & ~((uintptr_t)0x1));
+ /* Restore sign-extended high bits, mask metadata bits. */
+ contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
+ >> RTREE_NHIB) & low_bit_mask);
# endif
+ assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
+ return contents;
}
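
The non-aarch64 branch restores the high pointer bits with an arithmetic-shift round trip: shifting left by RTREE_NHIB makes bit LG_VADDR-1 the sign bit, and the signed right shift replicates it upward. A minimal sketch, assuming LG_VADDR == 48 on a 64-bit target (so RTREE_NHIB == 16); like the original, it relies on arithmetic right shift of negative values, which the supported compilers provide.

#include <assert.h>
#include <stdint.h>

#define DEMO_NHIB 16 /* Assumed: 64 - LG_VADDR insignificant high bits. */

static uintptr_t
demo_sign_extend(uintptr_t bits) {
    /* Bit 47 is replicated into bits 48..63. */
    return (uintptr_t)((intptr_t)(bits << DEMO_NHIB) >> DEMO_NHIB);
}

int
main(void) {
    /* A "high half" pointer with its top 16 bits stripped for packing. */
    uintptr_t ptr = (uintptr_t)UINT64_C(0xffff800000001000);
    uintptr_t stripped = ptr & (((uintptr_t)1 << 48) - 1);
    assert(demo_sign_extend(stripped) == ptr);
    return 0;
}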
-JEMALLOC_ALWAYS_INLINE szind_t
-rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
- return (szind_t)(bits >> LG_VADDR);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
- return (bool)(bits & (uintptr_t)0x1);
-}
+# endif /* RTREE_LEAF_COMPACT */
-# endif
-
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool dependent) {
+JEMALLOC_ALWAYS_INLINE rtree_contents_t
+rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
+ bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- return rtree_leaf_elm_bits_extent_get(bits);
+ rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits);
+ return contents;
#else
- extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
+ rtree_contents_t contents;
+ unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
- return extent;
-#endif
-}
+ contents.metadata.slab = (bool)(metadata_bits & 1);
+ contents.metadata.is_head = (bool)(metadata_bits & (1 << 1));
-JEMALLOC_ALWAYS_INLINE szind_t
-rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool dependent) {
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- return rtree_leaf_elm_bits_szind_get(bits);
-#else
- return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
- : ATOMIC_ACQUIRE);
+ uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >>
+ RTREE_LEAF_STATE_SHIFT;
+ assert(state_bits <= extent_state_max);
+ contents.metadata.state = (extent_state_t)state_bits;
+ contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT +
+ RTREE_LEAF_STATE_WIDTH);
+
+ contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
+ ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
+
+ return contents;
#endif
}
-JEMALLOC_ALWAYS_INLINE bool
-rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool dependent) {
+JEMALLOC_ALWAYS_INLINE void
+rtree_contents_encode(rtree_contents_t contents, void **bits,
+ unsigned *additional) {
#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- return rtree_leaf_elm_bits_slab_get(bits);
+ *bits = (void *)rtree_leaf_elm_bits_encode(contents);
#else
- return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
- ATOMIC_ACQUIRE);
+ *additional = (unsigned)contents.metadata.slab
+ | ((unsigned)contents.metadata.is_head << 1)
+ | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT)
+ | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT +
+ RTREE_LEAF_STATE_WIDTH));
+ *bits = contents.edata;
#endif
}
-static inline void
-rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, extent_t *extent) {
+JEMALLOC_ALWAYS_INLINE void
+rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, void *bits, unsigned additional) {
#ifdef RTREE_LEAF_COMPACT
- uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
- uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
- LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
- | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
+ atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE);
#else
- atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
+ atomic_store_u(&elm->le_metadata, additional, ATOMIC_RELEASE);
+ /*
+ * Write edata last, since the element is atomically considered valid
+ * as soon as the edata field is non-NULL.
+ */
+ atomic_store_p(&elm->le_edata, bits, ATOMIC_RELEASE);
#endif
}
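
In the non-compact branch the ordering matters: metadata is stored first and the edata pointer last, both with release semantics, so a reader that acquire-loads a non-NULL le_edata is guaranteed to observe the matching metadata. A sketch of this publish pattern in plain C11 atomics (jemalloc's atomic_* wrappers are assumed to map onto these primitives).

#include <stdatomic.h>
#include <stddef.h>

typedef struct {
    _Atomic(unsigned) metadata;
    _Atomic(void *) edata;
} demo_elm_t;

static void
demo_publish(demo_elm_t *elm, void *edata, unsigned metadata) {
    atomic_store_explicit(&elm->metadata, metadata, memory_order_release);
    /* Store edata last: non-NULL edata marks the element valid. */
    atomic_store_explicit(&elm->edata, edata, memory_order_release);
}

static int
demo_try_read(demo_elm_t *elm, void **edata, unsigned *metadata) {
    void *e = atomic_load_explicit(&elm->edata, memory_order_acquire);
    if (e == NULL) {
        return 0; /* Not (yet) valid. */
    }
    /*
     * The acquire load above synchronizes with the release store of
     * edata, which was ordered after the metadata store, so this load
     * observes the matching metadata.
     */
    *metadata = atomic_load_explicit(&elm->metadata, memory_order_relaxed);
    *edata = e;
    return 1;
}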
-static inline void
-rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, szind_t szind) {
- assert(szind <= SC_NSIZES);
+JEMALLOC_ALWAYS_INLINE void
+rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, rtree_contents_t contents) {
+ assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0);
+ void *bits;
+ unsigned additional;
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
- true);
- uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
- ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
- (((uintptr_t)0x1 << LG_VADDR) - 1)) |
- ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
-#else
- atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
-#endif
+ rtree_contents_encode(contents, &bits, &additional);
+ rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
}
-static inline void
-rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool slab) {
+/* The state field can be updated independently (and more frequently). */
+JEMALLOC_ALWAYS_INLINE void
+rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm1, rtree_leaf_elm_t *elm2, extent_state_t state) {
+ assert(elm1 != NULL);
#ifdef RTREE_LEAF_COMPACT
- uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
- true);
- uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
- LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
- (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
+ uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1,
+ /* dependent */ true);
+ bits &= ~RTREE_LEAF_STATE_MASK;
+ bits |= state << RTREE_LEAF_STATE_SHIFT;
+ atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE);
+ if (elm2 != NULL) {
+ atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE);
+ }
#else
- atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
+ unsigned bits = atomic_load_u(&elm1->le_metadata, ATOMIC_RELAXED);
+ bits &= ~RTREE_LEAF_STATE_MASK;
+ bits |= state << RTREE_LEAF_STATE_SHIFT;
+ atomic_store_u(&elm1->le_metadata, bits, ATOMIC_RELEASE);
+ if (elm2 != NULL) {
+ atomic_store_u(&elm2->le_metadata, bits, ATOMIC_RELEASE);
+ }
#endif
}
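
RTREE_LEAF_STATE_MASK selects just the state bits, so the update is a plain load, mask, or, store; no compare-and-swap is needed because the routine assumes a single writer. A standalone sketch of the bit manipulation, with jemalloc's MASK(width, shift) helper expanded under the assumption that it means ((1 << width) - 1) << shift.

#include <assert.h>
#include <stdint.h>

/* Assumed expansion of jemalloc's MASK(width, shift) helper. */
#define DEMO_MASK(width, shift) ((((uintptr_t)1 << (width)) - 1) << (shift))

#define DEMO_STATE_WIDTH 3 /* Assumed EDATA_BITS_STATE_WIDTH. */
#define DEMO_STATE_SHIFT 2
#define DEMO_STATE_MASK DEMO_MASK(DEMO_STATE_WIDTH, DEMO_STATE_SHIFT)

/* Replace only the state bits of a packed leaf word. */
static uintptr_t
demo_state_update(uintptr_t bits, uintptr_t state) {
    assert(state <= (DEMO_STATE_MASK >> DEMO_STATE_SHIFT));
    bits &= ~DEMO_STATE_MASK;
    bits |= state << DEMO_STATE_SHIFT;
    return bits;
}

int
main(void) {
    uintptr_t bits = demo_state_update((uintptr_t)0x7f12345683, 2);
    assert((bits & DEMO_STATE_MASK) >> DEMO_STATE_SHIFT == 2);
    return 0;
}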
-static inline void
-rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) {
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
- ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
- ((uintptr_t)slab);
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
-#else
- rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
- rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
- /*
- * Write extent last, since the element is atomically considered valid
- * as soon as the extent field is non-NULL.
- */
- rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
-#endif
-}
+/*
+ * Tries to look up the key in the L1 cache, returning false if there's
+ * a hit, or true if there's a miss.
+ * Key is allowed to be NULL; returns true in this case.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, rtree_leaf_elm_t **elm) {
+ size_t slot = rtree_cache_direct_map(key);
+ uintptr_t leafkey = rtree_leafkey(key);
+ assert(leafkey != RTREE_LEAFKEY_INVALID);
-static inline void
-rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
- assert(!slab || szind < SC_NBINS);
+ if (unlikely(rtree_ctx->cache[slot].leafkey != leafkey)) {
+ return true;
+ }
- /*
- * The caller implicitly assures that it is the only writer to the szind
- * and slab fields, and that the extent field cannot currently change.
- */
- rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
- rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
+ rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
+ assert(leaf != NULL);
+ uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
+ *elm = &leaf[subkey];
+
+ return false;
}
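
A hypothetical caller fragment (not from this file, and not standalone; it assumes jemalloc's internal types) showing the intended pattern: check the boolean, and on a miss fall back to rtree_leaf_elm_lookup_hard(), which can also refill the cache.

rtree_leaf_elm_t *elm;
if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) {
    /* L1 miss: take the slow path. */
    elm = rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key,
        /* dependent */ true, /* init_missing */ false);
}
rtree_contents_t contents = rtree_leaf_elm_read(tsdn, rtree, elm,
    /* dependent */ true);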
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
@@ -382,147 +412,143 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
dependent, init_missing);
}
+/*
+ * Returns true on lookup failure.
+ */
static inline bool
-rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
- extent_t *extent, szind_t szind, bool slab) {
- /* Use rtree_clear() to set the extent to NULL. */
- assert(extent != NULL);
-
+rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, rtree_contents_t *r_contents) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
- key, false, true);
+ key, /* dependent */ false, /* init_missing */ false);
if (elm == NULL) {
return true;
}
-
- assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
- rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
-
+ *r_contents = rtree_leaf_elm_read(tsdn, rtree, elm,
+ /* dependent */ false);
return false;
}
-JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
-rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
- bool dependent) {
+static inline rtree_contents_t
+rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
- key, dependent, false);
- if (!dependent && elm == NULL) {
- return NULL;
- }
+ key, /* dependent */ true, /* init_missing */ false);
assert(elm != NULL);
- return elm;
+ return rtree_leaf_elm_read(tsdn, rtree, elm, /* dependent */ true);
}
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
- return NULL;
- }
- return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
- return SC_NSIZES;
- }
- return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
+static inline rtree_metadata_t
+rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key) {
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
+ key, /* dependent */ true, /* init_missing */ false);
+ assert(elm != NULL);
+ return rtree_leaf_elm_read(tsdn, rtree, elm,
+ /* dependent */ true).metadata;
}
/*
- * rtree_slab_read() is intentionally omitted because slab is always read in
- * conjunction with szind, which makes rtree_szind_slab_read() a better choice.
+ * Returns true when the request cannot be fulfilled by fastpath.
*/
-
-JEMALLOC_ALWAYS_INLINE bool
-rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
+static inline bool
+rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, rtree_metadata_t *r_rtree_metadata) {
+ rtree_leaf_elm_t *elm;
+ /*
+ * Check the bool return value (lookup success or not) instead of
+ * elm == NULL, which would cost an extra branch: when the cache lookup
+ * succeeds, the returned pointer is never NULL, which the compiler
+ * cannot know.
+ */
+ if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) {
return true;
}
- *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
- *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
+ assert(elm != NULL);
+ *r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm,
+ /* dependent */ true).metadata;
return false;
}
-/*
- * Try to read szind_slab from the L1 cache. Returns true on a hit,
- * and fills in r_szind and r_slab. Otherwise returns false.
- *
- * Key is allowed to be NULL in order to save an extra branch on the
- * fastpath. returns false in this case.
- */
-JEMALLOC_ALWAYS_INLINE bool
-rtree_szind_slab_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, szind_t *r_szind, bool *r_slab) {
- rtree_leaf_elm_t *elm;
-
- size_t slot = rtree_cache_direct_map(key);
- uintptr_t leafkey = rtree_leafkey(key);
- assert(leafkey != RTREE_LEAFKEY_INVALID);
-
- if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
- rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
- assert(leaf != NULL);
- uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
- elm = &leaf[subkey];
-
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree,
- elm, true);
- *r_szind = rtree_leaf_elm_bits_szind_get(bits);
- *r_slab = rtree_leaf_elm_bits_slab_get(bits);
-#else
- *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, true);
- *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, true);
-#endif
- return true;
- } else {
- return false;
+JEMALLOC_ALWAYS_INLINE void
+rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t base, uintptr_t end, rtree_contents_t contents, bool clearing) {
+ assert((base & PAGE_MASK) == 0 && (end & PAGE_MASK) == 0);
+ /*
+ * Only used for emap_(de)register_interior, which implies the
+ * boundaries have been registered already. Therefore all the lookups
+ * are dependent without init_missing, assuming the range spans at
+ * most 2 rtree leaf nodes (each covering 1 GiB of vaddr).
+ */
+ void *bits;
+ unsigned additional;
+ rtree_contents_encode(contents, &bits, &additional);
+
+ rtree_leaf_elm_t *elm = NULL; /* Dead store. */
+ for (uintptr_t addr = base; addr <= end; addr += PAGE) {
+ if (addr == base ||
+ (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) {
+ elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
+ /* dependent */ true, /* init_missing */ false);
+ assert(elm != NULL);
+ }
+ assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
+ /* dependent */ true, /* init_missing */ false));
+ assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm,
+ /* dependent */ true).edata != NULL);
+ rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
+ elm++;
}
}
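
The loop above re-resolves the leaf element only when addr crosses a leaf boundary, i.e. when the low rtree_leaf_maskbits() bits wrap around to zero; between boundaries it simply advances elm. A standalone sketch of the boundary test, assuming 30 mask bits (the 1 GiB leaves mentioned in the comment) and 4 KiB pages.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MASKBITS 30        /* Assumed: each leaf spans 1 GiB. */
#define DEMO_PAGE ((uintptr_t)4096)

int
main(void) {
    uintptr_t base = (uintptr_t)0x7f003ffff000; /* One page below a 1 GiB line. */
    uintptr_t end = base + 4 * DEMO_PAGE;
    unsigned lookups = 0;
    for (uintptr_t addr = base; addr <= end; addr += DEMO_PAGE) {
        if (addr == base ||
            (addr & (((uintptr_t)1 << DEMO_MASKBITS) - 1)) == 0) {
            lookups++; /* Would call rtree_leaf_elm_lookup(). */
        }
    }
    /* Prints 2: one lookup at base, one when crossing the boundary. */
    printf("%u\n", lookups);
    return 0;
}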
+
+JEMALLOC_ALWAYS_INLINE void
+rtree_write_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t base, uintptr_t end, rtree_contents_t contents) {
+ rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
+ /* clearing */ false);
+}
+
JEMALLOC_ALWAYS_INLINE bool
-rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
+rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
+ rtree_contents_t contents) {
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
+ key, /* dependent */ false, /* init_missing */ true);
+ if (elm == NULL) {
return true;
}
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- *r_szind = rtree_leaf_elm_bits_szind_get(bits);
- *r_slab = rtree_leaf_elm_bits_slab_get(bits);
-#else
- *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
- *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
-#endif
- return false;
-}
-static inline void
-rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, szind_t szind, bool slab) {
- assert(!slab || szind < SC_NBINS);
+ rtree_leaf_elm_write(tsdn, rtree, elm, contents);
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
- rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
+ return false;
}
static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
- assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
- NULL);
- rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false);
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
+ key, /* dependent */ true, /* init_missing */ false);
+ assert(elm != NULL);
+ assert(rtree_leaf_elm_read(tsdn, rtree, elm,
+ /* dependent */ true).edata != NULL);
+ rtree_contents_t contents;
+ contents.edata = NULL;
+ contents.metadata.szind = SC_NSIZES;
+ contents.metadata.slab = false;
+ contents.metadata.is_head = false;
+ contents.metadata.state = (extent_state_t)0;
+ rtree_leaf_elm_write(tsdn, rtree, elm, contents);
+}
+
+static inline void
+rtree_clear_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t base, uintptr_t end) {
+ rtree_contents_t contents;
+ contents.edata = NULL;
+ contents.metadata.szind = SC_NSIZES;
+ contents.metadata.slab = false;
+ contents.metadata.is_head = false;
+ contents.metadata.state = (extent_state_t)0;
+ rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
+ /* clearing */ true);
}
#endif /* JEMALLOC_INTERNAL_RTREE_H */