path: root/deps/jemalloc/include/jemalloc/internal/san_bump.h
#ifndef JEMALLOC_INTERNAL_SAN_BUMP_H
#define JEMALLOC_INTERNAL_SAN_BUMP_H

#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/mutex.h"

/* Retained allocation size for the bump allocator: (size_t)4 << 20 == 4 MiB. */
#define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20)

extern bool opt_retain;

typedef struct ehooks_s ehooks_t;
typedef struct pac_s pac_t;

typedef struct san_bump_alloc_s san_bump_alloc_t;
struct san_bump_alloc_s {
	malloc_mutex_t mtx;

	edata_t *curr_reg;
};

static inline bool
san_bump_enabled(void) {
	/*
	 * We enable the san_bump allocator only when it's possible to break up a
	 * mapping and unmap a part of it (maps_coalesce). This is needed to
	 * ensure the arena destruction process can destroy all retained guarded
	 * extents one by one and to unmap a trailing part of a retained guarded
	 * region when it's too small to fit a pending allocation.
	 * opt_retain is required, because this allocator retains a large
	 * virtual memory mapping and returns smaller parts of it.
	 */
	return maps_coalesce && opt_retain;
}

static inline bool
san_bump_alloc_init(san_bump_alloc_t *sba) {
	bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
	    WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
	if (err) {
		return true;
	}
	sba->curr_reg = NULL;

	return false;
}
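
/*
 * Illustrative usage (a sketch, not taken from jemalloc itself): a caller
 * would be expected to check san_bump_enabled() before setting the allocator
 * up, roughly:
 *
 *	san_bump_alloc_t sba;
 *	if (san_bump_enabled() && !san_bump_alloc_init(&sba)) {
 *		... guarded extents can now be requested via san_bump_alloc() ...
 *	}
 */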

edata_t *
san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac, ehooks_t *ehooks,
    size_t size, bool zero);
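
/*
 * Sketch of the behavior the declaration above implies (an assumption about
 * san_bump.c, not a statement of its exact implementation): under sba->mtx,
 * requests are bump-carved out of curr_reg; when the remaining tail of
 * curr_reg is too small for the pending request, that tail is unmapped and a
 * fresh SBA_RETAINED_ALLOC_SIZE (4 MiB) retained region is obtained.
 * Roughly, with remaining() and carve() as hypothetical helpers:
 *
 *	malloc_mutex_lock(tsdn, &sba->mtx);
 *	if (sba->curr_reg == NULL || remaining(sba->curr_reg) < size) {
 *		... unmap the leftover tail, retain a new 4 MiB region ...
 *	}
 *	edata_t *edata = carve(sba->curr_reg, size);
 *	malloc_mutex_unlock(tsdn, &sba->mtx);
 */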

#endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */