#ifndef JEMALLOC_INTERNAL_EDATA_H
#define JEMALLOC_INTERNAL_EDATA_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin_info.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/hpdata.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/slab_data.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/typed_list.h"

/*
 * sizeof(edata_t) is 128 bytes on 64-bit architectures.  Ensure this
 * alignment so that the low bits of an edata_t pointer are free for use in
 * the rtree leaf.
 */
#define EDATA_ALIGNMENT 128
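
/*
 * Illustration only (not jemalloc API): since EDATA_ALIGNMENT is 128 == 1<<7,
 * the low 7 bits of any conforming edata_t pointer are zero, so small flags
 * can be folded into the same word and the pointer recovered by masking:
 *
 *	uintptr_t tagged = (uintptr_t)edata | flags;	// assumes flags < 128
 *	edata_t *ptr = (edata_t *)(tagged & ~((uintptr_t)EDATA_ALIGNMENT - 1));
 *	unsigned flags_out = (unsigned)(tagged & (EDATA_ALIGNMENT - 1));
 *
 * The actual packed encoding is defined by the rtree/emap code, not here.
 */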

enum extent_state_e {
	extent_state_active   = 0,
	extent_state_dirty    = 1,
	extent_state_muzzy    = 2,
	extent_state_retained = 3,
	extent_state_transition = 4, /* This and subsequent states are intermediate. */
	extent_state_merging = 5,
	extent_state_max = 5 /* Sanity checking only. */
};
typedef enum extent_state_e extent_state_t;

enum extent_head_state_e {
	EXTENT_NOT_HEAD,
	EXTENT_IS_HEAD   /* See comments in ehooks_default_merge_impl(). */
};
typedef enum extent_head_state_e extent_head_state_t;

/*
 * Which implementation of the page allocator interface (PAI, defined in
 * pai.h) owns the given extent?
 */
enum extent_pai_e {
	EXTENT_PAI_PAC = 0,
	EXTENT_PAI_HPA = 1
};
typedef enum extent_pai_e extent_pai_t;

struct e_prof_info_s {
	/* Time when this was allocated. */
	nstime_t	e_prof_alloc_time;
	/* Allocation request size. */
	size_t		e_prof_alloc_size;
	/* Points to a prof_tctx_t. */
	atomic_p_t	e_prof_tctx;
	/*
	 * Points to a prof_recent_t for the allocation; NULL
	 * means the recent allocation record no longer exists.
	 * Protected by prof_recent_alloc_mtx.
	 */
	atomic_p_t	e_prof_recent_alloc;
};
typedef struct e_prof_info_s e_prof_info_t;

/*
 * The information about a particular edata that lives in an emap.  Space is
 * more precious there (the information, plus the edata pointer, has to live
 * in a 64-bit word if we want to enable a packed representation).
 *
 * There are two things that are special about the information here:
 * - It's quicker to access.  Finding the edata_t associated with an item
 *   already requires accessing the rtree leaf in which this data is stored,
 *   so reading the information there saves a pointer hop.
 * - It can be read unsynchronized, and without worrying about lifetime
 *   issues.
 */
typedef struct edata_map_info_s edata_map_info_t;
struct edata_map_info_s {
	bool slab;
	szind_t szind;
};

typedef struct edata_cmp_summary_s edata_cmp_summary_t;
struct edata_cmp_summary_s {
	uint64_t sn;
	uintptr_t addr;
};

/* Extent (span of pages).  Use accessor functions for e_* fields. */
typedef struct edata_s edata_t;
ph_structs(edata_avail, edata_t);
ph_structs(edata_heap, edata_t);
struct edata_s {
	/*
	 * Bitfield containing several fields:
	 *
	 * a: arena_ind
	 * b: slab
	 * c: committed
	 * p: pai
	 * z: zeroed
	 * g: guarded
	 * t: state
	 * i: szind
	 * f: nfree
	 * s: bin_shard
	 * h: is_head
	 *
	 * 00000000 ... 000hssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
	 *
	 * arena_ind: Arena from which this extent came, or all 1 bits if
	 *            unassociated.
	 *
	 * slab: The slab flag indicates whether the extent is used for a slab
	 *       of small regions.  This helps differentiate small from large
	 *       allocations, and it indicates whether interior pointers can be
	 *       looked up via iealloc().
	 *
	 * committed: The committed flag indicates whether physical memory is
	 *            committed to the extent, whether explicitly or implicitly
	 *            as on a system that overcommits and satisfies physical
	 *            memory needs on demand via soft page faults.
	 *
	 * pai: The pai flag is an extent_pai_t.
	 *
	 * zeroed: The zeroed flag is used by extent recycling code to track
	 *         whether memory is zero-filled.
	 *
	 * guarded: The guarded flag is used by the sanitizer to track whether
	 *          the extent has page guards around it.
	 *
	 * state: The state flag is an extent_state_t.
	 *
	 * szind: The szind flag indicates the usable size class index for
	 *        allocations residing in this extent, regardless of whether the
	 *        extent is a slab.  Extent size and usable size often differ
	 *        even for non-slabs, either due to sz_large_pad or promotion of
	 *        sampled small regions.
	 *
	 * nfree: Number of free regions in the slab.
	 *
	 * bin_shard: The shard of the bin from which this extent came.
	 *
	 * is_head: An extent_head_state_t; see the comments by
	 *          ehooks_default_merge_impl().
	 */
	uint64_t		e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
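
/*
 * For example (illustrative arithmetic only): MASK(3, 17) expands to
 * (((uint64_t)0x1U << 3) - 1) << 17 == 0x7 << 17 == 0xe0000.  A field is
 * read as ((e_bits & MASK(width, shift)) >> shift) and written by clearing
 * the mask first, exactly as the accessors below do.
 */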

#define EDATA_BITS_ARENA_WIDTH  MALLOCX_ARENA_BITS
#define EDATA_BITS_ARENA_SHIFT  0
#define EDATA_BITS_ARENA_MASK  MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)

#define EDATA_BITS_SLAB_WIDTH  1
#define EDATA_BITS_SLAB_SHIFT  (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
#define EDATA_BITS_SLAB_MASK  MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)

#define EDATA_BITS_COMMITTED_WIDTH  1
#define EDATA_BITS_COMMITTED_SHIFT  (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
#define EDATA_BITS_COMMITTED_MASK  MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)

#define EDATA_BITS_PAI_WIDTH  1
#define EDATA_BITS_PAI_SHIFT  (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
#define EDATA_BITS_PAI_MASK  MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)

#define EDATA_BITS_ZEROED_WIDTH  1
#define EDATA_BITS_ZEROED_SHIFT  (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
#define EDATA_BITS_ZEROED_MASK  MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)

#define EDATA_BITS_GUARDED_WIDTH  1
#define EDATA_BITS_GUARDED_SHIFT  (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
#define EDATA_BITS_GUARDED_MASK  MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)

#define EDATA_BITS_STATE_WIDTH  3
#define EDATA_BITS_STATE_SHIFT  (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
#define EDATA_BITS_STATE_MASK  MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)

#define EDATA_BITS_SZIND_WIDTH  LG_CEIL(SC_NSIZES)
#define EDATA_BITS_SZIND_SHIFT  (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
#define EDATA_BITS_SZIND_MASK  MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)

#define EDATA_BITS_NFREE_WIDTH  (SC_LG_SLAB_MAXREGS + 1)
#define EDATA_BITS_NFREE_SHIFT  (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
#define EDATA_BITS_NFREE_MASK  MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)

#define EDATA_BITS_BINSHARD_WIDTH  6
#define EDATA_BITS_BINSHARD_SHIFT  (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
#define EDATA_BITS_BINSHARD_MASK  MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)

#define EDATA_BITS_IS_HEAD_WIDTH 1
#define EDATA_BITS_IS_HEAD_SHIFT  (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
#define EDATA_BITS_IS_HEAD_MASK  MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
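
/*
 * A worked layout, assuming the common configuration (MALLOCX_ARENA_BITS ==
 * 12, LG_CEIL(SC_NSIZES) == 8, SC_LG_SLAB_MAXREGS + 1 == 10): the shifts
 * work out to arena_ind at bit 0, slab at 12, committed at 13, pai at 14,
 * zeroed at 15, guarded at 16, state at 17, szind at 20, nfree at 28,
 * bin_shard at 38, and is_head at 44, matching the diagram above.
 */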

	/* Pointer to the extent that this structure is responsible for. */
	void			*e_addr;

	union {
		/*
		 * Extent size and serial number associated with the extent
		 * structure (different from the serial number for the extent at
		 * e_addr).
		 *
		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
		 */
		size_t			e_size_esn;
	#define EDATA_SIZE_MASK	((size_t)~(PAGE-1))
	#define EDATA_ESN_MASK		((size_t)PAGE-1)
		/* Base extent size, which may not be a multiple of PAGE. */
		size_t			e_bsize;
	};
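
	/*
	 * Worked example (assuming PAGE == 4096): e_size_esn == 0x6123 splits
	 * into size == 0x6000 (masking with ~0xfff) and esn == 0x123 (masking
	 * with 0xfff); extent sizes are PAGE-multiples, so the low bits are
	 * free to carry the esn.
	 */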

	/*
	 * If this edata is a user allocation from an HPA, it comes out of some
	 * pageslab (we don't yet support hugepage allocations that don't fit
	 * into pageslabs).  This field tracks that pageslab.
	 */
	hpdata_t *e_ps;

	/*
	 * Serial number.  These are not necessarily unique; splitting an extent
	 * results in two extents with the same serial number.
	 */
	uint64_t e_sn;

	union {
		/*
		 * List linkage used when the edata_t is active: either in the
		 * arena's large allocations list or in a bin_t's slabs_full.
		 */
		ql_elm(edata_t)	ql_link_active;
		/*
		 * Pairing heap linkage.  Used whenever the extent is inactive
		 * (in the page allocators), or when it is active and in
		 * slabs_nonfull, or when the edata_t is unassociated with an
		 * extent and sitting in an edata_cache.
		 */
		union {
			edata_heap_link_t heap_link;
			edata_avail_link_t avail_link;
		};
	};

	union {
		/*
		 * List linkage used when the extent is inactive:
		 * - Stashed dirty extents
		 * - Ecache LRU functionality.
		 */
		ql_elm(edata_t) ql_link_inactive;
		/* Small region slab metadata. */
		slab_data_t	e_slab_data;

		/* Profiling data, used for large objects. */
		e_prof_info_t	e_prof_info;
	};
};

TYPED_LIST(edata_list_active, edata_t, ql_link_active)
TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)

static inline unsigned
edata_arena_ind_get(const edata_t *edata) {
	unsigned arena_ind = (unsigned)((edata->e_bits &
	    EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
	assert(arena_ind < MALLOCX_ARENA_LIMIT);

	return arena_ind;
}

static inline szind_t
edata_szind_get_maybe_invalid(const edata_t *edata) {
	szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
	    EDATA_BITS_SZIND_SHIFT);
	assert(szind <= SC_NSIZES);
	return szind;
}

static inline szind_t
edata_szind_get(const edata_t *edata) {
	szind_t szind = edata_szind_get_maybe_invalid(edata);
	assert(szind < SC_NSIZES); /* Never call when "invalid". */
	return szind;
}

static inline size_t
edata_usize_get(const edata_t *edata) {
	return sz_index2size(edata_szind_get(edata));
}

static inline unsigned
edata_binshard_get(const edata_t *edata) {
	unsigned binshard = (unsigned)((edata->e_bits &
	    EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	return binshard;
}

static inline uint64_t
edata_sn_get(const edata_t *edata) {
	return edata->e_sn;
}

static inline extent_state_t
edata_state_get(const edata_t *edata) {
	return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
	    EDATA_BITS_STATE_SHIFT);
}

static inline bool
edata_guarded_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
	    EDATA_BITS_GUARDED_SHIFT);
}

static inline bool
edata_zeroed_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
	    EDATA_BITS_ZEROED_SHIFT);
}

static inline bool
edata_committed_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
	    EDATA_BITS_COMMITTED_SHIFT);
}

static inline extent_pai_t
edata_pai_get(const edata_t *edata) {
	return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
	    EDATA_BITS_PAI_SHIFT);
}

static inline bool
edata_slab_get(const edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
	    EDATA_BITS_SLAB_SHIFT);
}

static inline unsigned
edata_nfree_get(const edata_t *edata) {
	assert(edata_slab_get(edata));
	return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
	    EDATA_BITS_NFREE_SHIFT);
}

static inline void *
edata_base_get(const edata_t *edata) {
	assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
	    !edata_slab_get(edata));
	return PAGE_ADDR2BASE(edata->e_addr);
}

static inline void *
edata_addr_get(const edata_t *edata) {
	assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
	    !edata_slab_get(edata));
	return edata->e_addr;
}

static inline size_t
edata_size_get(const edata_t *edata) {
	return (edata->e_size_esn & EDATA_SIZE_MASK);
}

static inline size_t
edata_esn_get(const edata_t *edata) {
	return (edata->e_size_esn & EDATA_ESN_MASK);
}

static inline size_t
edata_bsize_get(const edata_t *edata) {
	return edata->e_bsize;
}

static inline hpdata_t *
edata_ps_get(const edata_t *edata) {
	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
	return edata->e_ps;
}

static inline void *
edata_before_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
}

static inline void *
edata_last_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) +
	    edata_size_get(edata) - PAGE);
}

static inline void *
edata_past_get(const edata_t *edata) {
	return (void *)((uintptr_t)edata_base_get(edata) +
	    edata_size_get(edata));
}

static inline slab_data_t *
edata_slab_data_get(edata_t *edata) {
	assert(edata_slab_get(edata));
	return &edata->e_slab_data;
}

static inline const slab_data_t *
edata_slab_data_get_const(const edata_t *edata) {
	assert(edata_slab_get(edata));
	return &edata->e_slab_data;
}

static inline prof_tctx_t *
edata_prof_tctx_get(const edata_t *edata) {
	return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
	    ATOMIC_ACQUIRE);
}

static inline const nstime_t *
edata_prof_alloc_time_get(const edata_t *edata) {
	return &edata->e_prof_info.e_prof_alloc_time;
}

static inline size_t
edata_prof_alloc_size_get(const edata_t *edata) {
	return edata->e_prof_info.e_prof_alloc_size;
}

static inline prof_recent_t *
edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
	return (prof_recent_t *)atomic_load_p(
	    &edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
}

static inline void
edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
	    ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
}

static inline void
edata_binshard_set(edata_t *edata, unsigned binshard) {
	/* The assertion assumes szind is set already. */
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
	    ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
}

static inline void
edata_addr_set(edata_t *edata, void *addr) {
	edata->e_addr = addr;
}

static inline void
edata_size_set(edata_t *edata, size_t size) {
	assert((size & ~EDATA_SIZE_MASK) == 0);
	edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
}

static inline void
edata_esn_set(edata_t *edata, size_t esn) {
	edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
	    EDATA_ESN_MASK);
}

static inline void
edata_bsize_set(edata_t *edata, size_t bsize) {
	edata->e_bsize = bsize;
}

static inline void
edata_ps_set(edata_t *edata, hpdata_t *ps) {
	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
	edata->e_ps = ps;
}

static inline void
edata_szind_set(edata_t *edata, szind_t szind) {
	assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
	    ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
}

static inline void
edata_nfree_set(edata_t *edata, unsigned nfree) {
	assert(edata_slab_get(edata));
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
	    ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
	/* The assertion assumes szind is set already. */
	assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
	edata->e_bits = (edata->e_bits &
	    (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
	    ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
	    ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_inc(edata_t *edata) {
	assert(edata_slab_get(edata));
	edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_dec(edata_t *edata) {
	assert(edata_slab_get(edata));
	edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_nfree_sub(edata_t *edata, uint64_t n) {
	assert(edata_slab_get(edata));
	edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
}

static inline void
edata_sn_set(edata_t *edata, uint64_t sn) {
	edata->e_sn = sn;
}

static inline void
edata_state_set(edata_t *edata, extent_state_t state) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
	    ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
}

static inline void
edata_guarded_set(edata_t *edata, bool guarded) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
	    ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
}

static inline void
edata_zeroed_set(edata_t *edata, bool zeroed) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
	    ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
}

static inline void
edata_committed_set(edata_t *edata, bool committed) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
	    ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
}

static inline void
edata_pai_set(edata_t *edata, extent_pai_t pai) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
	    ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
}

static inline void
edata_slab_set(edata_t *edata, bool slab) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
	    ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
}

static inline void
edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
	atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
}

static inline void
edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
	nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
}

static inline void
edata_prof_alloc_size_set(edata_t *edata, size_t size) {
	edata->e_prof_info.e_prof_alloc_size = size;
}

static inline void
edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
    prof_recent_t *recent_alloc) {
	atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
	    ATOMIC_RELAXED);
}

static inline bool
edata_is_head_get(edata_t *edata) {
	return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
	    EDATA_BITS_IS_HEAD_SHIFT);
}

static inline void
edata_is_head_set(edata_t *edata, bool is_head) {
	edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
	    ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
}

static inline bool
edata_state_in_transition(extent_state_t state) {
	return state >= extent_state_transition;
}

/*
 * Because this function is implemented as a sequence of bitfield modifications,
 * even though each individual bit is properly initialized, we technically read
 * uninitialized data within it.  This is mostly fine, since most callers get
 * their edatas from zeroing sources, but callers who make stack edata_ts need
 * to manually zero them.
 */
static inline void
edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
    bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed,
    bool committed, extent_pai_t pai, extent_head_state_t is_head) {
	assert(addr == PAGE_ADDR2BASE(addr) || !slab);

	edata_arena_ind_set(edata, arena_ind);
	edata_addr_set(edata, addr);
	edata_size_set(edata, size);
	edata_slab_set(edata, slab);
	edata_szind_set(edata, szind);
	edata_sn_set(edata, sn);
	edata_state_set(edata, state);
	edata_guarded_set(edata, false);
	edata_zeroed_set(edata, zeroed);
	edata_committed_set(edata, committed);
	edata_pai_set(edata, pai);
	edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
	if (config_prof) {
		edata_prof_tctx_set(edata, NULL);
	}
}
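
/*
 * A minimal usage sketch (hypothetical values), following the note above:
 * stack edata_ts must be zeroed manually before edata_init() is called.
 *
 *	edata_t e;
 *	memset(&e, 0, sizeof(e));
 *	edata_init(&e, 0, addr, PAGE, false, SC_NSIZES, 0,
 *	    extent_state_active, false, true, EXTENT_PAI_PAC,
 *	    EXTENT_NOT_HEAD);
 */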

static inline void
edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
	edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
	edata_addr_set(edata, addr);
	edata_bsize_set(edata, bsize);
	edata_slab_set(edata, false);
	edata_szind_set(edata, SC_NSIZES);
	edata_sn_set(edata, sn);
	edata_state_set(edata, extent_state_active);
	edata_guarded_set(edata, false);
	edata_zeroed_set(edata, true);
	edata_committed_set(edata, true);
	/*
	 * This isn't strictly true, but base-allocated extents never get
	 * deallocated and can't be looked up in the emap, so there's no sense
	 * in wasting a state bit to encode the distinction.
	 */
	edata_pai_set(edata, EXTENT_PAI_PAC);
}

static inline int
edata_esn_comp(const edata_t *a, const edata_t *b) {
	size_t a_esn = edata_esn_get(a);
	size_t b_esn = edata_esn_get(b);

	return (a_esn > b_esn) - (a_esn < b_esn);
}
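
/*
 * Note: the (a > b) - (a < b) expression used by the comparators here is the
 * standard branch-free three-way comparison; it evaluates to -1, 0, or 1,
 * and avoids the overflow a plain subtraction could produce.
 */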

static inline int
edata_ead_comp(const edata_t *a, const edata_t *b) {
	uintptr_t a_eaddr = (uintptr_t)a;
	uintptr_t b_eaddr = (uintptr_t)b;

	return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
}

static inline edata_cmp_summary_t
edata_cmp_summary_get(const edata_t *edata) {
	edata_cmp_summary_t result;
	result.sn = edata_sn_get(edata);
	result.addr = (uintptr_t)edata_addr_get(edata);
	return result;
}

static inline int
edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
	int ret;
	ret = (a.sn > b.sn) - (a.sn < b.sn);
	if (ret != 0) {
		return ret;
	}
	ret = (a.addr > b.addr) - (a.addr < b.addr);
	return ret;
}
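
/*
 * For example (hypothetical values): with a = {sn = 5, addr = 0x1000} and
 * b = {sn = 5, addr = 0x2000}, edata_cmp_summary_comp(a, b) returns -1; ties
 * on the serial number are broken by address, giving the oldest-first,
 * lowest-address-first ordering that edata_snad_comp() below relies on.
 */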

static inline int
edata_snad_comp(const edata_t *a, const edata_t *b) {
	edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a);
	edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b);

	return edata_cmp_summary_comp(a_cmp, b_cmp);
}

static inline int
edata_esnead_comp(const edata_t *a, const edata_t *b) {
	int ret;

	ret = edata_esn_comp(a, b);
	if (ret != 0) {
		return ret;
	}

	ret = edata_ead_comp(a, b);
	return ret;
}

ph_proto(, edata_avail, edata_t)
ph_proto(, edata_heap, edata_t)

#endif /* JEMALLOC_INTERNAL_EDATA_H */