Diffstat (limited to 'deps/v8/src/heap')
-rw-r--r--  deps/v8/src/heap/allocation-stats.h | 2
-rw-r--r--  deps/v8/src/heap/array-buffer-sweeper.cc | 1
-rw-r--r--  deps/v8/src/heap/base-space.cc | 4
-rw-r--r--  deps/v8/src/heap/base/basic-slot-set.h | 464
-rw-r--r--  deps/v8/src/heap/basic-memory-chunk.h | 2
-rw-r--r--  deps/v8/src/heap/code-range.cc | 17
-rw-r--r--  deps/v8/src/heap/concurrent-allocator-inl.h | 38
-rw-r--r--  deps/v8/src/heap/concurrent-allocator.cc | 160
-rw-r--r--  deps/v8/src/heap/concurrent-allocator.h | 33
-rw-r--r--  deps/v8/src/heap/concurrent-marking.cc | 13
-rw-r--r--  deps/v8/src/heap/conservative-stack-visitor.cc | 63
-rw-r--r--  deps/v8/src/heap/conservative-stack-visitor.h | 11
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-heap.cc | 127
-rw-r--r--  deps/v8/src/heap/cppgc-js/cpp-heap.h | 25
-rw-r--r--  deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h | 1
-rw-r--r--  deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc | 3
-rw-r--r--  deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc | 2
-rw-r--r--  deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h | 2
-rw-r--r--  deps/v8/src/heap/cppgc/compactor.cc | 20
-rw-r--r--  deps/v8/src/heap/cppgc/compactor.h | 14
-rw-r--r--  deps/v8/src/heap/cppgc/explicit-management.cc | 29
-rw-r--r--  deps/v8/src/heap/cppgc/garbage-collector.h | 60
-rw-r--r--  deps/v8/src/heap/cppgc/gc-invoker.cc | 27
-rw-r--r--  deps/v8/src/heap/cppgc/gc-invoker.h | 4
-rw-r--r--  deps/v8/src/heap/cppgc/globals.h | 6
-rw-r--r--  deps/v8/src/heap/cppgc/heap-base.cc | 12
-rw-r--r--  deps/v8/src/heap/cppgc/heap-config.h | 103
-rw-r--r--  deps/v8/src/heap/cppgc/heap-growing.cc | 8
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.cc | 34
-rw-r--r--  deps/v8/src/heap/cppgc/heap-page.h | 27
-rw-r--r--  deps/v8/src/heap/cppgc/heap.cc | 62
-rw-r--r--  deps/v8/src/heap/cppgc/heap.h | 14
-rw-r--r--  deps/v8/src/heap/cppgc/marker.cc | 57
-rw-r--r--  deps/v8/src/heap/cppgc/marker.h | 31
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.cc | 10
-rw-r--r--  deps/v8/src/heap/cppgc/marking-verifier.h | 10
-rw-r--r--  deps/v8/src/heap/cppgc/member-storage.cc | 26
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.cc | 82
-rw-r--r--  deps/v8/src/heap/cppgc/object-allocator.h | 1
-rw-r--r--  deps/v8/src/heap/cppgc/remembered-set.cc | 274
-rw-r--r--  deps/v8/src/heap/cppgc/remembered-set.h | 18
-rw-r--r--  deps/v8/src/heap/cppgc/stats-collector.cc | 5
-rw-r--r--  deps/v8/src/heap/cppgc/stats-collector.h | 11
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.cc | 247
-rw-r--r--  deps/v8/src/heap/cppgc/sweeper.h | 48
-rw-r--r--  deps/v8/src/heap/cppgc/write-barrier.cc | 18
-rw-r--r--  deps/v8/src/heap/embedder-tracing.cc | 5
-rw-r--r--  deps/v8/src/heap/evacuation-allocator-inl.h | 30
-rw-r--r--  deps/v8/src/heap/evacuation-allocator.h | 8
-rw-r--r--  deps/v8/src/heap/evacuation-verifier-inl.h | 64
-rw-r--r--  deps/v8/src/heap/evacuation-verifier.cc | 179
-rw-r--r--  deps/v8/src/heap/evacuation-verifier.h | 104
-rw-r--r--  deps/v8/src/heap/factory-base.cc | 2
-rw-r--r--  deps/v8/src/heap/factory.cc | 72
-rw-r--r--  deps/v8/src/heap/factory.h | 6
-rw-r--r--  deps/v8/src/heap/gc-tracer-inl.h | 4
-rw-r--r--  deps/v8/src/heap/gc-tracer.cc | 126
-rw-r--r--  deps/v8/src/heap/gc-tracer.h | 13
-rw-r--r--  deps/v8/src/heap/global-handle-marking-visitor.cc | 6
-rw-r--r--  deps/v8/src/heap/global-handle-marking-visitor.h | 2
-rw-r--r--  deps/v8/src/heap/heap-allocator-inl.h | 1
-rw-r--r--  deps/v8/src/heap/heap-allocator.cc | 12
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 107
-rw-r--r--  deps/v8/src/heap/heap-verifier.cc | 1
-rw-r--r--  deps/v8/src/heap/heap-write-barrier-inl.h | 4
-rw-r--r--  deps/v8/src/heap/heap.cc | 861
-rw-r--r--  deps/v8/src/heap/heap.h | 256
-rw-r--r--  deps/v8/src/heap/incremental-marking-inl.h | 1
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 79
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 14
-rw-r--r--  deps/v8/src/heap/invalidated-slots-inl.h | 1
-rw-r--r--  deps/v8/src/heap/invalidated-slots.cc | 13
-rw-r--r--  deps/v8/src/heap/invalidated-slots.h | 2
-rw-r--r--  deps/v8/src/heap/large-spaces.cc | 33
-rw-r--r--  deps/v8/src/heap/large-spaces.h | 8
-rw-r--r--  deps/v8/src/heap/linear-allocation-area.h | 6
-rw-r--r--  deps/v8/src/heap/local-heap-inl.h | 4
-rw-r--r--  deps/v8/src/heap/local-heap.cc | 61
-rw-r--r--  deps/v8/src/heap/local-heap.h | 15
-rw-r--r--  deps/v8/src/heap/mark-compact-inl.h | 80
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 1563
-rw-r--r--  deps/v8/src/heap/mark-compact.h | 169
-rw-r--r--  deps/v8/src/heap/marking-barrier-inl.h | 13
-rw-r--r--  deps/v8/src/heap/marking-barrier.cc | 20
-rw-r--r--  deps/v8/src/heap/marking-barrier.h | 4
-rw-r--r--  deps/v8/src/heap/marking-state-inl.h | 155
-rw-r--r--  deps/v8/src/heap/marking-state.h | 137
-rw-r--r--  deps/v8/src/heap/marking-visitor-inl.h | 13
-rw-r--r--  deps/v8/src/heap/marking-visitor.h | 112
-rw-r--r--  deps/v8/src/heap/memory-allocator.cc | 1
-rw-r--r--  deps/v8/src/heap/memory-allocator.h | 3
-rw-r--r--  deps/v8/src/heap/memory-chunk-layout.cc | 3
-rw-r--r--  deps/v8/src/heap/memory-chunk-layout.h | 3
-rw-r--r--  deps/v8/src/heap/memory-chunk.cc | 10
-rw-r--r--  deps/v8/src/heap/memory-chunk.h | 8
-rw-r--r--  deps/v8/src/heap/new-spaces-inl.h | 16
-rw-r--r--  deps/v8/src/heap/new-spaces.cc | 154
-rw-r--r--  deps/v8/src/heap/new-spaces.h | 54
-rw-r--r--  deps/v8/src/heap/object-stats.cc | 19
-rw-r--r--  deps/v8/src/heap/paged-spaces-inl.h | 2
-rw-r--r--  deps/v8/src/heap/paged-spaces.cc | 167
-rw-r--r--  deps/v8/src/heap/paged-spaces.h | 70
-rw-r--r--  deps/v8/src/heap/pretenuring-handler-inl.h | 112
-rw-r--r--  deps/v8/src/heap/pretenuring-handler.cc | 244
-rw-r--r--  deps/v8/src/heap/pretenuring-handler.h | 90
-rw-r--r--  deps/v8/src/heap/read-only-heap.cc | 3
-rw-r--r--  deps/v8/src/heap/read-only-spaces.cc | 22
-rw-r--r--  deps/v8/src/heap/remembered-set-inl.h | 20
-rw-r--r--  deps/v8/src/heap/remembered-set.h | 5
-rw-r--r--  deps/v8/src/heap/safepoint.cc | 44
-rw-r--r--  deps/v8/src/heap/safepoint.h | 7
-rw-r--r--  deps/v8/src/heap/scavenger-inl.h | 13
-rw-r--r--  deps/v8/src/heap/scavenger.cc | 43
-rw-r--r--  deps/v8/src/heap/scavenger.h | 6
-rw-r--r--  deps/v8/src/heap/setup-heap-internal.cc | 14
-rw-r--r--  deps/v8/src/heap/slot-set.h | 466
-rw-r--r--  deps/v8/src/heap/spaces-inl.h | 4
-rw-r--r--  deps/v8/src/heap/spaces.cc | 10
-rw-r--r--  deps/v8/src/heap/spaces.h | 14
-rw-r--r--  deps/v8/src/heap/stress-scavenge-observer.cc | 4
-rw-r--r--  deps/v8/src/heap/sweeper.cc | 86
-rw-r--r--  deps/v8/src/heap/sweeper.h | 31
122 files changed, 4970 insertions, 3370 deletions
diff --git a/deps/v8/src/heap/allocation-stats.h b/deps/v8/src/heap/allocation-stats.h
index a024b956e0..f02f2594d9 100644
--- a/deps/v8/src/heap/allocation-stats.h
+++ b/deps/v8/src/heap/allocation-stats.h
@@ -59,6 +59,8 @@ class AllocationStats {
#endif
void IncreaseAllocatedBytes(size_t bytes, const BasicMemoryChunk* page) {
+ DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
+ IsAligned(bytes, kObjectAlignment8GbHeap));
#ifdef DEBUG
size_t size = size_;
DCHECK_GE(size + bytes, size);
diff --git a/deps/v8/src/heap/array-buffer-sweeper.cc b/deps/v8/src/heap/array-buffer-sweeper.cc
index fd36cf89c0..088e9e4ac5 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.cc
+++ b/deps/v8/src/heap/array-buffer-sweeper.cc
@@ -109,7 +109,6 @@ ArrayBufferSweeper::~ArrayBufferSweeper() {
void ArrayBufferSweeper::EnsureFinished() {
if (!sweeping_in_progress()) return;
- TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
TryAbortResult abort_result =
heap_->isolate()->cancelable_task_manager()->TryAbort(job_->id_);
diff --git a/deps/v8/src/heap/base-space.cc b/deps/v8/src/heap/base-space.cc
index aabbeaebf5..5f28afc240 100644
--- a/deps/v8/src/heap/base-space.cc
+++ b/deps/v8/src/heap/base-space.cc
@@ -17,12 +17,16 @@ const char* BaseSpace::GetSpaceName(AllocationSpace space) {
return "map_space";
case CODE_SPACE:
return "code_space";
+ case SHARED_SPACE:
+ return "shared_space";
case LO_SPACE:
return "large_object_space";
case NEW_LO_SPACE:
return "new_large_object_space";
case CODE_LO_SPACE:
return "code_large_object_space";
+ case SHARED_LO_SPACE:
+ return "shared_large_object_space";
case RO_SPACE:
return "read_only_space";
}
diff --git a/deps/v8/src/heap/base/basic-slot-set.h b/deps/v8/src/heap/base/basic-slot-set.h
new file mode 100644
index 0000000000..2f0bc1c872
--- /dev/null
+++ b/deps/v8/src/heap/base/basic-slot-set.h
@@ -0,0 +1,464 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BASE_BASIC_SLOT_SET_H_
+#define V8_HEAP_BASE_BASIC_SLOT_SET_H_
+
+#include <cstddef>
+#include <memory>
+
+#include "src/base/atomic-utils.h"
+#include "src/base/bits.h"
+#include "src/base/platform/memory.h"
+
+namespace heap {
+namespace base {
+
+enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
+
+// Data structure for maintaining a set of slots in a standard (non-large)
+// page.
+// The data structure assumes that the slots are pointer size aligned and
+// splits the valid slot offset range into buckets.
+// Each bucket is a bitmap with a bit corresponding to a single slot offset.
+template <size_t SlotGranularity>
+class BasicSlotSet {
+ static constexpr auto kSystemPointerSize = sizeof(void*);
+
+ public:
+ using Address = uintptr_t;
+
+ enum AccessMode : uint8_t {
+ ATOMIC,
+ NON_ATOMIC,
+ };
+
+ enum EmptyBucketMode {
+ FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
+ KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
+ };
+
+ BasicSlotSet() = delete;
+
+ static BasicSlotSet* Allocate(size_t buckets) {
+ // BasicSlotSet* slot_set --+
+ // |
+ // v
+ // +-----------------+-------------------------+
+ // | initial buckets | buckets array |
+ // +-----------------+-------------------------+
+ // pointer-sized pointer-sized * buckets
+ //
+ //
+ // The BasicSlotSet pointer points to the beginning of the buckets array for
+ // faster access in the write barrier. The number of buckets is needed for
+ // calculating the size of this data structure.
+ size_t buckets_size = buckets * sizeof(Bucket*);
+ size_t size = kInitialBucketsSize + buckets_size;
+ void* allocation = v8::base::AlignedAlloc(size, kSystemPointerSize);
+ CHECK(allocation);
+ BasicSlotSet* slot_set = reinterpret_cast<BasicSlotSet*>(
+ reinterpret_cast<uint8_t*>(allocation) + kInitialBucketsSize);
+ DCHECK(
+ IsAligned(reinterpret_cast<uintptr_t>(slot_set), kSystemPointerSize));
+#ifdef DEBUG
+ *slot_set->initial_buckets() = buckets;
+#endif
+ for (size_t i = 0; i < buckets; i++) {
+ *slot_set->bucket(i) = nullptr;
+ }
+ return slot_set;
+ }
+
+ static void Delete(BasicSlotSet* slot_set, size_t buckets) {
+ if (slot_set == nullptr) return;
+
+ for (size_t i = 0; i < buckets; i++) {
+ slot_set->ReleaseBucket(i);
+ }
+
+#ifdef DEBUG
+ size_t initial_buckets = *slot_set->initial_buckets();
+
+ for (size_t i = buckets; i < initial_buckets; i++) {
+ DCHECK_NULL(*slot_set->bucket(i));
+ }
+#endif
+
+ v8::base::AlignedFree(reinterpret_cast<uint8_t*>(slot_set) -
+ kInitialBucketsSize);
+ }
+
+ constexpr static size_t BucketsForSize(size_t size) {
+ return (size + (SlotGranularity * kBitsPerBucket) - 1) /
+ (SlotGranularity * kBitsPerBucket);
+ }
+
+ // Converts the slot offset into bucket index.
+ constexpr static size_t BucketForSlot(size_t slot_offset) {
+ DCHECK(IsAligned(slot_offset, SlotGranularity));
+ return slot_offset / (SlotGranularity * kBitsPerBucket);
+ }
+
+ // The slot offset specifies a slot at address page_start_ + slot_offset.
+ // AccessMode defines whether there can be concurrent access on the buckets
+ // or not.
+ template <AccessMode access_mode>
+ void Insert(size_t slot_offset) {
+ size_t bucket_index;
+ int cell_index, bit_index;
+ SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+ Bucket* bucket = LoadBucket<access_mode>(bucket_index);
+ if (bucket == nullptr) {
+ bucket = new Bucket;
+ if (!SwapInNewBucket<access_mode>(bucket_index, bucket)) {
+ delete bucket;
+ bucket = LoadBucket<access_mode>(bucket_index);
+ }
+ }
+ // Check that monotonicity is preserved, i.e., once a bucket is set we do
+ // not free it concurrently.
+ DCHECK(bucket != nullptr);
+ DCHECK_EQ(bucket->cells(), LoadBucket<access_mode>(bucket_index)->cells());
+ uint32_t mask = 1u << bit_index;
+ if ((bucket->template LoadCell<access_mode>(cell_index) & mask) == 0) {
+ bucket->template SetCellBits<access_mode>(cell_index, mask);
+ }
+ }
+
+ // The slot offset specifies a slot at address page_start_ + slot_offset.
+ // Returns true if the set contains the slot.
+ bool Contains(size_t slot_offset) {
+ size_t bucket_index;
+ int cell_index, bit_index;
+ SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+ Bucket* bucket = LoadBucket(bucket_index);
+ if (bucket == nullptr) return false;
+ return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
+ }
+
+ // The slot offset specifies a slot at address page_start_ + slot_offset.
+ void Remove(size_t slot_offset) {
+ size_t bucket_index;
+ int cell_index, bit_index;
+ SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+ Bucket* bucket = LoadBucket(bucket_index);
+ if (bucket != nullptr) {
+ uint32_t cell = bucket->LoadCell(cell_index);
+ uint32_t bit_mask = 1u << bit_index;
+ if (cell & bit_mask) {
+ bucket->ClearCellBits(cell_index, bit_mask);
+ }
+ }
+ }
+
+ // The slot offsets specify a range of slots at addresses:
+ // [page_start_ + start_offset ... page_start_ + end_offset).
+ void RemoveRange(size_t start_offset, size_t end_offset, size_t buckets,
+ EmptyBucketMode mode) {
+ CHECK_LE(end_offset, buckets * kBitsPerBucket * SlotGranularity);
+ DCHECK_LE(start_offset, end_offset);
+ size_t start_bucket;
+ int start_cell, start_bit;
+ SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
+ size_t end_bucket;
+ int end_cell, end_bit;
+ SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
+ uint32_t start_mask = (1u << start_bit) - 1;
+ uint32_t end_mask = ~((1u << end_bit) - 1);
+ Bucket* bucket;
+ if (start_bucket == end_bucket && start_cell == end_cell) {
+ bucket = LoadBucket(start_bucket);
+ if (bucket != nullptr) {
+ bucket->ClearCellBits(start_cell, ~(start_mask | end_mask));
+ }
+ return;
+ }
+ size_t current_bucket = start_bucket;
+ int current_cell = start_cell;
+ bucket = LoadBucket(current_bucket);
+ if (bucket != nullptr) {
+ bucket->ClearCellBits(current_cell, ~start_mask);
+ }
+ current_cell++;
+ if (current_bucket < end_bucket) {
+ if (bucket != nullptr) {
+ ClearBucket(bucket, current_cell, kCellsPerBucket);
+ }
+ // The rest of the current bucket is cleared.
+ // Move on to the next bucket.
+ current_bucket++;
+ current_cell = 0;
+ }
+ DCHECK(current_bucket == end_bucket ||
+ (current_bucket < end_bucket && current_cell == 0));
+ while (current_bucket < end_bucket) {
+ if (mode == FREE_EMPTY_BUCKETS) {
+ ReleaseBucket(current_bucket);
+ } else {
+ DCHECK(mode == KEEP_EMPTY_BUCKETS);
+ bucket = LoadBucket(current_bucket);
+ if (bucket != nullptr) {
+ ClearBucket(bucket, 0, kCellsPerBucket);
+ }
+ }
+ current_bucket++;
+ }
+ // All buckets between start_bucket and end_bucket are cleared.
+ DCHECK(current_bucket == end_bucket);
+ if (current_bucket == buckets) return;
+ bucket = LoadBucket(current_bucket);
+ DCHECK(current_cell <= end_cell);
+ if (bucket == nullptr) return;
+ while (current_cell < end_cell) {
+ bucket->StoreCell(current_cell, 0);
+ current_cell++;
+ }
+ // All cells between start_cell and end_cell are cleared.
+ DCHECK(current_bucket == end_bucket && current_cell == end_cell);
+ bucket->ClearCellBits(end_cell, ~end_mask);
+ }
+
+ // The slot offset specifies a slot at address page_start_ + slot_offset.
+ bool Lookup(size_t slot_offset) {
+ size_t bucket_index;
+ int cell_index, bit_index;
+ SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+ Bucket* bucket = LoadBucket(bucket_index);
+ if (bucket == nullptr) return false;
+ return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
+ }
+
+ // Iterate over all slots in the set and for each slot invoke the callback.
+ // If the callback returns REMOVE_SLOT then the slot is removed from the set.
+ // Returns the new number of slots.
+ //
+ // Iteration can be performed concurrently with other operations that use
+ // atomic access mode such as insertion and removal. However there is no
+ // guarantee about ordering and linearizability.
+ //
+ // Sample usage:
+ // Iterate([](Address slot) {
+ // if (good(slot)) return KEEP_SLOT;
+ // else return REMOVE_SLOT;
+ // });
+ //
+ // Releases memory for empty buckets with FREE_EMPTY_BUCKETS.
+ template <typename Callback>
+ size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
+ Callback callback, EmptyBucketMode mode) {
+ return Iterate(chunk_start, start_bucket, end_bucket, callback,
+ [this, mode](size_t bucket_index) {
+ if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
+ ReleaseBucket(bucket_index);
+ }
+ });
+ }
+
+ bool FreeEmptyBuckets(size_t buckets) {
+ bool empty = true;
+ for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
+ if (!FreeBucketIfEmpty(bucket_index)) {
+ empty = false;
+ }
+ }
+
+ return empty;
+ }
+
+ static const int kCellsPerBucket = 32;
+ static const int kCellsPerBucketLog2 = 5;
+ static const int kCellSizeBytesLog2 = 2;
+ static const int kCellSizeBytes = 1 << kCellSizeBytesLog2;
+ static const int kBitsPerCell = 32;
+ static const int kBitsPerCellLog2 = 5;
+ static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
+ static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
+
+ class Bucket final {
+ uint32_t cells_[kCellsPerBucket];
+
+ public:
+ Bucket() {
+ for (int i = 0; i < kCellsPerBucket; i++) {
+ cells_[i] = 0;
+ }
+ }
+
+ uint32_t* cells() { return cells_; }
+ uint32_t* cell(int cell_index) { return cells() + cell_index; }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ uint32_t LoadCell(int cell_index) {
+ DCHECK_LT(cell_index, kCellsPerBucket);
+ if (access_mode == AccessMode::ATOMIC)
+ return v8::base::AsAtomic32::Acquire_Load(cells() + cell_index);
+ return *(cells() + cell_index);
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ void SetCellBits(int cell_index, uint32_t mask) {
+ if (access_mode == AccessMode::ATOMIC) {
+ v8::base::AsAtomic32::SetBits(cell(cell_index), mask, mask);
+ } else {
+ uint32_t* c = cell(cell_index);
+ *c = (*c & ~mask) | mask;
+ }
+ }
+
+ void ClearCellBits(int cell_index, uint32_t mask) {
+ v8::base::AsAtomic32::SetBits(cell(cell_index), 0u, mask);
+ }
+
+ void StoreCell(int cell_index, uint32_t value) {
+ v8::base::AsAtomic32::Release_Store(cell(cell_index), value);
+ }
+
+ bool IsEmpty() {
+ for (int i = 0; i < kCellsPerBucket; i++) {
+ if (cells_[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+ };
+
+ protected:
+ template <typename Callback, typename EmptyBucketCallback>
+ size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
+ Callback callback, EmptyBucketCallback empty_bucket_callback) {
+ size_t new_count = 0;
+ for (size_t bucket_index = start_bucket; bucket_index < end_bucket;
+ bucket_index++) {
+ Bucket* bucket = LoadBucket(bucket_index);
+ if (bucket != nullptr) {
+ size_t in_bucket_count = 0;
+ size_t cell_offset = bucket_index << kBitsPerBucketLog2;
+ for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
+ uint32_t cell = bucket->LoadCell(i);
+ if (cell) {
+ uint32_t old_cell = cell;
+ uint32_t mask = 0;
+ while (cell) {
+ int bit_offset = v8::base::bits::CountTrailingZeros(cell);
+ uint32_t bit_mask = 1u << bit_offset;
+ Address slot = (cell_offset + bit_offset) * SlotGranularity;
+ if (callback(chunk_start + slot) == KEEP_SLOT) {
+ ++in_bucket_count;
+ } else {
+ mask |= bit_mask;
+ }
+ cell ^= bit_mask;
+ }
+ uint32_t new_cell = old_cell & ~mask;
+ if (old_cell != new_cell) {
+ bucket->ClearCellBits(i, mask);
+ }
+ }
+ }
+ if (in_bucket_count == 0) {
+ empty_bucket_callback(bucket_index);
+ }
+ new_count += in_bucket_count;
+ }
+ }
+ return new_count;
+ }
+
+ bool FreeBucketIfEmpty(size_t bucket_index) {
+ Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
+ if (bucket != nullptr) {
+ if (bucket->IsEmpty()) {
+ ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
+ } else {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ void ClearBucket(Bucket* bucket, int start_cell, int end_cell) {
+ DCHECK_GE(start_cell, 0);
+ DCHECK_LE(end_cell, kCellsPerBucket);
+ int current_cell = start_cell;
+ while (current_cell < kCellsPerBucket) {
+ bucket->StoreCell(current_cell, 0);
+ current_cell++;
+ }
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ void ReleaseBucket(size_t bucket_index) {
+ Bucket* bucket = LoadBucket<access_mode>(bucket_index);
+ StoreBucket<access_mode>(bucket_index, nullptr);
+ delete bucket;
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ Bucket* LoadBucket(Bucket** bucket) {
+ if (access_mode == AccessMode::ATOMIC)
+ return v8::base::AsAtomicPointer::Acquire_Load(bucket);
+ return *bucket;
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ Bucket* LoadBucket(size_t bucket_index) {
+ return LoadBucket(bucket(bucket_index));
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ void StoreBucket(Bucket** bucket, Bucket* value) {
+ if (access_mode == AccessMode::ATOMIC) {
+ v8::base::AsAtomicPointer::Release_Store(bucket, value);
+ } else {
+ *bucket = value;
+ }
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ void StoreBucket(size_t bucket_index, Bucket* value) {
+ StoreBucket(bucket(bucket_index), value);
+ }
+
+ template <AccessMode access_mode = AccessMode::ATOMIC>
+ bool SwapInNewBucket(size_t bucket_index, Bucket* value) {
+ Bucket** b = bucket(bucket_index);
+ if (access_mode == AccessMode::ATOMIC) {
+ return v8::base::AsAtomicPointer::Release_CompareAndSwap(
+ b, nullptr, value) == nullptr;
+ } else {
+ DCHECK_NULL(*b);
+ *b = value;
+ return true;
+ }
+ }
+
+ // Converts the slot offset into bucket/cell/bit index.
+ static void SlotToIndices(size_t slot_offset, size_t* bucket_index,
+ int* cell_index, int* bit_index) {
+ DCHECK(IsAligned(slot_offset, SlotGranularity));
+ size_t slot = slot_offset / SlotGranularity;
+ *bucket_index = slot >> kBitsPerBucketLog2;
+ *cell_index =
+ static_cast<int>((slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1));
+ *bit_index = static_cast<int>(slot & (kBitsPerCell - 1));
+ }
+
+ Bucket** buckets() { return reinterpret_cast<Bucket**>(this); }
+ Bucket** bucket(size_t bucket_index) { return buckets() + bucket_index; }
+
+#ifdef DEBUG
+ size_t* initial_buckets() { return reinterpret_cast<size_t*>(this) - 1; }
+ static const int kInitialBucketsSize = sizeof(size_t);
+#else
+ static const int kInitialBucketsSize = 0;
+#endif
+};
+
+} // namespace base
+} // namespace heap
+
+#endif // V8_HEAP_BASE_BASIC_SLOT_SET_H_
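
The header above gives BasicSlotSet a self-contained public API (Allocate, BucketsForSize, Insert, Contains, Remove, Iterate, Delete). A minimal usage sketch, using only identifiers from the header and an assumed 256 KiB page size:

  #include "src/heap/base/basic-slot-set.h"

  using SlotSet = heap::base::BasicSlotSet<sizeof(void*)>;

  void ExampleUsage() {
    constexpr size_t kPageSize = 256 * 1024;  // hypothetical page size
    const size_t buckets = SlotSet::BucketsForSize(kPageSize);
    SlotSet* slots = SlotSet::Allocate(buckets);

    // Record a pointer-aligned slot offset within the page.
    slots->Insert<SlotSet::AccessMode::ATOMIC>(64);

    // Visit every recorded slot; the callback decides whether a slot survives.
    slots->Iterate(
        /*chunk_start=*/0, /*start_bucket=*/0, /*end_bucket=*/buckets,
        [](SlotSet::Address) { return heap::base::KEEP_SLOT; },
        SlotSet::FREE_EMPTY_BUCKETS);

    SlotSet::Delete(slots, buckets);
  }
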
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index db80da75c9..60a711b622 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -353,7 +353,7 @@ class BasicMemoryChunk {
size_t size_;
// Flags that are only mutable from the main thread when no concurrent
- // component (e.g. marker, sweeper) is running.
+ // component (e.g. marker, sweeper, compilation, allocation) is running.
MainThreadFlags main_thread_flags_{NO_FLAGS};
// TODO(v8:7464): Find a way to remove this.
diff --git a/deps/v8/src/heap/code-range.cc b/deps/v8/src/heap/code-range.cc
index dfe730730c..ae240d0f1c 100644
--- a/deps/v8/src/heap/code-range.cc
+++ b/deps/v8/src/heap/code-range.cc
@@ -147,15 +147,14 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
if (!VirtualMemoryCage::InitReservation(params)) return false;
- if (V8_EXTERNAL_CODE_SPACE_BOOL) {
- // Ensure that the code range does not cross the 4Gb boundary and thus
- // default compression scheme of truncating the Code pointers to 32-bits
- // still works.
- Address base = page_allocator_->begin();
- Address last = base + page_allocator_->size() - 1;
- CHECK_EQ(GetPtrComprCageBaseAddress(base),
- GetPtrComprCageBaseAddress(last));
- }
+#ifdef V8_EXTERNAL_CODE_SPACE
+ // Ensure that ExternalCodeCompressionScheme is applicable to all objects
+ // stored in the code range.
+ Address base = page_allocator_->begin();
+ Address last = base + page_allocator_->size() - 1;
+ CHECK_EQ(ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(base),
+ ExternalCodeCompressionScheme::GetPtrComprCageBaseAddress(last));
+#endif // V8_EXTERNAL_CODE_SPACE
// On some platforms, specifically Win64, we need to reserve some pages at
// the beginning of an executable space. See
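
The replacement check enforces the same invariant as the removed V8_EXTERNAL_CODE_SPACE_BOOL block, now phrased via ExternalCodeCompressionScheme: both ends of the code range must resolve to the same compression cage base, so 32-bit compressed Code pointers stay unambiguous. A rough sketch of that invariant, assuming the 4 GiB cage mentioned in the removed comment (illustrative only, not V8's real helpers):

  constexpr uintptr_t kCageSize = uintptr_t{4} * 1024 * 1024 * 1024;  // 4 GiB

  bool InSameCage(uintptr_t base, uintptr_t last) {
    // Rounding both addresses down to the cage size must yield the same cage
    // base, i.e. the range must not straddle a 4 GiB boundary.
    return (base & ~(kCageSize - 1)) == (last & ~(kCageSize - 1));
  }
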
diff --git a/deps/v8/src/heap/concurrent-allocator-inl.h b/deps/v8/src/heap/concurrent-allocator-inl.h
index f6eed1b696..efce44d363 100644
--- a/deps/v8/src/heap/concurrent-allocator-inl.h
+++ b/deps/v8/src/heap/concurrent-allocator-inl.h
@@ -13,7 +13,6 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/local-heap.h"
#include "src/heap/spaces-inl.h"
-#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
namespace v8 {
@@ -22,6 +21,7 @@ namespace internal {
AllocationResult ConcurrentAllocator::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
DCHECK(!v8_flags.enable_third_party_heap);
// TODO(dinfuehr): Add support for allocation observers
#ifdef DEBUG
@@ -34,15 +34,47 @@ AllocationResult ConcurrentAllocator::AllocateRaw(int size_in_bytes,
AllocationResult result;
if (USE_ALLOCATION_ALIGNMENT_BOOL && alignment != kTaggedAligned) {
- result = lab_.AllocateRawAligned(size_in_bytes, alignment);
+ result = AllocateInLabFastAligned(size_in_bytes, alignment);
} else {
- result = lab_.AllocateRawUnaligned(size_in_bytes);
+ result = AllocateInLabFastUnaligned(size_in_bytes);
}
return result.IsFailure()
? AllocateInLabSlow(size_in_bytes, alignment, origin)
: result;
}
+AllocationResult ConcurrentAllocator::AllocateInLabFastUnaligned(
+ int size_in_bytes) {
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
+
+ if (!lab_.CanIncrementTop(size_in_bytes)) {
+ return AllocationResult::Failure();
+ }
+
+ HeapObject object = HeapObject::FromAddress(lab_.IncrementTop(size_in_bytes));
+ return AllocationResult::FromObject(object);
+}
+
+AllocationResult ConcurrentAllocator::AllocateInLabFastAligned(
+ int size_in_bytes, AllocationAlignment alignment) {
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
+ Address current_top = lab_.top();
+ int filler_size = Heap::GetFillToAlign(current_top, alignment);
+ int aligned_size = filler_size + size_in_bytes;
+
+ if (!lab_.CanIncrementTop(aligned_size)) {
+ return AllocationResult::Failure();
+ }
+
+ HeapObject object = HeapObject::FromAddress(lab_.IncrementTop(aligned_size));
+
+ if (filler_size > 0) {
+ object = owning_heap()->PrecedeWithFiller(object, filler_size);
+ }
+
+ return AllocationResult::FromObject(object);
+}
+
} // namespace internal
} // namespace v8
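
The new aligned fast path computes the filler needed to align the current LAB top, bumps the top once by filler plus object size, and then turns the filler bytes into a filler object so the heap stays iterable. A minimal sketch of that bump-pointer arithmetic, deliberately not using V8's real helpers:

  #include <cstddef>
  #include <cstdint>

  // Returns the aligned object address, or 0 if the LAB cannot fit the
  // request (the analogue of falling back to AllocateInLabSlow above).
  inline uintptr_t BumpAllocateAligned(uintptr_t* top, uintptr_t limit,
                                       size_t size, size_t alignment) {
    const size_t filler = (alignment - (*top % alignment)) % alignment;
    if (limit - *top < filler + size) return 0;
    const uintptr_t object = *top + filler;  // filler precedes the object
    *top += filler + size;
    return object;
  }
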
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
index 4b02b14170..b4494d9a63 100644
--- a/deps/v8/src/heap/concurrent-allocator.cc
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -8,7 +8,9 @@
#include "src/execution/isolate.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/heap.h"
+#include "src/heap/linear-allocation-area.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/local-heap.h"
#include "src/heap/marking.h"
@@ -41,7 +43,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
heap->CreateFillerObjectAtBackground(result.ToAddress(),
kSmallObjectSize);
} else {
- local_heap.TryPerformCollection();
+ heap->CollectGarbageFromAnyThread(&local_heap);
}
result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
@@ -51,7 +53,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
heap->CreateFillerObjectAtBackground(result.ToAddress(),
kMediumObjectSize);
} else {
- local_heap.TryPerformCollection();
+ heap->CollectGarbageFromAnyThread(&local_heap);
}
result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
@@ -61,7 +63,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
heap->CreateFillerObjectAtBackground(result.ToAddress(),
kLargeObjectSize);
} else {
- local_heap.TryPerformCollection();
+ heap->CollectGarbageFromAnyThread(&local_heap);
}
local_heap.Safepoint();
}
@@ -77,24 +79,35 @@ void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
kDelayInSeconds);
}
+ConcurrentAllocator::ConcurrentAllocator(LocalHeap* local_heap,
+ PagedSpace* space)
+ : local_heap_(local_heap), space_(space), owning_heap_(space_->heap()) {}
+
void ConcurrentAllocator::FreeLinearAllocationArea() {
// The code page of the linear allocation area needs to be unprotected
// because we are going to write a filler into that memory area below.
base::Optional<CodePageMemoryModificationScope> optional_scope;
- if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
+ if (IsLabValid() && space_->identity() == CODE_SPACE) {
optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
}
- lab_.CloseAndMakeIterable();
+ if (lab_.top() != lab_.limit() &&
+ owning_heap()->incremental_marking()->black_allocation()) {
+ Page::FromAddress(lab_.top())
+ ->DestroyBlackAreaBackground(lab_.top(), lab_.limit());
+ }
+
+ MakeLabIterable();
+ ResetLab();
}
void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
// The code page of the linear allocation area needs to be unprotected
// because we are going to write a filler into that memory area below.
base::Optional<CodePageMemoryModificationScope> optional_scope;
- if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
+ if (IsLabValid() && space_->identity() == CODE_SPACE) {
optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
}
- lab_.MakeIterable();
+ MakeLabIterable();
}
void ConcurrentAllocator::MarkLinearAllocationAreaBlack() {
@@ -128,54 +141,134 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
AllocationResult ConcurrentAllocator::AllocateInLabSlow(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
- if (!EnsureLab(origin)) {
+ if (!AllocateLab(origin)) {
return AllocationResult::Failure();
}
AllocationResult allocation =
- lab_.AllocateRawAligned(size_in_bytes, alignment);
+ AllocateInLabFastAligned(size_in_bytes, alignment);
DCHECK(!allocation.IsFailure());
return allocation;
}
-bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
- auto result = space_->RawAllocateBackground(local_heap_, kMinLabSize,
- kMaxLabSize, origin);
+bool ConcurrentAllocator::AllocateLab(AllocationOrigin origin) {
+ auto result = AllocateFromSpaceFreeList(kMinLabSize, kMaxLabSize, origin);
if (!result) return false;
+ owning_heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
+
+ FreeLinearAllocationArea();
+
+ Address lab_start = result->first;
+ Address lab_end = lab_start + result->second;
+ lab_ = LinearAllocationArea(lab_start, lab_end);
+ DCHECK(IsLabValid());
+
if (IsBlackAllocationEnabled()) {
- Address top = result->first;
- Address limit = top + result->second;
+ Address top = lab_.top();
+ Address limit = lab_.limit();
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
}
- HeapObject object = HeapObject::FromAddress(result->first);
- LocalAllocationBuffer saved_lab = std::move(lab_);
- lab_ = LocalAllocationBuffer::FromResult(
- space_->heap(), AllocationResult::FromObject(object), result->second);
- DCHECK(lab_.IsValid());
- if (!lab_.TryMerge(&saved_lab)) {
- saved_lab.CloseAndMakeIterable();
- }
return true;
}
+base::Optional<std::pair<Address, size_t>>
+ConcurrentAllocator::AllocateFromSpaceFreeList(size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationOrigin origin) {
+ DCHECK(!space_->is_compaction_space());
+ DCHECK(space_->identity() == OLD_SPACE || space_->identity() == CODE_SPACE ||
+ space_->identity() == MAP_SPACE || space_->identity() == SHARED_SPACE);
+ DCHECK(origin == AllocationOrigin::kRuntime ||
+ origin == AllocationOrigin::kGC);
+ DCHECK_IMPLIES(!local_heap_, origin == AllocationOrigin::kGC);
+
+ base::Optional<std::pair<Address, size_t>> result =
+ space_->TryAllocationFromFreeListBackground(min_size_in_bytes,
+ max_size_in_bytes, origin);
+ if (result) return result;
+
+ // Sweeping is still in progress.
+ if (owning_heap()->sweeping_in_progress()) {
+ // First try to refill the free-list, concurrent sweeper threads
+ // may have freed some objects in the meantime.
+ {
+ TRACE_GC_EPOCH(owning_heap()->tracer(),
+ GCTracer::Scope::MC_BACKGROUND_SWEEPING,
+ ThreadKind::kBackground);
+ space_->RefillFreeList();
+ }
+
+ // Retry the free list allocation.
+ result = space_->TryAllocationFromFreeListBackground(
+ min_size_in_bytes, max_size_in_bytes, origin);
+ if (result) return result;
+
+ // Now contribute to sweeping from background thread and then try to
+ // reallocate.
+ int max_freed;
+ {
+ TRACE_GC_EPOCH(owning_heap()->tracer(),
+ GCTracer::Scope::MC_BACKGROUND_SWEEPING,
+ ThreadKind::kBackground);
+ const int kMaxPagesToSweep = 1;
+ max_freed = owning_heap()->sweeper()->ParallelSweepSpace(
+ space_->identity(), Sweeper::SweepingMode::kLazyOrConcurrent,
+ static_cast<int>(min_size_in_bytes), kMaxPagesToSweep);
+ space_->RefillFreeList();
+ }
+
+ if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
+ result = space_->TryAllocationFromFreeListBackground(
+ min_size_in_bytes, max_size_in_bytes, origin);
+ if (result) return result;
+ }
+ }
+
+ if (owning_heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap_) &&
+ owning_heap()->CanExpandOldGenerationBackground(local_heap_,
+ space_->AreaSize())) {
+ result = space_->TryExpandBackground(max_size_in_bytes);
+ if (result) return result;
+ }
+
+ if (owning_heap()->sweeping_in_progress()) {
+ // Complete sweeping for this space.
+ TRACE_GC_EPOCH(owning_heap()->tracer(),
+ GCTracer::Scope::MC_BACKGROUND_SWEEPING,
+ ThreadKind::kBackground);
+ owning_heap()->DrainSweepingWorklistForSpace(space_->identity());
+
+ space_->RefillFreeList();
+
+ // Last try to acquire memory from free list.
+ return space_->TryAllocationFromFreeListBackground(
+ min_size_in_bytes, max_size_in_bytes, origin);
+ }
+
+ return {};
+}
+
AllocationResult ConcurrentAllocator::AllocateOutsideLab(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
// Conservative estimate as we don't know the alignment of the allocation.
const int requested_filler_size = Heap::GetMaximumFillToAlign(alignment);
const int aligned_size_in_bytes = size_in_bytes + requested_filler_size;
- auto result = space_->RawAllocateBackground(
- local_heap_, aligned_size_in_bytes, aligned_size_in_bytes, origin);
+ auto result = AllocateFromSpaceFreeList(aligned_size_in_bytes,
+ aligned_size_in_bytes, origin);
if (!result) return AllocationResult::Failure();
+
+ owning_heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
+
DCHECK_GE(result->second, aligned_size_in_bytes);
- HeapObject object =
- (requested_filler_size)
- ? owning_heap()->AlignWithFiller(
- HeapObject::FromAddress(result->first), size_in_bytes,
- static_cast<int>(result->second), alignment)
- : HeapObject::FromAddress(result->first);
+ HeapObject object = HeapObject::FromAddress(result->first);
+ if (requested_filler_size > 0) {
+ object = owning_heap()->AlignWithFiller(
+ object, size_in_bytes, static_cast<int>(result->second), alignment);
+ }
+
if (IsBlackAllocationEnabled()) {
owning_heap()->incremental_marking()->MarkBlackBackground(object,
size_in_bytes);
@@ -187,7 +280,12 @@ bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
return owning_heap()->incremental_marking()->black_allocation();
}
-Heap* ConcurrentAllocator::owning_heap() const { return space_->heap(); }
+void ConcurrentAllocator::MakeLabIterable() {
+ if (IsLabValid()) {
+ owning_heap()->CreateFillerObjectAtBackground(
+ lab_.top(), static_cast<int>(lab_.limit() - lab_.top()));
+ }
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/concurrent-allocator.h b/deps/v8/src/heap/concurrent-allocator.h
index c7c3a28465..b9e003e6cb 100644
--- a/deps/v8/src/heap/concurrent-allocator.h
+++ b/deps/v8/src/heap/concurrent-allocator.h
@@ -5,8 +5,10 @@
#ifndef V8_HEAP_CONCURRENT_ALLOCATOR_H_
#define V8_HEAP_CONCURRENT_ALLOCATOR_H_
+#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
+#include "src/heap/linear-allocation-area.h"
#include "src/heap/spaces.h"
#include "src/tasks/cancelable-task.h"
@@ -37,10 +39,7 @@ class ConcurrentAllocator {
static constexpr int kMaxLabSize = 32 * KB;
static constexpr int kMaxLabObjectSize = 2 * KB;
- ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space)
- : local_heap_(local_heap),
- space_(space),
- lab_(LocalAllocationBuffer::InvalidBuffer()) {}
+ ConcurrentAllocator(LocalHeap* local_heap, PagedSpace* space);
inline AllocationResult AllocateRaw(int object_size,
AllocationAlignment alignment,
@@ -59,10 +58,20 @@ class ConcurrentAllocator {
"size <= kMaxLabObjectSize will fit into a newly allocated LAB of size "
"kLabSize after computing the alignment requirements.");
+ V8_EXPORT_PRIVATE V8_INLINE AllocationResult
+ AllocateInLabFastUnaligned(int size_in_bytes);
+
+ V8_EXPORT_PRIVATE V8_INLINE AllocationResult
+ AllocateInLabFastAligned(int size_in_bytes, AllocationAlignment alignment);
+
V8_EXPORT_PRIVATE AllocationResult
AllocateInLabSlow(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin);
- bool EnsureLab(AllocationOrigin origin);
+ bool AllocateLab(AllocationOrigin origin);
+
+ base::Optional<std::pair<Address, size_t>> AllocateFromSpaceFreeList(
+ size_t min_size_in_bytes, size_t max_size_in_bytes,
+ AllocationOrigin origin);
V8_EXPORT_PRIVATE AllocationResult
AllocateOutsideLab(int size_in_bytes, AllocationAlignment alignment,
@@ -70,13 +79,23 @@ class ConcurrentAllocator {
bool IsBlackAllocationEnabled() const;
+ // Checks whether the LAB is currently in use.
+ V8_INLINE bool IsLabValid() { return lab_.top() != kNullAddress; }
+
+ // Resets the LAB.
+ void ResetLab() { lab_ = LinearAllocationArea(kNullAddress, kNullAddress); }
+
+ // Installs a filler object between the LABs top and limit pointers.
+ void MakeLabIterable();
+
// Returns the Heap of space_. This might differ from the LocalHeap's Heap for
// shared spaces.
- Heap* owning_heap() const;
+ Heap* owning_heap() const { return owning_heap_; }
LocalHeap* const local_heap_;
PagedSpace* const space_;
- LocalAllocationBuffer lab_;
+ Heap* const owning_heap_;
+ LinearAllocationArea lab_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index d15cb26bcd..652037e2e4 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -16,6 +16,7 @@
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking.h"
@@ -52,6 +53,8 @@ class ConcurrentMarkingState final
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
+ DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
+ IsAligned(by, kObjectAlignment8GbHeap));
(*memory_chunk_data_)[chunk].live_bytes += by;
}
@@ -145,9 +148,7 @@ class ConcurrentMarkingVisitorUtility {
if (!object.IsHeapObject()) continue;
HeapObject heap_object = HeapObject::cast(object);
visitor->SynchronizePageAccess(heap_object);
- BasicMemoryChunk* target_page =
- BasicMemoryChunk::FromHeapObject(heap_object);
- if (!visitor->is_shared_heap() && target_page->InSharedHeap()) continue;
+ if (!visitor->ShouldMarkObject(heap_object)) continue;
visitor->MarkObject(host, heap_object);
visitor->RecordSlot(host, slot, heap_object);
}
@@ -262,7 +263,9 @@ class YoungGenerationConcurrentMarkingVisitor final
heap->isolate(), worklists_local),
marking_state_(heap->isolate(), memory_chunk_data) {}
- bool is_shared_heap() { return false; }
+ bool ShouldMarkObject(HeapObject object) const {
+ return !object.InSharedHeap();
+ }
void SynchronizePageAccess(HeapObject heap_object) {
#ifdef THREAD_SANITIZER
@@ -555,7 +558,7 @@ class ConcurrentMarkingVisitor final
DCHECK(length.IsSmi());
int size = T::SizeFor(Smi::ToInt(length));
marking_state_.IncrementLiveBytes(MemoryChunk::FromHeapObject(object),
- size);
+ ALIGN_TO_ALLOCATION_ALIGNMENT(size));
VisitMapPointer(object);
T::BodyDescriptor::IterateBody(map, object, size, this);
return size;
diff --git a/deps/v8/src/heap/conservative-stack-visitor.cc b/deps/v8/src/heap/conservative-stack-visitor.cc
index eaf2860787..a9785fb284 100644
--- a/deps/v8/src/heap/conservative-stack-visitor.cc
+++ b/deps/v8/src/heap/conservative-stack-visitor.cc
@@ -4,9 +4,13 @@
#include "src/heap/conservative-stack-visitor.h"
-#include "src/heap/large-spaces.h"
-#include "src/heap/paged-spaces-inl.h"
-#include "src/heap/paged-spaces.h"
+#include "src/execution/isolate-inl.h"
+#include "src/heap/mark-compact.h"
+#include "src/objects/visitors.h"
+
+#ifdef V8_COMPRESS_POINTERS
+#include "src/common/ptr-compr-inl.h"
+#endif // V8_COMPRESS_POINTERS
namespace v8 {
namespace internal {
@@ -16,61 +20,30 @@ ConservativeStackVisitor::ConservativeStackVisitor(Isolate* isolate,
: isolate_(isolate), delegate_(delegate) {}
void ConservativeStackVisitor::VisitPointer(const void* pointer) {
- VisitConservativelyIfPointer(pointer);
+ auto address = reinterpret_cast<Address>(const_cast<void*>(pointer));
+ VisitConservativelyIfPointer(address);
+#ifdef V8_COMPRESS_POINTERS
+ V8HeapCompressionScheme::ProcessIntermediatePointers(
+ isolate_, address,
+ [this](Address ptr) { VisitConservativelyIfPointer(ptr); });
+#endif // V8_COMPRESS_POINTERS
}
-bool ConservativeStackVisitor::CheckPage(Address address, MemoryChunk* page) {
- if (address < page->area_start() || address >= page->area_end()) return false;
-
+void ConservativeStackVisitor::VisitConservativelyIfPointer(Address address) {
Address base_ptr;
-#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- base_ptr = page->object_start_bitmap()->FindBasePtr(address);
-#elif V8_ENABLE_INNER_POINTER_RESOLUTION_MB
+#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_MB
base_ptr = isolate_->heap()->mark_compact_collector()->FindBasePtrForMarking(
address);
#else
#error "Some inner pointer resolution mechanism is needed"
-#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_(OSB|MB)
-
- if (base_ptr == kNullAddress) return false;
-
- // At this point, base_ptr *must* refer to the valid object. We check if
- // |address| resides inside the object or beyond it in unused memory.
+#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_MB
+ if (base_ptr == kNullAddress) return;
HeapObject obj = HeapObject::FromAddress(base_ptr);
- Address obj_end = obj.address() + obj.Size();
-
- if (address >= obj_end) {
- // |address| points to unused memory.
- return false;
- }
-
Object root = obj;
delegate_->VisitRootPointer(Root::kHandleScope, nullptr,
FullObjectSlot(&root));
// Check that the delegate visitor did not modify the root slot.
DCHECK_EQ(root, obj);
- return true;
-}
-
-void ConservativeStackVisitor::VisitConservativelyIfPointer(
- const void* pointer) {
- auto address = reinterpret_cast<Address>(pointer);
-
- for (Page* page : *isolate_->heap()->old_space()) {
- if (CheckPage(address, page)) {
- return;
- }
- }
-
- for (LargePage* page : *isolate_->heap()->lo_space()) {
- if (address >= page->area_start() && address < page->area_end()) {
- Object ptr = page->GetObject();
- FullObjectSlot root = FullObjectSlot(&ptr);
- delegate_->VisitRootPointer(Root::kHandleScope, nullptr, root);
- DCHECK(root == FullObjectSlot(&ptr));
- return;
- }
- }
}
} // namespace internal
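
With this change the conservative stack visitor no longer walks old-space and large-object pages itself: every candidate address goes through the mark-compact collector's inner-pointer resolution, and under pointer compression the 32-bit halves of each word are additionally tried via ProcessIntermediatePointers. A condensed sketch of the per-word flow, with the resolver and reporting left as hypothetical placeholders:

  // Sketch only; FindBaseOfObjectContaining stands in for the collector's
  // FindBasePtrForMarking used above, ReportRoot for the delegate callback.
  void VisitWordConservatively(uintptr_t word) {
    uintptr_t base = FindBaseOfObjectContaining(word);
    if (base == 0) return;  // not a pointer into the managed heap
    // Report the object containing the (possibly inner) pointer as a root;
    // the delegate must not modify the reported slot.
    ReportRoot(base);
  }
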
diff --git a/deps/v8/src/heap/conservative-stack-visitor.h b/deps/v8/src/heap/conservative-stack-visitor.h
index 649608b423..e63ba5b673 100644
--- a/deps/v8/src/heap/conservative-stack-visitor.h
+++ b/deps/v8/src/heap/conservative-stack-visitor.h
@@ -5,22 +5,23 @@
#ifndef V8_HEAP_CONSERVATIVE_STACK_VISITOR_H_
#define V8_HEAP_CONSERVATIVE_STACK_VISITOR_H_
+#include "include/v8-internal.h"
#include "src/heap/base/stack.h"
-#include "src/heap/memory-chunk.h"
namespace v8 {
namespace internal {
-class ConservativeStackVisitor : public ::heap::base::StackVisitor {
+class RootVisitor;
+
+class V8_EXPORT_PRIVATE ConservativeStackVisitor
+ : public ::heap::base::StackVisitor {
public:
ConservativeStackVisitor(Isolate* isolate, RootVisitor* delegate);
void VisitPointer(const void* pointer) final;
private:
- bool CheckPage(Address address, MemoryChunk* page);
-
- void VisitConservativelyIfPointer(const void* pointer);
+ void VisitConservativelyIfPointer(Address address);
Isolate* isolate_ = nullptr;
RootVisitor* delegate_ = nullptr;
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 2a9742b1ea..28beb83e27 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -55,6 +55,49 @@
namespace v8 {
+namespace internal {
+
+class MinorGCHeapGrowing
+ : public cppgc::internal::StatsCollector::AllocationObserver {
+ public:
+ explicit MinorGCHeapGrowing(cppgc::internal::StatsCollector& stats_collector)
+ : stats_collector_(stats_collector) {
+ stats_collector.RegisterObserver(this);
+ }
+ virtual ~MinorGCHeapGrowing() = default;
+
+ void AllocatedObjectSizeIncreased(size_t) final {}
+ void AllocatedObjectSizeDecreased(size_t) final {}
+ void ResetAllocatedObjectSize(size_t allocated_object_size) final {
+ ConfigureLimit(allocated_object_size);
+ }
+
+ bool LimitReached() const {
+ return stats_collector_.allocated_object_size() >= limit_for_atomic_gc_;
+ }
+
+ private:
+ void ConfigureLimit(size_t allocated_object_size) {
+ // Constant growing factor for growing the heap limit.
+ static constexpr double kGrowingFactor = 1.5;
+ // For smaller heaps, allow allocating at least LAB in each regular space
+ // before triggering GC again.
+ static constexpr size_t kMinLimitIncrease =
+ cppgc::internal::kPageSize *
+ cppgc::internal::RawHeap::kNumberOfRegularSpaces;
+
+ const size_t size = std::max(allocated_object_size, initial_heap_size_);
+ limit_for_atomic_gc_ = std::max(static_cast<size_t>(size * kGrowingFactor),
+ size + kMinLimitIncrease);
+ }
+
+ cppgc::internal::StatsCollector& stats_collector_;
+ size_t initial_heap_size_ = 1 * cppgc::internal::kMB;
+ size_t limit_for_atomic_gc_ = 0; // See ConfigureLimit().
+};
+
+} // namespace internal
+
namespace {
START_ALLOW_USE_DEPRECATED()
@@ -286,7 +329,8 @@ class UnifiedHeapConservativeMarkingVisitor final
class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
public:
UnifiedHeapMarker(Heap* v8_heap, cppgc::internal::HeapBase& cpp_heap,
- cppgc::Platform* platform, MarkingConfig config);
+ cppgc::Platform* platform,
+ cppgc::internal::MarkingConfig config);
~UnifiedHeapMarker() final = default;
@@ -324,7 +368,7 @@ class UnifiedHeapMarker final : public cppgc::internal::MarkerBase {
UnifiedHeapMarker::UnifiedHeapMarker(Heap* v8_heap,
cppgc::internal::HeapBase& heap,
cppgc::Platform* platform,
- MarkingConfig config)
+ cppgc::internal::MarkingConfig config)
: cppgc::internal::MarkerBase(heap, platform, config),
mutator_unified_heap_marking_state_(v8_heap, nullptr),
marking_visitor_(config.collection_type == CppHeap::CollectionType::kMajor
@@ -487,6 +531,8 @@ CppHeap::CppHeap(
cppgc::internal::HeapBase::StackSupport::
kSupportsConservativeStackScan,
marking_support, sweeping_support, *this),
+ minor_gc_heap_growing_(
+ std::make_unique<MinorGCHeapGrowing>(*stats_collector())),
wrapper_descriptor_(wrapper_descriptor) {
CHECK_NE(WrapperDescriptor::kUnknownEmbedderId,
wrapper_descriptor_.embedder_id_for_garbage_collected);
@@ -509,6 +555,29 @@ void CppHeap::Terminate() {
HeapBase::Terminate();
}
+namespace {
+
+class SweepingOnMutatorThreadForGlobalHandlesObserver final
+ : public cppgc::internal::Sweeper::SweepingOnMutatorThreadObserver {
+ public:
+ SweepingOnMutatorThreadForGlobalHandlesObserver(CppHeap& cpp_heap,
+ GlobalHandles& global_handles)
+ : cppgc::internal::Sweeper::SweepingOnMutatorThreadObserver(
+ cpp_heap.sweeper()),
+ global_handles_(global_handles) {}
+
+ void Start() override {
+ global_handles_.NotifyStartSweepingOnMutatorThread();
+ }
+
+ void End() override { global_handles_.NotifyEndSweepingOnMutatorThread(); }
+
+ private:
+ GlobalHandles& global_handles_;
+};
+
+} // namespace
+
void CppHeap::AttachIsolate(Isolate* isolate) {
CHECK(!in_detached_testing_mode_);
CHECK_NULL(isolate_);
@@ -522,6 +591,9 @@ void CppHeap::AttachIsolate(Isolate* isolate) {
SetMetricRecorder(std::make_unique<MetricRecorderAdapter>(*this));
oom_handler().SetCustomHandler(&FatalOutOfMemoryHandlerImpl);
ReduceGCCapabilititesFromFlags();
+ sweeping_on_mutator_thread_observer_ =
+ std::make_unique<SweepingOnMutatorThreadForGlobalHandlesObserver>(
+ *this, *isolate_->global_handles());
no_gc_scope_--;
}
@@ -538,6 +610,8 @@ void CppHeap::DetachIsolate() {
}
sweeper_.FinishIfRunning();
+ sweeping_on_mutator_thread_observer_.reset();
+
auto* heap_profiler = isolate_->heap_profiler();
if (heap_profiler) {
heap_profiler->RemoveBuildEmbedderGraphCallback(&CppGraphBuilder::Run,
@@ -619,17 +693,20 @@ void CppHeap::InitializeTracing(CollectionType collection_type,
#if defined(CPPGC_YOUNG_GENERATION)
if (generational_gc_supported() &&
- *collection_type_ == CollectionType::kMajor)
+ *collection_type_ == CollectionType::kMajor) {
+ cppgc::internal::StatsCollector::EnabledScope stats_scope(
+ stats_collector(), cppgc::internal::StatsCollector::kUnmark);
cppgc::internal::SequentialUnmarker unmarker(raw_heap());
+ }
#endif // defined(CPPGC_YOUNG_GENERATION)
current_gc_flags_ = gc_flags;
- const UnifiedHeapMarker::MarkingConfig marking_config{
+ const cppgc::internal::MarkingConfig marking_config{
*collection_type_, StackState::kNoHeapPointers, SelectMarkingType(),
IsForceGC(current_gc_flags_)
- ? UnifiedHeapMarker::MarkingConfig::IsForcedGC::kForced
- : UnifiedHeapMarker::MarkingConfig::IsForcedGC::kNotForced};
+ ? cppgc::internal::MarkingConfig::IsForcedGC::kForced
+ : cppgc::internal::MarkingConfig::IsForcedGC::kNotForced};
DCHECK_IMPLIES(!isolate_,
(MarkingType::kAtomic == marking_config.marking_type) ||
force_incremental_marking_for_testing_);
@@ -695,8 +772,7 @@ void CppHeap::EnterFinalPause(cppgc::EmbedderStackState stack_state) {
auto& heap = *isolate()->heap();
marker.conservative_visitor().SetGlobalHandlesMarkingVisitor(
std::make_unique<GlobalHandleMarkingVisitor>(
- heap, *heap.mark_compact_collector()->marking_state(),
- *heap.mark_compact_collector()->local_marking_worklists()));
+ heap, *heap.mark_compact_collector()->local_marking_worklists()));
}
marker.EnterAtomicPause(stack_state);
if (isolate_ && *collection_type_ == CollectionType::kMinor) {
@@ -753,14 +829,14 @@ void CppHeap::TraceEpilogue() {
{
cppgc::subtle::NoGarbageCollectionScope no_gc(*this);
- cppgc::internal::Sweeper::SweepingConfig::CompactableSpaceHandling
+ cppgc::internal::SweepingConfig::CompactableSpaceHandling
compactable_space_handling = compactor_.CompactSpacesIfEnabled();
- const cppgc::internal::Sweeper::SweepingConfig sweeping_config{
+ const cppgc::internal::SweepingConfig sweeping_config{
SelectSweepingType(), compactable_space_handling,
ShouldReduceMemory(current_gc_flags_)
- ? cppgc::internal::Sweeper::SweepingConfig::FreeMemoryHandling::
+ ? cppgc::internal::SweepingConfig::FreeMemoryHandling::
kDiscardWherePossible
- : cppgc::internal::Sweeper::SweepingConfig::FreeMemoryHandling::
+ : cppgc::internal::SweepingConfig::FreeMemoryHandling::
kDoNotDiscard};
DCHECK_IMPLIES(!isolate_,
SweepingType::kAtomic == sweeping_config.sweeping_type);
@@ -772,15 +848,17 @@ void CppHeap::TraceEpilogue() {
sweeper().NotifyDoneIfNeeded();
}
-void CppHeap::RunMinorGC(StackState stack_state) {
- DCHECK(!sweeper_.IsSweepingInProgress());
-
+void CppHeap::RunMinorGCIfNeeded(StackState stack_state) {
if (!generational_gc_supported()) return;
if (in_no_gc_scope()) return;
// Minor GC does not support nesting in full GCs.
if (IsMarking()) return;
// Minor GCs with the stack are currently not supported.
if (stack_state == StackState::kMayContainHeapPointers) return;
+ // Run only when the limit is reached.
+ if (!minor_gc_heap_growing_->LimitReached()) return;
+
+ DCHECK(!sweeper_.IsSweepingInProgress());
// Notify GC tracer that CppGC started young GC cycle.
isolate_->heap()->tracer()->NotifyYoungCppGCRunning();
@@ -928,8 +1006,8 @@ class CollectCustomSpaceStatisticsAtLastGCTask final : public v8::Task {
void Run() final {
cppgc::internal::Sweeper& sweeper = heap_.sweeper();
if (sweeper.PerformSweepOnMutatorThread(
- heap_.platform()->MonotonicallyIncreasingTime() +
- kStepSizeMs.InSecondsF())) {
+ kStepSizeMs,
+ cppgc::internal::StatsCollector::kSweepInTaskForStatistics)) {
// Sweeping is done.
DCHECK(!sweeper.IsSweepingInProgress());
ReportCustomSpaceStatistics(heap_.raw_heap(), std::move(custom_spaces_),
@@ -1004,14 +1082,15 @@ CppHeap::PauseConcurrentMarkingScope::PauseConcurrentMarkingScope(
}
}
-void CppHeap::CollectGarbage(Config config) {
+void CppHeap::CollectGarbage(cppgc::internal::GCConfig config) {
if (in_no_gc_scope() || !isolate_) return;
// TODO(mlippautz): Respect full config.
- const int flags = (config.free_memory_handling ==
- Config::FreeMemoryHandling::kDiscardWherePossible)
- ? Heap::kReduceMemoryFootprintMask
- : Heap::kNoGCFlags;
+ const int flags =
+ (config.free_memory_handling ==
+ cppgc::internal::GCConfig::FreeMemoryHandling::kDiscardWherePossible)
+ ? Heap::kReduceMemoryFootprintMask
+ : Heap::kNoGCFlags;
isolate_->heap()->CollectAllGarbage(
flags, GarbageCollectionReason::kCppHeapAllocationFailure);
}
@@ -1020,7 +1099,9 @@ const cppgc::EmbedderStackState* CppHeap::override_stack_state() const {
return HeapBase::override_stack_state();
}
-void CppHeap::StartIncrementalGarbageCollection(Config) { UNIMPLEMENTED(); }
+void CppHeap::StartIncrementalGarbageCollection(cppgc::internal::GCConfig) {
+ UNIMPLEMENTED();
+}
size_t CppHeap::epoch() const { UNIMPLEMENTED(); }
} // namespace internal
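
MinorGCHeapGrowing, introduced at the top of this file, gates RunMinorGCIfNeeded(): a young GC only runs once the allocated object size reaches a limit recomputed after each GC as max(size * kGrowingFactor, size + kMinLimitIncrease). A worked example with hypothetical numbers:

  // Assume the last GC left 4 MB of live cppgc objects and initial_heap_size_
  // is the default 1 MB:
  //   size                 = max(4 MB, 1 MB)                       = 4 MB
  //   limit_for_atomic_gc_ = max(4 MB * 1.5, 4 MB + kMinLimitIncrease)
  //                        = 6 MB   (whenever kMinLimitIncrease < 2 MB)
  // RunMinorGCIfNeeded() then keeps returning early until
  // allocated_object_size() reaches 6 MB, at which point LimitReached()
  // lets the young GC proceed.
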
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index c109841ea2..87473682d0 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -28,6 +28,7 @@ class Isolate;
namespace internal {
class CppMarkingState;
+class MinorGCHeapGrowing;
// A C++ heap implementation used with V8 to implement unified heap.
class V8_EXPORT_PRIVATE CppHeap final
@@ -43,9 +44,8 @@ class V8_EXPORT_PRIVATE CppHeap final
};
using GarbageCollectionFlags = base::Flags<GarbageCollectionFlagValues>;
- using StackState = cppgc::internal::GarbageCollector::Config::StackState;
- using CollectionType =
- cppgc::internal::GarbageCollector::Config::CollectionType;
+ using StackState = cppgc::internal::StackState;
+ using CollectionType = cppgc::internal::CollectionType;
class MetricRecorderAdapter final : public cppgc::internal::MetricRecorder {
public:
@@ -139,9 +139,7 @@ class V8_EXPORT_PRIVATE CppHeap final
void FinishSweepingIfRunning();
void FinishSweepingIfOutOfWork();
- void InitializeTracing(
- cppgc::internal::GarbageCollector::Config::CollectionType,
- GarbageCollectionFlags);
+ void InitializeTracing(CollectionType, GarbageCollectionFlags);
void StartTracing();
bool AdvanceTracing(double max_duration);
bool IsTracingDone();
@@ -149,7 +147,7 @@ class V8_EXPORT_PRIVATE CppHeap final
void EnterFinalPause(cppgc::EmbedderStackState stack_state);
bool FinishConcurrentMarkingIfNeeded();
- void RunMinorGC(StackState);
+ void RunMinorGCIfNeeded(StackState);
// StatsCollector::AllocationObserver interface.
void AllocatedObjectSizeIncreased(size_t) final;
@@ -168,9 +166,9 @@ class V8_EXPORT_PRIVATE CppHeap final
std::unique_ptr<CppMarkingState> CreateCppMarkingStateForMutatorThread();
// cppgc::internal::GarbageCollector interface.
- void CollectGarbage(Config) override;
+ void CollectGarbage(cppgc::internal::GCConfig) override;
const cppgc::EmbedderStackState* override_stack_state() const override;
- void StartIncrementalGarbageCollection(Config) override;
+ void StartIncrementalGarbageCollection(cppgc::internal::GCConfig) override;
size_t epoch() const override;
private:
@@ -194,10 +192,14 @@ class V8_EXPORT_PRIVATE CppHeap final
Isolate* isolate_ = nullptr;
bool marking_done_ = false;
// |collection_type_| is initialized when marking is in progress.
- base::Optional<cppgc::internal::GarbageCollector::Config::CollectionType>
- collection_type_;
+ base::Optional<CollectionType> collection_type_;
GarbageCollectionFlags current_gc_flags_;
+ std::unique_ptr<MinorGCHeapGrowing> minor_gc_heap_growing_;
+
+ std::unique_ptr<cppgc::internal::Sweeper::SweepingOnMutatorThreadObserver>
+ sweeping_on_mutator_thread_observer_;
+
// Buffered allocated bytes. Reporting allocated bytes to V8 can trigger a GC
// atomic pause. Allocated bytes are buffer in case this is temporarily
// prohibited.
@@ -207,7 +209,6 @@ class V8_EXPORT_PRIVATE CppHeap final
bool in_detached_testing_mode_ = false;
bool force_incremental_marking_for_testing_ = false;
-
bool is_in_v8_marking_step_ = false;
friend class MetricRecorderAdapter;
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h
index afa830b051..212b41ed1d 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state-inl.h
@@ -14,6 +14,7 @@
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-worklist-inl.h"
namespace v8 {
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc
index 10c2c19cac..116563769f 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.cc
@@ -13,8 +13,7 @@ namespace internal {
UnifiedHeapMarkingState::UnifiedHeapMarkingState(
Heap* heap, MarkingWorklists::Local* local_marking_worklist)
: heap_(heap),
- marking_state_(heap_ ? heap_->mark_compact_collector()->marking_state()
- : nullptr),
+ marking_state_(heap_ ? heap_->marking_state() : nullptr),
local_marking_worklist_(local_marking_worklist),
track_retaining_path_(v8_flags.track_retaining_path) {
DCHECK_IMPLIES(v8_flags.track_retaining_path,
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
index b8940d145a..3ac2a93dee 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
@@ -57,7 +57,7 @@ class UnifiedHeapVerificationVisitor final : public JSVisitor {
UnifiedHeapMarkingVerifier::UnifiedHeapMarkingVerifier(
cppgc::internal::HeapBase& heap_base,
- cppgc::internal::Heap::Config::CollectionType collection_type)
+ cppgc::internal::CollectionType collection_type)
: MarkingVerifierBase(
heap_base, collection_type, state_,
std::make_unique<UnifiedHeapVerificationVisitor>(state_)) {}
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
index 71bed04573..78a6ce1e69 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
@@ -14,7 +14,7 @@ class V8_EXPORT_PRIVATE UnifiedHeapMarkingVerifier final
: public cppgc::internal::MarkingVerifierBase {
public:
UnifiedHeapMarkingVerifier(cppgc::internal::HeapBase&,
- cppgc::internal::Heap::Config::CollectionType);
+ cppgc::internal::CollectionType);
~UnifiedHeapMarkingVerifier() final = default;
private:
diff --git a/deps/v8/src/heap/cppgc/compactor.cc b/deps/v8/src/heap/cppgc/compactor.cc
index e3792a32f8..68ee147dda 100644
--- a/deps/v8/src/heap/cppgc/compactor.cc
+++ b/deps/v8/src/heap/cppgc/compactor.cc
@@ -452,13 +452,11 @@ Compactor::Compactor(RawHeap& heap) : heap_(heap) {
}
}
-bool Compactor::ShouldCompact(
- GarbageCollector::Config::MarkingType marking_type,
- GarbageCollector::Config::StackState stack_state) const {
+bool Compactor::ShouldCompact(GCConfig::MarkingType marking_type,
+ StackState stack_state) const {
if (compactable_spaces_.empty() ||
- (marking_type == GarbageCollector::Config::MarkingType::kAtomic &&
- stack_state ==
- GarbageCollector::Config::StackState::kMayContainHeapPointers)) {
+ (marking_type == GCConfig::MarkingType::kAtomic &&
+ stack_state == StackState::kMayContainHeapPointers)) {
// The following check ensures that tests that want to test compaction are
// not interrupted by garbage collections that cannot use compaction.
DCHECK(!enable_for_next_gc_for_testing_);
@@ -474,9 +472,8 @@ bool Compactor::ShouldCompact(
return free_list_size > kFreeListSizeThreshold;
}
-void Compactor::InitializeIfShouldCompact(
- GarbageCollector::Config::MarkingType marking_type,
- GarbageCollector::Config::StackState stack_state) {
+void Compactor::InitializeIfShouldCompact(GCConfig::MarkingType marking_type,
+ StackState stack_state) {
DCHECK(!is_enabled_);
if (!ShouldCompact(marking_type, stack_state)) return;
@@ -487,9 +484,8 @@ void Compactor::InitializeIfShouldCompact(
is_cancelled_ = false;
}
-void Compactor::CancelIfShouldNotCompact(
- GarbageCollector::Config::MarkingType marking_type,
- GarbageCollector::Config::StackState stack_state) {
+void Compactor::CancelIfShouldNotCompact(GCConfig::MarkingType marking_type,
+ StackState stack_state) {
if (!is_enabled_ || ShouldCompact(marking_type, stack_state)) return;
is_cancelled_ = true;
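A stand-alone sketch of the compaction gate after the type cleanup; MayCompact and its parameters are illustrative, while the conditions mirror ShouldCompact above:
// Compaction moves objects, so it is skipped when an atomic GC may scan the
// stack conservatively (on-stack pointers could not be updated safely), and it
// is only worthwhile when enough free-list memory could be reclaimed.
bool MayCompact(cppgc::internal::GCConfig::MarkingType marking_type,
                cppgc::internal::StackState stack_state,
                bool has_compactable_spaces, size_t free_list_size,
                size_t free_list_size_threshold) {
  if (!has_compactable_spaces ||
      (marking_type == cppgc::internal::GCConfig::MarkingType::kAtomic &&
       stack_state ==
           cppgc::internal::StackState::kMayContainHeapPointers)) {
    return false;
  }
  return free_list_size > free_list_size_threshold;
}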
diff --git a/deps/v8/src/heap/cppgc/compactor.h b/deps/v8/src/heap/cppgc/compactor.h
index d79e6a7a65..9638996a42 100644
--- a/deps/v8/src/heap/cppgc/compactor.h
+++ b/deps/v8/src/heap/cppgc/compactor.h
@@ -12,9 +12,10 @@
namespace cppgc {
namespace internal {
+class NormalPageSpace;
+
class V8_EXPORT_PRIVATE Compactor final {
- using CompactableSpaceHandling =
- Sweeper::SweepingConfig::CompactableSpaceHandling;
+ using CompactableSpaceHandling = SweepingConfig::CompactableSpaceHandling;
public:
explicit Compactor(RawHeap&);
@@ -23,10 +24,8 @@ class V8_EXPORT_PRIVATE Compactor final {
Compactor(const Compactor&) = delete;
Compactor& operator=(const Compactor&) = delete;
- void InitializeIfShouldCompact(GarbageCollector::Config::MarkingType,
- GarbageCollector::Config::StackState);
- void CancelIfShouldNotCompact(GarbageCollector::Config::MarkingType,
- GarbageCollector::Config::StackState);
+ void InitializeIfShouldCompact(GCConfig::MarkingType, StackState);
+ void CancelIfShouldNotCompact(GCConfig::MarkingType, StackState);
// Returns whether spaces need to be processed by the Sweeper after
// compaction.
CompactableSpaceHandling CompactSpacesIfEnabled();
@@ -39,8 +38,7 @@ class V8_EXPORT_PRIVATE Compactor final {
bool IsEnabledForTesting() const { return is_enabled_; }
private:
- bool ShouldCompact(GarbageCollector::Config::MarkingType,
- GarbageCollector::Config::StackState) const;
+ bool ShouldCompact(GCConfig::MarkingType, StackState) const;
RawHeap& heap_;
// Compactor does not own the compactable spaces. The heap owns all spaces.
diff --git a/deps/v8/src/heap/cppgc/explicit-management.cc b/deps/v8/src/heap/cppgc/explicit-management.cc
index 3a18bd3369..560b18dc58 100644
--- a/deps/v8/src/heap/cppgc/explicit-management.cc
+++ b/deps/v8/src/heap/cppgc/explicit-management.cc
@@ -11,6 +11,7 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/memory.h"
+#include "src/heap/cppgc/object-view.h"
namespace cppgc {
namespace internal {
@@ -36,21 +37,30 @@ void ExplicitManagementImpl::FreeUnreferencedObject(HeapHandle& heap_handle,
auto& header = HeapObjectHeader::FromObject(object);
header.Finalize();
- size_t object_size = 0;
- USE(object_size);
-
// `object` is guaranteed to be of type GarbageCollected, so getting the
// BasePage is okay for regular and large objects.
BasePage* base_page = BasePage::FromPayload(object);
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ const size_t object_size = ObjectView<>(header).Size();
+
+ if (auto& heap_base = HeapBase::From(heap_handle);
+ heap_base.generational_gc_supported()) {
+ heap_base.remembered_set().InvalidateRememberedSlotsInRange(
+ object, reinterpret_cast<uint8_t*>(object) + object_size);
+ // If this object was registered as remembered, remove it. Do that before
+ // the page gets destroyed.
+ heap_base.remembered_set().InvalidateRememberedSourceObject(header);
+ }
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
if (base_page->is_large()) { // Large object.
- object_size = LargePage::From(base_page)->ObjectSize();
base_page->space().RemovePage(base_page);
base_page->heap().stats_collector()->NotifyExplicitFree(
LargePage::From(base_page)->PayloadSize());
LargePage::Destroy(LargePage::From(base_page));
} else { // Regular object.
const size_t header_size = header.AllocatedSize();
- object_size = header.ObjectSize();
auto* normal_page = NormalPage::From(base_page);
auto& normal_space = *static_cast<NormalPageSpace*>(&base_page->space());
auto& lab = normal_space.linear_allocation_buffer();
@@ -66,15 +76,6 @@ void ExplicitManagementImpl::FreeUnreferencedObject(HeapHandle& heap_handle,
// list entry.
}
}
-#if defined(CPPGC_YOUNG_GENERATION)
- auto& heap_base = HeapBase::From(heap_handle);
- if (heap_base.generational_gc_supported()) {
- heap_base.remembered_set().InvalidateRememberedSlotsInRange(
- object, reinterpret_cast<uint8_t*>(object) + object_size);
- // If this object was registered as remembered, remove it.
- heap_base.remembered_set().InvalidateRememberedSourceObject(header);
- }
-#endif // defined(CPPGC_YOUNG_GENERATION)
}
namespace {
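The hunks above move the young-generation bookkeeping ahead of page teardown. A condensed sketch of the required order, using only calls visible in this patch (the free function is hypothetical and assumes CPPGC_YOUNG_GENERATION):
// Remembered-set entries point into the object's page, so they must be
// invalidated while the page is still alive, i.e. before the large page is
// destroyed or the block is returned to the free list.
void InvalidateGenerationalBookkeeping(
    cppgc::internal::HeapBase& heap_base, void* object, size_t object_size,
    cppgc::internal::HeapObjectHeader& header) {
  if (!heap_base.generational_gc_supported()) return;
  heap_base.remembered_set().InvalidateRememberedSlotsInRange(
      object, reinterpret_cast<uint8_t*>(object) + object_size);
  heap_base.remembered_set().InvalidateRememberedSourceObject(header);
}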
diff --git a/deps/v8/src/heap/cppgc/garbage-collector.h b/deps/v8/src/heap/cppgc/garbage-collector.h
index a49a7a1bad..8a08f56b6b 100644
--- a/deps/v8/src/heap/cppgc/garbage-collector.h
+++ b/deps/v8/src/heap/cppgc/garbage-collector.h
@@ -6,8 +6,7 @@
#define V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
#include "include/cppgc/common.h"
-#include "src/heap/cppgc/marker.h"
-#include "src/heap/cppgc/sweeper.h"
+#include "src/heap/cppgc/heap-config.h"
namespace cppgc {
namespace internal {
@@ -16,62 +15,9 @@ namespace internal {
// needed to mock/fake GC for testing.
class GarbageCollector {
public:
- struct Config {
- using CollectionType = Marker::MarkingConfig::CollectionType;
- using StackState = cppgc::Heap::StackState;
- using MarkingType = Marker::MarkingConfig::MarkingType;
- using SweepingType = Sweeper::SweepingConfig::SweepingType;
- using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
- using IsForcedGC = Marker::MarkingConfig::IsForcedGC;
-
- static constexpr Config ConservativeAtomicConfig() {
- return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
- MarkingType::kAtomic, SweepingType::kAtomic};
- }
-
- static constexpr Config PreciseAtomicConfig() {
- return {CollectionType::kMajor, StackState::kNoHeapPointers,
- MarkingType::kAtomic, SweepingType::kAtomic};
- }
-
- static constexpr Config ConservativeIncrementalConfig() {
- return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
- MarkingType::kIncremental, SweepingType::kAtomic};
- }
-
- static constexpr Config PreciseIncrementalConfig() {
- return {CollectionType::kMajor, StackState::kNoHeapPointers,
- MarkingType::kIncremental, SweepingType::kAtomic};
- }
-
- static constexpr Config
- PreciseIncrementalMarkingConcurrentSweepingConfig() {
- return {CollectionType::kMajor, StackState::kNoHeapPointers,
- MarkingType::kIncremental,
- SweepingType::kIncrementalAndConcurrent};
- }
-
- static constexpr Config MinorPreciseAtomicConfig() {
- return {CollectionType::kMinor, StackState::kNoHeapPointers,
- MarkingType::kAtomic, SweepingType::kAtomic};
- }
-
- static constexpr Config MinorConservativeAtomicConfig() {
- return {CollectionType::kMinor, StackState::kMayContainHeapPointers,
- MarkingType::kAtomic, SweepingType::kAtomic};
- }
-
- CollectionType collection_type = CollectionType::kMajor;
- StackState stack_state = StackState::kMayContainHeapPointers;
- MarkingType marking_type = MarkingType::kAtomic;
- SweepingType sweeping_type = SweepingType::kAtomic;
- FreeMemoryHandling free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
- IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
- };
-
// Executes a garbage collection specified in config.
- virtual void CollectGarbage(Config) = 0;
- virtual void StartIncrementalGarbageCollection(Config) = 0;
+ virtual void CollectGarbage(GCConfig) = 0;
+ virtual void StartIncrementalGarbageCollection(GCConfig) = 0;
// The current epoch that the GC maintains. The epoch is increased on every
// GC invocation.
diff --git a/deps/v8/src/heap/cppgc/gc-invoker.cc b/deps/v8/src/heap/cppgc/gc-invoker.cc
index 1bddad7a7e..8561437552 100644
--- a/deps/v8/src/heap/cppgc/gc-invoker.cc
+++ b/deps/v8/src/heap/cppgc/gc-invoker.cc
@@ -8,7 +8,6 @@
#include "include/cppgc/common.h"
#include "include/cppgc/platform.h"
-#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/task-handle.h"
namespace cppgc {
@@ -22,8 +21,8 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
GCInvokerImpl(const GCInvokerImpl&) = delete;
GCInvokerImpl& operator=(const GCInvokerImpl&) = delete;
- void CollectGarbage(GarbageCollector::Config) final;
- void StartIncrementalGarbageCollection(GarbageCollector::Config) final;
+ void CollectGarbage(GCConfig) final;
+ void StartIncrementalGarbageCollection(GCConfig) final;
size_t epoch() const final { return collector_->epoch(); }
const EmbedderStackState* override_stack_state() const final {
return collector_->override_stack_state();
@@ -35,7 +34,7 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
using Handle = SingleThreadedHandle;
static Handle Post(GarbageCollector* collector, cppgc::TaskRunner* runner,
- GarbageCollector::Config config) {
+ GCConfig config) {
auto task =
std::make_unique<GCInvoker::GCInvokerImpl::GCTask>(collector, config);
auto handle = task->GetHandle();
@@ -43,8 +42,7 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
return handle;
}
- explicit GCTask(GarbageCollector* collector,
- GarbageCollector::Config config)
+ explicit GCTask(GarbageCollector* collector, GCConfig config)
: collector_(collector),
config_(config),
handle_(Handle::NonEmptyTag{}),
@@ -63,7 +61,7 @@ class GCInvoker::GCInvokerImpl final : public GarbageCollector {
Handle GetHandle() { return handle_; }
GarbageCollector* collector_;
- GarbageCollector::Config config_;
+ GCConfig config_;
Handle handle_;
size_t saved_epoch_;
};
@@ -87,10 +85,9 @@ GCInvoker::GCInvokerImpl::~GCInvokerImpl() {
}
}
-void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
+void GCInvoker::GCInvokerImpl::CollectGarbage(GCConfig config) {
DCHECK_EQ(config.marking_type, cppgc::Heap::MarkingType::kAtomic);
- if ((config.stack_state ==
- GarbageCollector::Config::StackState::kNoHeapPointers) ||
+ if ((config.stack_state == StackState::kNoHeapPointers) ||
(stack_support_ ==
cppgc::Heap::StackSupport::kSupportsConservativeStackScan)) {
collector_->CollectGarbage(config);
@@ -98,8 +95,7 @@ void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
platform_->GetForegroundTaskRunner()->NonNestableTasksEnabled()) {
if (!gc_task_handle_) {
// Force a precise GC since it will run in a non-nestable task.
- config.stack_state =
- GarbageCollector::Config::StackState::kNoHeapPointers;
+ config.stack_state = StackState::kNoHeapPointers;
DCHECK_NE(cppgc::Heap::StackSupport::kSupportsConservativeStackScan,
stack_support_);
gc_task_handle_ = GCTask::Post(
@@ -109,7 +105,7 @@ void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
}
void GCInvoker::GCInvokerImpl::StartIncrementalGarbageCollection(
- GarbageCollector::Config config) {
+ GCConfig config) {
DCHECK_NE(config.marking_type, cppgc::Heap::MarkingType::kAtomic);
if ((stack_support_ !=
cppgc::Heap::StackSupport::kSupportsConservativeStackScan) &&
@@ -134,12 +130,11 @@ GCInvoker::GCInvoker(GarbageCollector* collector, cppgc::Platform* platform,
GCInvoker::~GCInvoker() = default;
-void GCInvoker::CollectGarbage(GarbageCollector::Config config) {
+void GCInvoker::CollectGarbage(GCConfig config) {
impl_->CollectGarbage(config);
}
-void GCInvoker::StartIncrementalGarbageCollection(
- GarbageCollector::Config config) {
+void GCInvoker::StartIncrementalGarbageCollection(GCConfig config) {
impl_->StartIncrementalGarbageCollection(config);
}
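A stand-alone sketch of the dispatch decision in GCInvokerImpl::CollectGarbage after the rename; ShouldRunInPlace is illustrative, and the real code above additionally posts a GCTask and forces StackState::kNoHeapPointers for the deferred run:
// The GC runs immediately when either the caller guarantees no heap pointers
// on the stack or the platform supports conservative stack scanning; otherwise
// it is deferred to a non-nestable task as a precise GC.
bool ShouldRunInPlace(const cppgc::internal::GCConfig& config,
                      cppgc::Heap::StackSupport stack_support) {
  return config.stack_state ==
             cppgc::internal::StackState::kNoHeapPointers ||
         stack_support ==
             cppgc::Heap::StackSupport::kSupportsConservativeStackScan;
}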
diff --git a/deps/v8/src/heap/cppgc/gc-invoker.h b/deps/v8/src/heap/cppgc/gc-invoker.h
index ceebca139c..c3c379721b 100644
--- a/deps/v8/src/heap/cppgc/gc-invoker.h
+++ b/deps/v8/src/heap/cppgc/gc-invoker.h
@@ -34,8 +34,8 @@ class V8_EXPORT_PRIVATE GCInvoker final : public GarbageCollector {
GCInvoker(const GCInvoker&) = delete;
GCInvoker& operator=(const GCInvoker&) = delete;
- void CollectGarbage(GarbageCollector::Config) final;
- void StartIncrementalGarbageCollection(GarbageCollector::Config) final;
+ void CollectGarbage(GCConfig) final;
+ void StartIncrementalGarbageCollection(GCConfig) final;
size_t epoch() const final;
const EmbedderStackState* override_stack_state() const final;
diff --git a/deps/v8/src/heap/cppgc/globals.h b/deps/v8/src/heap/cppgc/globals.h
index 19d5cca59c..84fb389a7e 100644
--- a/deps/v8/src/heap/cppgc/globals.h
+++ b/deps/v8/src/heap/cppgc/globals.h
@@ -80,6 +80,12 @@ constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
#endif // !defined(CPPGC_2GB_CAGE)
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
+#if defined(CPPGC_POINTER_COMPRESSION)
+constexpr size_t kSlotSize = sizeof(uint32_t);
+#else // !defined(CPPGC_POINTER_COMPRESSION)
+constexpr size_t kSlotSize = sizeof(uintptr_t);
+#endif // !defined(CPPGC_POINTER_COMPRESSION)
+
} // namespace internal
} // namespace cppgc
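A small compile-time restatement of the invariant behind kSlotSize (illustrative static_asserts, not part of the patch): with pointer compression a remembered slot holds a 32-bit compressed member, otherwise a full machine word.
#if defined(CPPGC_POINTER_COMPRESSION)
static_assert(cppgc::internal::kSlotSize == sizeof(uint32_t),
              "compressed members occupy 4-byte slots");
#else
static_assert(cppgc::internal::kSlotSize == sizeof(uintptr_t),
              "uncompressed members occupy word-sized slots");
#endif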
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index d057d820c8..3b17bb8aa6 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -250,18 +250,16 @@ void HeapBase::Terminate() {
#endif // defined(CPPGC_YOUNG_GENERATION)
in_atomic_pause_ = true;
- stats_collector()->NotifyMarkingStarted(
- GarbageCollector::Config::CollectionType::kMajor,
- GarbageCollector::Config::MarkingType::kAtomic,
- GarbageCollector::Config::IsForcedGC::kForced);
+ stats_collector()->NotifyMarkingStarted(CollectionType::kMajor,
+ GCConfig::MarkingType::kAtomic,
+ GCConfig::IsForcedGC::kForced);
object_allocator().ResetLinearAllocationBuffers();
stats_collector()->NotifyMarkingCompleted(0);
ExecutePreFinalizers();
// TODO(chromium:1029379): Prefinalizers may black-allocate objects (under a
// compile-time option). Run sweeping with forced finalization here.
- sweeper().Start(
- {Sweeper::SweepingConfig::SweepingType::kAtomic,
- Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep});
+ sweeper().Start({SweepingConfig::SweepingType::kAtomic,
+ SweepingConfig::CompactableSpaceHandling::kSweep});
in_atomic_pause_ = false;
sweeper().NotifyDoneIfNeeded();
diff --git a/deps/v8/src/heap/cppgc/heap-config.h b/deps/v8/src/heap/cppgc/heap-config.h
new file mode 100644
index 0000000000..a89581387b
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/heap-config.h
@@ -0,0 +1,103 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_CONFIG_H_
+#define V8_HEAP_CPPGC_HEAP_CONFIG_H_
+
+#include "include/cppgc/heap.h"
+
+namespace cppgc::internal {
+
+using StackState = cppgc::Heap::StackState;
+
+enum class CollectionType : uint8_t {
+ kMinor,
+ kMajor,
+};
+
+struct MarkingConfig {
+ using MarkingType = cppgc::Heap::MarkingType;
+ enum class IsForcedGC : uint8_t {
+ kNotForced,
+ kForced,
+ };
+
+ static constexpr MarkingConfig Default() { return {}; }
+
+ const CollectionType collection_type = CollectionType::kMajor;
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ MarkingType marking_type = MarkingType::kIncremental;
+ IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
+};
+
+struct SweepingConfig {
+ using SweepingType = cppgc::Heap::SweepingType;
+ enum class CompactableSpaceHandling { kSweep, kIgnore };
+ enum class FreeMemoryHandling { kDoNotDiscard, kDiscardWherePossible };
+
+ SweepingType sweeping_type = SweepingType::kIncrementalAndConcurrent;
+ CompactableSpaceHandling compactable_space_handling =
+ CompactableSpaceHandling::kSweep;
+ FreeMemoryHandling free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
+};
+
+struct GCConfig {
+ using MarkingType = MarkingConfig::MarkingType;
+ using SweepingType = SweepingConfig::SweepingType;
+ using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
+ using IsForcedGC = MarkingConfig::IsForcedGC;
+
+ static constexpr GCConfig ConservativeAtomicConfig() {
+ return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr GCConfig PreciseAtomicConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr GCConfig ConservativeIncrementalConfig() {
+ return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ MarkingType::kIncremental, SweepingType::kAtomic};
+ }
+
+ static constexpr GCConfig PreciseIncrementalConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kIncremental, SweepingType::kAtomic};
+ }
+
+ static constexpr GCConfig
+ PreciseIncrementalMarkingConcurrentSweepingConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kIncremental, SweepingType::kIncrementalAndConcurrent};
+ }
+
+ static constexpr GCConfig PreciseConcurrentConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kIncrementalAndConcurrent,
+ SweepingType::kIncrementalAndConcurrent};
+ }
+
+ static constexpr GCConfig MinorPreciseAtomicConfig() {
+ return {CollectionType::kMinor, StackState::kNoHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr GCConfig MinorConservativeAtomicConfig() {
+ return {CollectionType::kMinor, StackState::kMayContainHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ CollectionType collection_type = CollectionType::kMajor;
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ MarkingType marking_type = MarkingType::kAtomic;
+ SweepingType sweeping_type = SweepingType::kAtomic;
+ FreeMemoryHandling free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
+ IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
+};
+
+} // namespace cppgc::internal
+
+#endif // V8_HEAP_CPPGC_HEAP_CONFIG_H_
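A usage sketch of the new flat configuration type; TriggerForcedMajorGC is illustrative and collector stands for any GarbageCollector implementation from this patch:
// Callers now spell configurations without the nested
// GarbageCollector::Config scope and can tweak individual fields.
void TriggerForcedMajorGC(cppgc::internal::GarbageCollector& collector) {
  cppgc::internal::GCConfig config =
      cppgc::internal::GCConfig::PreciseAtomicConfig();
  config.free_memory_handling =
      cppgc::internal::GCConfig::FreeMemoryHandling::kDiscardWherePossible;
  config.is_forced_gc = cppgc::internal::GCConfig::IsForcedGC::kForced;
  collector.CollectGarbage(config);
}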
diff --git a/deps/v8/src/heap/cppgc/heap-growing.cc b/deps/v8/src/heap/cppgc/heap-growing.cc
index 1055626a0a..0af0119863 100644
--- a/deps/v8/src/heap/cppgc/heap-growing.cc
+++ b/deps/v8/src/heap/cppgc/heap-growing.cc
@@ -93,14 +93,12 @@ void HeapGrowing::HeapGrowingImpl::AllocatedObjectSizeIncreased(size_t) {
size_t allocated_object_size = stats_collector_->allocated_object_size();
if (allocated_object_size > limit_for_atomic_gc_) {
collector_->CollectGarbage(
- {GarbageCollector::Config::CollectionType::kMajor,
- GarbageCollector::Config::StackState::kMayContainHeapPointers,
- GarbageCollector::Config::MarkingType::kAtomic, sweeping_support_});
+ {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ GCConfig::MarkingType::kAtomic, sweeping_support_});
} else if (allocated_object_size > limit_for_incremental_gc_) {
if (marking_support_ == cppgc::Heap::MarkingType::kAtomic) return;
collector_->StartIncrementalGarbageCollection(
- {GarbageCollector::Config::CollectionType::kMajor,
- GarbageCollector::Config::StackState::kMayContainHeapPointers,
+ {CollectionType::kMajor, StackState::kMayContainHeapPointers,
marking_support_, sweeping_support_});
}
}
diff --git a/deps/v8/src/heap/cppgc/heap-page.cc b/deps/v8/src/heap/cppgc/heap-page.cc
index 07baf2e79d..7e85eeca47 100644
--- a/deps/v8/src/heap/cppgc/heap-page.cc
+++ b/deps/v8/src/heap/cppgc/heap-page.cc
@@ -18,6 +18,7 @@
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
#include "src/heap/cppgc/raw-heap.h"
+#include "src/heap/cppgc/remembered-set.h"
#include "src/heap/cppgc/stats-collector.h"
namespace cppgc {
@@ -85,6 +86,13 @@ ConstAddress BasePage::PayloadEnd() const {
return const_cast<BasePage*>(this)->PayloadEnd();
}
+size_t BasePage::AllocatedSize() const {
+ return is_large() ? LargePage::PageHeaderSize() +
+ LargePage::From(this)->PayloadSize()
+ : NormalPage::From(this)->PayloadSize() +
+ RoundUp(sizeof(NormalPage), kAllocationGranularity);
+}
+
size_t BasePage::AllocatedBytesAtLastGC() const {
return is_large() ? LargePage::From(this)->AllocatedBytesAtLastGC()
: NormalPage::From(this)->AllocatedBytesAtLastGC();
@@ -120,8 +128,32 @@ const HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
return header;
}
+#if defined(CPPGC_YOUNG_GENERATION)
+void BasePage::AllocateSlotSet() {
+ DCHECK_NULL(slot_set_);
+ slot_set_ = decltype(slot_set_)(
+ static_cast<SlotSet*>(
+ SlotSet::Allocate(SlotSet::BucketsForSize(AllocatedSize()))),
+ SlotSetDeleter{AllocatedSize()});
+}
+
+void BasePage::SlotSetDeleter::operator()(SlotSet* slot_set) const {
+ DCHECK_NOT_NULL(slot_set);
+ SlotSet::Delete(slot_set, SlotSet::BucketsForSize(page_size_));
+}
+
+void BasePage::ResetSlotSet() { slot_set_.reset(); }
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
BasePage::BasePage(HeapBase& heap, BaseSpace& space, PageType type)
- : BasePageHandle(heap), space_(space), type_(type) {
+ : BasePageHandle(heap),
+ space_(space),
+ type_(type)
+#if defined(CPPGC_YOUNG_GENERATION)
+ ,
+ slot_set_(nullptr, SlotSetDeleter{})
+#endif // defined(CPPGC_YOUNG_GENERATION)
+{
DCHECK_EQ(0u, (reinterpret_cast<uintptr_t>(this) - kGuardPageSize) &
kPageOffsetMask);
DCHECK_EQ(&heap.raw_heap(), space_.raw_heap());
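The slot set added above is owned through a unique_ptr whose deleter carries the page size, because SlotSet::Delete needs the bucket count. A condensed sketch of that pattern using only the calls visible in this hunk (the standalone names are illustrative and assume CPPGC_YOUNG_GENERATION):
#include <memory>

// Mirrors BasePage::AllocateSlotSet/SlotSetDeleter: the deleter remembers how
// large the page was so it can recompute the number of buckets on destruction.
struct SlotSetDeleterSketch {
  void operator()(cppgc::internal::SlotSet* slot_set) const {
    cppgc::internal::SlotSet::Delete(
        slot_set, cppgc::internal::SlotSet::BucketsForSize(page_size));
  }
  size_t page_size = 0;
};

std::unique_ptr<cppgc::internal::SlotSet, SlotSetDeleterSketch> MakeSlotSet(
    size_t allocated_size) {
  auto* raw = static_cast<cppgc::internal::SlotSet*>(
      cppgc::internal::SlotSet::Allocate(
          cppgc::internal::SlotSet::BucketsForSize(allocated_size)));
  return {raw, SlotSetDeleterSketch{allocated_size}};
}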
diff --git a/deps/v8/src/heap/cppgc/heap-page.h b/deps/v8/src/heap/cppgc/heap-page.h
index f20f159e73..a60bb1448d 100644
--- a/deps/v8/src/heap/cppgc/heap-page.h
+++ b/deps/v8/src/heap/cppgc/heap-page.h
@@ -8,6 +8,7 @@
#include "include/cppgc/internal/base-page-handle.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
+#include "src/heap/base/basic-slot-set.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/object-start-bitmap.h"
@@ -20,6 +21,7 @@ class NormalPageSpace;
class LargePageSpace;
class HeapBase;
class PageBackend;
+class SlotSet;
class V8_EXPORT_PRIVATE BasePage : public BasePageHandle {
public:
@@ -45,6 +47,9 @@ class V8_EXPORT_PRIVATE BasePage : public BasePageHandle {
Address PayloadEnd();
ConstAddress PayloadEnd() const;
+ // Size of the payload with the page header.
+ size_t AllocatedSize() const;
+
// Returns the size of live objects on the page at the last GC.
// The counter is updated after sweeping.
size_t AllocatedBytesAtLastGC() const;
@@ -92,14 +97,29 @@ class V8_EXPORT_PRIVATE BasePage : public BasePageHandle {
contains_young_objects_ = value;
}
+#if defined(CPPGC_YOUNG_GENERATION)
+ V8_INLINE SlotSet* slot_set() const { return slot_set_.get(); }
+ V8_INLINE SlotSet& GetOrAllocateSlotSet();
+ void ResetSlotSet();
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
protected:
enum class PageType : uint8_t { kNormal, kLarge };
BasePage(HeapBase&, BaseSpace&, PageType);
private:
+ struct SlotSetDeleter {
+ void operator()(SlotSet*) const;
+ size_t page_size_ = 0;
+ };
+ void AllocateSlotSet();
+
BaseSpace& space_;
PageType type_;
bool contains_young_objects_ = false;
+#if defined(CPPGC_YOUNG_GENERATION)
+ std::unique_ptr<SlotSet, SlotSetDeleter> slot_set_;
+#endif // defined(CPPGC_YOUNG_GENERATION)
size_t discarded_memory_ = 0;
};
@@ -311,6 +331,13 @@ const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
return *header;
}
+#if defined(CPPGC_YOUNG_GENERATION)
+SlotSet& BasePage::GetOrAllocateSlotSet() {
+ if (!slot_set_) AllocateSlotSet();
+ return *slot_set_;
+}
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 9cd52b8dd0..7bc55b51de 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -45,11 +45,10 @@ std::unique_ptr<Heap> Heap::Create(std::shared_ptr<cppgc::Platform> platform,
void Heap::ForceGarbageCollectionSlow(const char* source, const char* reason,
Heap::StackState stack_state) {
internal::Heap::From(this)->CollectGarbage(
- {internal::GarbageCollector::Config::CollectionType::kMajor, stack_state,
- MarkingType::kAtomic, SweepingType::kAtomic,
- internal::GarbageCollector::Config::FreeMemoryHandling::
- kDiscardWherePossible,
- internal::GarbageCollector::Config::IsForcedGC::kForced});
+ {internal::CollectionType::kMajor, stack_state, MarkingType::kAtomic,
+ SweepingType::kAtomic,
+ internal::GCConfig::FreeMemoryHandling::kDiscardWherePossible,
+ internal::GCConfig::IsForcedGC::kForced});
}
AllocationHandle& Heap::GetAllocationHandle() {
@@ -62,12 +61,11 @@ namespace internal {
namespace {
-void CheckConfig(Heap::Config config, HeapBase::MarkingType marking_support,
+void CheckConfig(GCConfig config, HeapBase::MarkingType marking_support,
HeapBase::SweepingType sweeping_support) {
- CHECK_WITH_MSG(
- (config.collection_type != Heap::Config::CollectionType::kMinor) ||
- (config.stack_state == Heap::Config::StackState::kNoHeapPointers),
- "Minor GCs with stack is currently not supported");
+ CHECK_WITH_MSG((config.collection_type != CollectionType::kMinor) ||
+ (config.stack_state == StackState::kNoHeapPointers),
+ "Minor GCs with stack is currently not supported");
CHECK_LE(static_cast<int>(config.marking_type),
static_cast<int>(marking_support));
CHECK_LE(static_cast<int>(config.sweeping_type),
@@ -94,17 +92,16 @@ Heap::~Heap() {
// Gracefully finish already running GC if any, but don't finalize live
// objects.
FinalizeIncrementalGarbageCollectionIfRunning(
- {Config::CollectionType::kMajor,
- Config::StackState::kMayContainHeapPointers,
- Config::MarkingType::kAtomic, Config::SweepingType::kAtomic});
+ {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ GCConfig::MarkingType::kAtomic, GCConfig::SweepingType::kAtomic});
{
subtle::NoGarbageCollectionScope no_gc(*this);
sweeper_.FinishIfRunning();
}
}
-void Heap::CollectGarbage(Config config) {
- DCHECK_EQ(Config::MarkingType::kAtomic, config.marking_type);
+void Heap::CollectGarbage(GCConfig config) {
+ DCHECK_EQ(GCConfig::MarkingType::kAtomic, config.marking_type);
CheckConfig(config, marking_support_, sweeping_support_);
if (in_no_gc_scope()) return;
@@ -118,9 +115,9 @@ void Heap::CollectGarbage(Config config) {
FinalizeGarbageCollection(config.stack_state);
}
-void Heap::StartIncrementalGarbageCollection(Config config) {
- DCHECK_NE(Config::MarkingType::kAtomic, config.marking_type);
- DCHECK_NE(marking_support_, Config::MarkingType::kAtomic);
+void Heap::StartIncrementalGarbageCollection(GCConfig config) {
+ DCHECK_NE(GCConfig::MarkingType::kAtomic, config.marking_type);
+ DCHECK_NE(marking_support_, GCConfig::MarkingType::kAtomic);
CheckConfig(config, marking_support_, sweeping_support_);
if (IsMarking() || in_no_gc_scope()) return;
@@ -130,19 +127,19 @@ void Heap::StartIncrementalGarbageCollection(Config config) {
StartGarbageCollection(config);
}
-void Heap::FinalizeIncrementalGarbageCollectionIfRunning(Config config) {
+void Heap::FinalizeIncrementalGarbageCollectionIfRunning(GCConfig config) {
CheckConfig(config, marking_support_, sweeping_support_);
if (!IsMarking()) return;
DCHECK(!in_no_gc_scope());
- DCHECK_NE(Config::MarkingType::kAtomic, config_.marking_type);
+ DCHECK_NE(GCConfig::MarkingType::kAtomic, config_.marking_type);
config_ = config;
FinalizeGarbageCollection(config.stack_state);
}
-void Heap::StartGarbageCollection(Config config) {
+void Heap::StartGarbageCollection(GCConfig config) {
DCHECK(!IsMarking());
DCHECK(!in_no_gc_scope());
@@ -152,18 +149,17 @@ void Heap::StartGarbageCollection(Config config) {
epoch_++;
#if defined(CPPGC_YOUNG_GENERATION)
- if (config.collection_type == Config::CollectionType::kMajor)
+ if (config.collection_type == CollectionType::kMajor)
SequentialUnmarker unmarker(raw_heap());
#endif // defined(CPPGC_YOUNG_GENERATION)
- const Marker::MarkingConfig marking_config{
- config.collection_type, config.stack_state, config.marking_type,
- config.is_forced_gc};
+ const MarkingConfig marking_config{config.collection_type, config.stack_state,
+ config.marking_type, config.is_forced_gc};
marker_ = std::make_unique<Marker>(AsBase(), platform_.get(), marking_config);
marker_->StartMarking();
}
-void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
+void Heap::FinalizeGarbageCollection(StackState stack_state) {
DCHECK(IsMarking());
DCHECK(!in_no_gc_scope());
CHECK(!in_disallow_gc_scope());
@@ -203,9 +199,8 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
#endif // defined(CPPGC_YOUNG_GENERATION)
subtle::NoGarbageCollectionScope no_gc(*this);
- const Sweeper::SweepingConfig sweeping_config{
- config_.sweeping_type,
- Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep,
+ const SweepingConfig sweeping_config{
+ config_.sweeping_type, SweepingConfig::CompactableSpaceHandling::kSweep,
config_.free_memory_handling};
sweeper_.Start(sweeping_config);
in_atomic_pause_ = false;
@@ -221,7 +216,7 @@ void Heap::EnableGenerationalGC() {
void Heap::DisableHeapGrowingForTesting() { growing_.DisableForTesting(); }
void Heap::FinalizeIncrementalGarbageCollectionIfNeeded(
- Config::StackState stack_state) {
+ StackState stack_state) {
StatsCollector::EnabledScope stats_scope(
stats_collector(), StatsCollector::kMarkIncrementalFinalize);
FinalizeGarbageCollection(stack_state);
@@ -230,10 +225,9 @@ void Heap::FinalizeIncrementalGarbageCollectionIfNeeded(
void Heap::StartIncrementalGarbageCollectionForTesting() {
DCHECK(!IsMarking());
DCHECK(!in_no_gc_scope());
- StartGarbageCollection({Config::CollectionType::kMajor,
- Config::StackState::kNoHeapPointers,
- Config::MarkingType::kIncrementalAndConcurrent,
- Config::SweepingType::kIncrementalAndConcurrent});
+ StartGarbageCollection({CollectionType::kMajor, StackState::kNoHeapPointers,
+ GCConfig::MarkingType::kIncrementalAndConcurrent,
+ GCConfig::SweepingType::kIncrementalAndConcurrent});
}
void Heap::FinalizeIncrementalGarbageCollectionForTesting(
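After this change a single GCConfig feeds both phases; a condensed sketch of how it decomposes into the phase-specific configs (the free functions are illustrative, mirroring StartGarbageCollection and FinalizeGarbageCollection above):
cppgc::internal::MarkingConfig ToMarkingConfig(
    const cppgc::internal::GCConfig& config) {
  // The marker only needs collection type, stack state, marking type, and
  // whether the GC was forced.
  return {config.collection_type, config.stack_state, config.marking_type,
          config.is_forced_gc};
}

cppgc::internal::SweepingConfig ToSweepingConfig(
    const cppgc::internal::GCConfig& config) {
  // The sweeper only needs the sweeping type, the space handling, and whether
  // free memory should be discarded.
  return {config.sweeping_type,
          cppgc::internal::SweepingConfig::CompactableSpaceHandling::kSweep,
          config.free_memory_handling};
}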
diff --git a/deps/v8/src/heap/cppgc/heap.h b/deps/v8/src/heap/cppgc/heap.h
index cc027974f8..3a9e09fa5f 100644
--- a/deps/v8/src/heap/cppgc/heap.h
+++ b/deps/v8/src/heap/cppgc/heap.h
@@ -32,9 +32,9 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
HeapBase& AsBase() { return *this; }
const HeapBase& AsBase() const { return *this; }
- void CollectGarbage(Config) final;
- void StartIncrementalGarbageCollection(Config) final;
- void FinalizeIncrementalGarbageCollectionIfRunning(Config);
+ void CollectGarbage(GCConfig) final;
+ void StartIncrementalGarbageCollection(GCConfig) final;
+ void FinalizeIncrementalGarbageCollectionIfRunning(GCConfig);
size_t epoch() const final { return epoch_; }
const EmbedderStackState* override_stack_state() const final {
@@ -46,15 +46,15 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
void DisableHeapGrowingForTesting();
private:
- void StartGarbageCollection(Config);
- void FinalizeGarbageCollection(Config::StackState);
+ void StartGarbageCollection(GCConfig);
+ void FinalizeGarbageCollection(StackState);
- void FinalizeIncrementalGarbageCollectionIfNeeded(Config::StackState) final;
+ void FinalizeIncrementalGarbageCollectionIfNeeded(StackState) final;
void StartIncrementalGarbageCollectionForTesting() final;
void FinalizeIncrementalGarbageCollectionForTesting(EmbedderStackState) final;
- Config config_;
+ GCConfig config_;
GCInvoker gc_invoker_;
HeapGrowing growing_;
bool generational_gc_enabled_ = false;
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 056f18912e..11197dafb8 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -32,11 +32,10 @@ namespace internal {
namespace {
-bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
- HeapBase& heap) {
- if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
+bool EnterIncrementalMarkingIfNeeded(MarkingConfig config, HeapBase& heap) {
+ if (config.marking_type == MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
- Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
WriteBarrier::FlagUpdater::Enter();
heap.set_incremental_marking_in_progress(true);
return true;
@@ -44,11 +43,10 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
return false;
}
-bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
- HeapBase& heap) {
- if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
+bool ExitIncrementalMarkingIfNeeded(MarkingConfig config, HeapBase& heap) {
+ if (config.marking_type == MarkingConfig::MarkingType::kIncremental ||
config.marking_type ==
- Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
WriteBarrier::FlagUpdater::Exit();
heap.set_incremental_marking_in_progress(false);
return true;
@@ -87,7 +85,7 @@ class MarkerBase::IncrementalMarkingTask final : public cppgc::Task {
public:
using Handle = SingleThreadedHandle;
- IncrementalMarkingTask(MarkerBase*, MarkingConfig::StackState);
+ IncrementalMarkingTask(MarkerBase*, StackState);
static Handle Post(cppgc::TaskRunner*, MarkerBase*);
@@ -95,13 +93,13 @@ class MarkerBase::IncrementalMarkingTask final : public cppgc::Task {
void Run() final;
MarkerBase* const marker_;
- MarkingConfig::StackState stack_state_;
+ StackState stack_state_;
// TODO(chromium:1056170): Change to CancelableTask.
Handle handle_;
};
MarkerBase::IncrementalMarkingTask::IncrementalMarkingTask(
- MarkerBase* marker, MarkingConfig::StackState stack_state)
+ MarkerBase* marker, StackState stack_state)
: marker_(marker),
stack_state_(stack_state),
handle_(Handle::NonEmptyTag{}) {}
@@ -117,10 +115,9 @@ MarkerBase::IncrementalMarkingTask::Post(cppgc::TaskRunner* runner,
DCHECK_IMPLIES(marker->heap().stack_support() !=
HeapBase::StackSupport::kSupportsConservativeStackScan,
runner->NonNestableTasksEnabled());
- MarkingConfig::StackState stack_state_for_task =
- runner->NonNestableTasksEnabled()
- ? MarkingConfig::StackState::kNoHeapPointers
- : MarkingConfig::StackState::kMayContainHeapPointers;
+ const auto stack_state_for_task = runner->NonNestableTasksEnabled()
+ ? StackState::kNoHeapPointers
+ : StackState::kMayContainHeapPointers;
auto task =
std::make_unique<IncrementalMarkingTask>(marker, stack_state_for_task);
auto handle = task->handle_;
@@ -152,9 +149,8 @@ MarkerBase::MarkerBase(HeapBase& heap, cppgc::Platform* platform,
foreground_task_runner_(platform_->GetForegroundTaskRunner()),
mutator_marking_state_(heap, marking_worklists_,
heap.compactor().compaction_worklists()) {
- DCHECK_IMPLIES(
- config_.collection_type == MarkingConfig::CollectionType::kMinor,
- heap_.generational_gc_supported());
+ DCHECK_IMPLIES(config_.collection_type == CollectionType::kMinor,
+ heap_.generational_gc_supported());
}
MarkerBase::~MarkerBase() {
@@ -163,7 +159,7 @@ MarkerBase::~MarkerBase() {
// and should thus already be marked.
if (!marking_worklists_.not_fully_constructed_worklist()->IsEmpty()) {
#if DEBUG
- DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
+ DCHECK_NE(StackState::kNoHeapPointers, config_.stack_state);
std::unordered_set<HeapObjectHeader*> objects =
mutator_marking_state_.not_fully_constructed_worklist().Extract();
for (HeapObjectHeader* object : objects) DCHECK(object->IsMarked());
@@ -229,7 +225,7 @@ void MarkerBase::StartMarking() {
// Performing incremental or concurrent marking.
schedule_.NotifyIncrementalMarkingStart();
// Scanning the stack is expensive so we only do it at the atomic pause.
- VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
+ VisitRoots(StackState::kNoHeapPointers);
ScheduleIncrementalMarkingTask();
if (config_.marking_type ==
MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
@@ -244,14 +240,14 @@ void MarkerBase::StartMarking() {
}
void MarkerBase::HandleNotFullyConstructedObjects() {
- if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
+ if (config_.stack_state == StackState::kNoHeapPointers) {
mutator_marking_state_.FlushNotFullyConstructedObjects();
} else {
MarkNotFullyConstructedObjects();
}
}
-void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
+void MarkerBase::EnterAtomicPause(StackState stack_state) {
StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
StatsCollector::kAtomicMark);
StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
@@ -310,7 +306,7 @@ void MarkerBase::LeaveAtomicPause() {
heap().SetStackStateOfPrevGC(config_.stack_state);
}
-void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
+void MarkerBase::FinishMarking(StackState stack_state) {
DCHECK(is_marking_);
EnterAtomicPause(stack_state);
{
@@ -383,7 +379,7 @@ void MarkerBase::ProcessWeakness() {
#if defined(CPPGC_YOUNG_GENERATION)
if (heap().generational_gc_supported()) {
auto& remembered_set = heap().remembered_set();
- if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
+ if (config_.collection_type == CollectionType::kMinor) {
// Custom callbacks assume that untraced pointers point to not yet freed
// objects. They must make sure that upon callback completion no
// UntracedMember points to a freed object. This may not hold true if a
@@ -425,7 +421,7 @@ void MarkerBase::ProcessWeakness() {
DCHECK(marking_worklists_.marking_worklist()->IsEmpty());
}
-void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
+void MarkerBase::VisitRoots(StackState stack_state) {
StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
StatsCollector::kMarkVisitRoots);
@@ -442,13 +438,13 @@ void MarkerBase::VisitRoots(MarkingConfig::StackState stack_state) {
}
}
- if (stack_state != MarkingConfig::StackState::kNoHeapPointers) {
+ if (stack_state != StackState::kNoHeapPointers) {
StatsCollector::DisabledScope stack_stats_scope(
heap().stats_collector(), StatsCollector::kMarkVisitStack);
heap().stack()->IteratePointers(&stack_visitor());
}
#if defined(CPPGC_YOUNG_GENERATION)
- if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
+ if (config_.collection_type == CollectionType::kMinor) {
StatsCollector::EnabledScope stats_scope(
heap().stats_collector(), StatsCollector::kMarkVisitRememberedSets);
heap().remembered_set().Visit(visitor(), mutator_marking_state_);
@@ -482,13 +478,12 @@ void MarkerBase::ScheduleIncrementalMarkingTask() {
IncrementalMarkingTask::Post(foreground_task_runner_.get(), this);
}
-bool MarkerBase::IncrementalMarkingStepForTesting(
- MarkingConfig::StackState stack_state) {
+bool MarkerBase::IncrementalMarkingStepForTesting(StackState stack_state) {
return IncrementalMarkingStep(stack_state);
}
-bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
- if (stack_state == MarkingConfig::StackState::kNoHeapPointers) {
+bool MarkerBase::IncrementalMarkingStep(StackState stack_state) {
+ if (stack_state == StackState::kNoHeapPointers) {
mutator_marking_state_.FlushNotFullyConstructedObjects();
}
config_.stack_state = stack_state;
diff --git a/deps/v8/src/heap/cppgc/marker.h b/deps/v8/src/heap/cppgc/marker.h
index 9c471250ad..7586a43957 100644
--- a/deps/v8/src/heap/cppgc/marker.h
+++ b/deps/v8/src/heap/cppgc/marker.h
@@ -15,6 +15,7 @@
#include "src/heap/base/worklist.h"
#include "src/heap/cppgc/concurrent-marker.h"
#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-config.h"
#include "src/heap/cppgc/incremental-marking-schedule.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
@@ -39,26 +40,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
public:
class IncrementalMarkingTask;
- struct MarkingConfig {
- enum class CollectionType : uint8_t {
- kMinor,
- kMajor,
- };
- using StackState = cppgc::Heap::StackState;
- using MarkingType = cppgc::Heap::MarkingType;
- enum class IsForcedGC : uint8_t {
- kNotForced,
- kForced,
- };
-
- static constexpr MarkingConfig Default() { return {}; }
-
- const CollectionType collection_type = CollectionType::kMajor;
- StackState stack_state = StackState::kMayContainHeapPointers;
- MarkingType marking_type = MarkingType::kIncremental;
- IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
- };
-
enum class WriteBarrierType {
kDijkstra,
kSteele,
@@ -89,7 +70,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
// - stops incremental/concurrent marking;
// - flushes back any in-construction worklists if needed;
// - Updates the MarkingConfig if the stack state has changed;
- void EnterAtomicPause(MarkingConfig::StackState);
+ void EnterAtomicPause(StackState);
// Makes marking progress. A `marked_bytes_limit` of 0 means that the limit
// is determined by the internal marking scheduler.
@@ -113,7 +94,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
// - AdvanceMarkingWithLimits()
// - ProcessWeakness()
// - LeaveAtomicPause()
- void FinishMarking(MarkingConfig::StackState);
+ void FinishMarking(StackState);
void ProcessWeakness();
@@ -134,7 +115,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
void SetMainThreadMarkingDisabledForTesting(bool);
void WaitForConcurrentMarkingForTesting();
void ClearAllWorklistsForTesting();
- bool IncrementalMarkingStepForTesting(MarkingConfig::StackState);
+ bool IncrementalMarkingStepForTesting(StackState);
MarkingWorklists& MarkingWorklistsForTesting() { return marking_worklists_; }
MutatorMarkingState& MutatorMarkingStateForTesting() {
@@ -157,7 +138,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool ProcessWorklistsWithDeadline(size_t, v8::base::TimeTicks);
- void VisitRoots(MarkingConfig::StackState);
+ void VisitRoots(StackState);
bool VisitCrossThreadPersistentsIfNeeded();
@@ -165,7 +146,7 @@ class V8_EXPORT_PRIVATE MarkerBase {
void ScheduleIncrementalMarkingTask();
- bool IncrementalMarkingStep(MarkingConfig::StackState);
+ bool IncrementalMarkingStep(StackState);
void AdvanceMarkingOnAllocation();
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index a64a6d5f25..666e715cd7 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -36,7 +36,7 @@ void VerificationState::VerifyMarked(const void* base_object_payload) const {
}
MarkingVerifierBase::MarkingVerifierBase(
- HeapBase& heap, Heap::Config::CollectionType collection_type,
+ HeapBase& heap, CollectionType collection_type,
VerificationState& verification_state,
std::unique_ptr<cppgc::Visitor> visitor)
: ConservativeTracingVisitor(heap, *heap.page_backend(), *visitor.get()),
@@ -45,7 +45,7 @@ MarkingVerifierBase::MarkingVerifierBase(
collection_type_(collection_type) {}
void MarkingVerifierBase::Run(
- Heap::Config::StackState stack_state, uintptr_t stack_end,
+ StackState stack_state, uintptr_t stack_end,
v8::base::Optional<size_t> expected_marked_bytes) {
Traverse(heap_.raw_heap());
// Avoid verifying the stack when running with TSAN as the TSAN runtime changes
@@ -61,7 +61,7 @@ void MarkingVerifierBase::Run(
// TODO(chromium:1325007): Investigate if Oilpan verification can be moved
// before V8 compaction or compaction never runs with stack.
#if !defined(THREAD_SANITIZER) && !defined(CPPGC_POINTER_COMPRESSION)
- if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
+ if (stack_state == StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
heap_.stack()->IteratePointersUnsafe(this, stack_end);
// The objects found through the unsafe iteration are only a subset of the
@@ -114,7 +114,7 @@ bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader& header) {
DCHECK(!header.IsFree());
#if defined(CPPGC_YOUNG_GENERATION)
- if (collection_type_ == Heap::Config::CollectionType::kMinor) {
+ if (collection_type_ == CollectionType::kMinor) {
auto& caged_heap = CagedHeap::Instance();
const auto age = CagedHeapLocalData::Get().age_table.GetAge(
caged_heap.OffsetFromAddress(header.ObjectStart()));
@@ -185,7 +185,7 @@ class VerificationVisitor final : public cppgc::Visitor {
} // namespace
MarkingVerifier::MarkingVerifier(HeapBase& heap_base,
- Heap::Config::CollectionType collection_type)
+ CollectionType collection_type)
: MarkingVerifierBase(heap_base, collection_type, state_,
std::make_unique<VerificationVisitor>(state_)) {}
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.h b/deps/v8/src/heap/cppgc/marking-verifier.h
index cb2eb4c80c..c966aea51f 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.h
+++ b/deps/v8/src/heap/cppgc/marking-verifier.h
@@ -41,11 +41,11 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
MarkingVerifierBase(const MarkingVerifierBase&) = delete;
MarkingVerifierBase& operator=(const MarkingVerifierBase&) = delete;
- void Run(Heap::Config::StackState, uintptr_t, v8::base::Optional<size_t>);
+ void Run(StackState, uintptr_t, v8::base::Optional<size_t>);
protected:
- MarkingVerifierBase(HeapBase&, Heap::Config::CollectionType,
- VerificationState&, std::unique_ptr<cppgc::Visitor>);
+ MarkingVerifierBase(HeapBase&, CollectionType, VerificationState&,
+ std::unique_ptr<cppgc::Visitor>);
private:
void VisitInConstructionConservatively(HeapObjectHeader&,
@@ -63,12 +63,12 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
&in_construction_objects_heap_;
size_t verifier_found_marked_bytes_ = 0;
bool verifier_found_marked_bytes_are_exact_ = true;
- Heap::Config::CollectionType collection_type_;
+ CollectionType collection_type_;
};
class V8_EXPORT_PRIVATE MarkingVerifier final : public MarkingVerifierBase {
public:
- MarkingVerifier(HeapBase&, Heap::Config::CollectionType);
+ MarkingVerifier(HeapBase&, CollectionType);
~MarkingVerifier() final = default;
private:
diff --git a/deps/v8/src/heap/cppgc/member-storage.cc b/deps/v8/src/heap/cppgc/member-storage.cc
index a0e4562472..c457c60ba4 100644
--- a/deps/v8/src/heap/cppgc/member-storage.cc
+++ b/deps/v8/src/heap/cppgc/member-storage.cc
@@ -4,6 +4,11 @@
#include "include/cppgc/internal/member-storage.h"
+#include "include/cppgc/garbage-collected.h"
+#include "include/cppgc/member.h"
+#include "src/base/compiler-specific.h"
+#include "src/base/macros.h"
+
namespace cppgc {
namespace internal {
@@ -11,5 +16,26 @@ namespace internal {
uintptr_t CageBaseGlobal::g_base_ = CageBaseGlobal::kLowerHalfWordMask;
#endif // defined(CPPGC_POINTER_COMPRESSION)
+// Debugging helpers.
+
+#if defined(CPPGC_POINTER_COMPRESSION)
+extern "C" V8_DONT_STRIP_SYMBOL V8_EXPORT_PRIVATE void*
+_cppgc_internal_Decompress_Compressed_Pointer(uint32_t cmprsd) {
+ return MemberStorage::Decompress(cmprsd);
+}
+#endif // defined(CPPGC_POINTER_COMPRESSION)
+
+class MemberDebugHelper final {
+ public:
+ static void* PrintUncompressed(MemberBase* m) {
+ return const_cast<void*>(m->GetRaw());
+ }
+};
+
+extern "C" V8_DONT_STRIP_SYMBOL V8_EXPORT_PRIVATE void*
+_cppgc_internal_Print_Member(MemberBase* m) {
+ return MemberDebugHelper::PrintUncompressed(m);
+}
+
} // namespace internal
} // namespace cppgc
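The exported, unstripped symbols above exist for ad-hoc inspection of Member fields, e.g. from a debugger. A hedged usage sketch (DumpMember and its includes are illustrative; only the extern "C" helper itself comes from the patch):
#include <cstdio>
#include "include/cppgc/member.h"

extern "C" void* _cppgc_internal_Print_Member(cppgc::internal::MemberBase*);

// Prints the raw (uncompressed) pointer stored in a Member slot.
void DumpMember(cppgc::internal::MemberBase* member) {
  std::printf("member -> %p\n", _cppgc_internal_Print_Member(member));
}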
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index 38a3ccd8e9..b88ba5c200 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -148,9 +148,9 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
void* result = TryAllocateLargeObject(page_backend_, large_space,
stats_collector_, size, gcinfo);
if (!result) {
- auto config = GarbageCollector::Config::ConservativeAtomicConfig();
+ auto config = GCConfig::ConservativeAtomicConfig();
config.free_memory_handling =
- GarbageCollector::Config::FreeMemoryHandling::kDiscardWherePossible;
+ GCConfig::FreeMemoryHandling::kDiscardWherePossible;
garbage_collector_.CollectGarbage(config);
result = TryAllocateLargeObject(page_backend_, large_space,
stats_collector_, size, gcinfo);
@@ -170,9 +170,9 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
}
if (!TryRefillLinearAllocationBuffer(space, request_size)) {
- auto config = GarbageCollector::Config::ConservativeAtomicConfig();
+ auto config = GCConfig::ConservativeAtomicConfig();
config.free_memory_handling =
- GarbageCollector::Config::FreeMemoryHandling::kDiscardWherePossible;
+ GCConfig::FreeMemoryHandling::kDiscardWherePossible;
garbage_collector_.CollectGarbage(config);
if (!TryRefillLinearAllocationBuffer(space, request_size)) {
oom_handler_("Oilpan: Normal allocation.");
@@ -187,42 +187,64 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
return result;
}
+bool ObjectAllocator::TryExpandAndRefillLinearAllocationBuffer(
+ NormalPageSpace& space) {
+ auto* const new_page = NormalPage::TryCreate(page_backend_, space);
+ if (!new_page) return false;
+
+ space.AddPage(new_page);
+ // Set linear allocation buffer to new page.
+ ReplaceLinearAllocationBuffer(space, stats_collector_,
+ new_page->PayloadStart(),
+ new_page->PayloadSize());
+ return true;
+}
+
bool ObjectAllocator::TryRefillLinearAllocationBuffer(NormalPageSpace& space,
size_t size) {
// Try to allocate from the freelist.
if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;
- // Lazily sweep pages of this heap until we find a freed area for this
- // allocation or we finish sweeping all pages of this heap.
Sweeper& sweeper = raw_heap_.heap()->sweeper();
- // TODO(chromium:1056170): Investigate whether this should be a loop which
- // would result in more aggressive re-use of memory at the expense of
- // potentially larger allocation time.
- if (sweeper.SweepForAllocationIfRunning(&space, size)) {
- // Sweeper found a block of at least `size` bytes. Allocation from the
- // free list may still fail as actual buckets are not exhaustively
- // searched for a suitable block. Instead, buckets are tested from larger
- // sizes that are guaranteed to fit the block to smaller bucket sizes that
- // may only potentially fit the block. For the bucket that may exactly fit
- // the allocation of `size` bytes (no overallocation), only the first
- // entry is checked.
- if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;
+ // Lazily sweep pages of this heap. This is not exhaustive to limit jank on
+ // allocation. Allocation from the free list may still fail as actual buckets
+ // are not exhaustively searched for a suitable block. Instead, buckets are
+ // tested from larger sizes that are guaranteed to fit the block to smaller
+ // bucket sizes that may only potentially fit the block. For the bucket that
+ // may exactly fit the allocation of `size` bytes (no overallocation), only
+ // the first entry is checked.
+ if (sweeper.SweepForAllocationIfRunning(
+ &space, size, v8::base::TimeDelta::FromMicroseconds(500)) &&
+ TryRefillLinearAllocationBufferFromFreeList(space, size)) {
+ return true;
}
- sweeper.FinishIfRunning();
- // TODO(chromium:1056170): Make use of the synchronously freed memory.
-
- auto* new_page = NormalPage::TryCreate(page_backend_, space);
- if (!new_page) {
- return false;
+ // Sweeping was off or did not yield any memory within its limited time
+ // budget. Expand at this point, as that is cheaper than possibly continuing
+ // to sweep the whole heap.
+ if (TryExpandAndRefillLinearAllocationBuffer(space)) return true;
+
+ // Expansion failed. Before finishing all sweeping, finish sweeping of the
+ // given space only, which is cheaper.
+ if (sweeper.SweepForAllocationIfRunning(&space, size,
+ v8::base::TimeDelta::Max()) &&
+ TryRefillLinearAllocationBufferFromFreeList(space, size)) {
+ return true;
}
- space.AddPage(new_page);
- // Set linear allocation buffer to new page.
- ReplaceLinearAllocationBuffer(space, stats_collector_,
- new_page->PayloadStart(),
- new_page->PayloadSize());
- return true;
+ // Heap expansion and sweeping of a space failed. At this point the caller
+ // could run into OOM or do a full GC, which needs to finish sweeping if it's
+ // running. Hence, we may as well finish sweeping here. Note that this is
+ // possibly very expensive but not more expensive than running a full GC as
+ // the alternative is OOM.
+ if (sweeper.FinishIfRunning()) {
+ // Sweeping may have added memory to the free list.
+ if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;
+
+ // Sweeping may have freed pages completely.
+ if (TryExpandAndRefillLinearAllocationBuffer(space)) return true;
+ }
+ return false;
}
bool ObjectAllocator::TryRefillLinearAllocationBufferFromFreeList(
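The rewritten refill path above forms a fallback ladder: free list, then a ~500µs bounded sweep of this space plus a free-list retry, then growing the heap by one page, then sweeping this space to completion, then finishing all sweeping and retrying both the free list and growth, and only then failing. A generic, self-contained sketch of that pattern (the helper is illustrative, not V8 code):
#include <functional>
#include <initializer_list>

// Try progressively more expensive refill strategies and stop at the first
// one that succeeds; the caller falls back to GC/OOM handling on failure.
bool TryInOrder(std::initializer_list<std::function<bool()>> steps) {
  for (const auto& step : steps) {
    if (step()) return true;
  }
  return false;
}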
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index ea01f671f7..77f26ce3b5 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -70,6 +70,7 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
bool TryRefillLinearAllocationBuffer(NormalPageSpace&, size_t);
bool TryRefillLinearAllocationBufferFromFreeList(NormalPageSpace&, size_t);
+ bool TryExpandAndRefillLinearAllocationBuffer(NormalPageSpace&);
RawHeap& raw_heap_;
PageBackend& page_backend_;
diff --git a/deps/v8/src/heap/cppgc/remembered-set.cc b/deps/v8/src/heap/cppgc/remembered-set.cc
index 485fb4057f..60e8f978ef 100644
--- a/deps/v8/src/heap/cppgc/remembered-set.cc
+++ b/deps/v8/src/heap/cppgc/remembered-set.cc
@@ -2,15 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#if defined(CPPGC_YOUNG_GENERATION)
+
#include "src/heap/cppgc/remembered-set.h"
#include <algorithm>
#include "include/cppgc/member.h"
#include "include/cppgc/visitor.h"
+#include "src/heap/base/basic-slot-set.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/marking-state.h"
namespace cppgc {
@@ -20,23 +24,54 @@ namespace {
enum class SlotType { kCompressed, kUncompressed };
-template <SlotType slot_type>
-void InvalidateRememberedSlots(std::set<void*>& slots, void* begin, void* end) {
+void EraseFromSet(std::set<void*>& set, void* begin, void* end) {
// TODO(1029379): The 2 binary walks can be optimized with a custom algorithm.
- auto from = slots.lower_bound(begin), to = slots.lower_bound(end);
- slots.erase(from, to);
+ auto from = set.lower_bound(begin), to = set.lower_bound(end);
+ set.erase(from, to);
+}
+
+// TODO(1029379): Make the implementation functions private functions of
+// OldToNewRememberedSet to avoid parameter passing.
+void InvalidateCompressedRememberedSlots(
+ const HeapBase& heap, void* begin, void* end,
+ std::set<void*>& remembered_slots_for_verification) {
+ DCHECK_LT(begin, end);
+
+ BasePage* page = BasePage::FromInnerAddress(&heap, begin);
+ DCHECK_NOT_NULL(page);
+ // The input range must reside within the same page.
+ DCHECK_EQ(page, BasePage::FromInnerAddress(
+ &heap, reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(end) - 1)));
+
+ auto* slot_set = page->slot_set();
+ if (!slot_set) return;
+
+ const size_t buckets_size = SlotSet::BucketsForSize(page->AllocatedSize());
+
+ const uintptr_t page_start = reinterpret_cast<uintptr_t>(page);
+ const uintptr_t ubegin = reinterpret_cast<uintptr_t>(begin);
+ const uintptr_t uend = reinterpret_cast<uintptr_t>(end);
+
+ slot_set->RemoveRange(ubegin - page_start, uend - page_start, buckets_size,
+ SlotSet::EmptyBucketMode::FREE_EMPTY_BUCKETS);
+#if DEBUG
+ EraseFromSet(remembered_slots_for_verification, begin, end);
+#endif // DEBUG
+}
+
+void InvalidateUncompressedRememberedSlots(
+ std::set<void*>& slots, void* begin, void* end,
+ std::set<void*>& remembered_slots_for_verification) {
+ EraseFromSet(slots, begin, end);
+#if DEBUG
+ EraseFromSet(remembered_slots_for_verification, begin, end);
+#endif // DEBUG
#if defined(ENABLE_SLOW_DCHECKS)
// Check that no remembered slots are referring to the freed area.
DCHECK(std::none_of(slots.begin(), slots.end(), [begin, end](void* slot) {
void* value = nullptr;
-#if defined(CPPGC_POINTER_COMPRESSION)
- if constexpr (slot_type == SlotType::kCompressed)
- value = CompressedPointer::Decompress(*reinterpret_cast<uint32_t*>(slot));
- else
- value = *reinterpret_cast<void**>(slot);
-#else // !defined(CPPGC_POINTER_COMPRESSION)
value = *reinterpret_cast<void**>(slot);
-#endif // !defined(CPPGC_POINTER_COMPRESSION)
return begin <= value && value < end;
}));
#endif // defined(ENABLE_SLOW_DCHECKS)
@@ -44,45 +79,155 @@ void InvalidateRememberedSlots(std::set<void*>& slots, void* begin, void* end) {
// Visit remembered set that was recorded in the generational barrier.
template <SlotType slot_type>
-void VisitRememberedSlots(const std::set<void*>& slots, const HeapBase& heap,
- MutatorMarkingState& mutator_marking_state) {
- for (void* slot : slots) {
- // Slot must always point to a valid, not freed object.
- auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
- ->ObjectHeaderFromInnerAddress(slot);
- // The age checking in the generational barrier is imprecise, since a card
- // may have mixed young/old objects. Check here precisely if the object is
- // old.
- if (slot_header.IsYoung()) continue;
- // The design of young generation requires collections to be executed at the
- // top level (with the guarantee that no objects are currently being in
- // construction). This can be ensured by running young GCs from safe points
- // or by reintroducing nested allocation scopes that avoid finalization.
- DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());
+void VisitSlot(const HeapBase& heap, const BasePage& page, Address slot,
+ MutatorMarkingState& marking_state,
+ const std::set<void*>& slots_for_verification) {
+#if defined(DEBUG)
+ DCHECK_EQ(BasePage::FromInnerAddress(&heap, slot), &page);
+ DCHECK_NE(slots_for_verification.end(), slots_for_verification.find(slot));
+#endif // defined(DEBUG)
+
+ // Slot must always point to a valid, not freed object.
+ auto& slot_header = page.ObjectHeaderFromInnerAddress(slot);
+ // The age checking in the generational barrier is imprecise, since a card
+ // may have mixed young/old objects. Check here precisely if the object is
+ // old.
+ if (slot_header.IsYoung()) return;
+ // The design of young generation requires collections to be executed at the
+ // top level (with the guarantee that no objects are currently being in
+ // construction). This can be ensured by running young GCs from safe points
+ // or by reintroducing nested allocation scopes that avoid finalization.
+ DCHECK(!slot_header.template IsInConstruction<AccessMode::kNonAtomic>());
#if defined(CPPGC_POINTER_COMPRESSION)
- void* value = nullptr;
- if constexpr (slot_type == SlotType::kCompressed) {
- value = CompressedPointer::Decompress(*reinterpret_cast<uint32_t*>(slot));
- } else {
- value = *reinterpret_cast<void**>(slot);
- }
+ void* value = nullptr;
+ if constexpr (slot_type == SlotType::kCompressed) {
+ value = CompressedPointer::Decompress(*reinterpret_cast<uint32_t*>(slot));
+ } else {
+ value = *reinterpret_cast<void**>(slot);
+ }
#else // !defined(CPPGC_POINTER_COMPRESSION)
- void* value = *reinterpret_cast<void**>(slot);
+ void* value = *reinterpret_cast<void**>(slot);
#endif // !defined(CPPGC_POINTER_COMPRESSION)
- // Slot could be updated to nullptr or kSentinelPointer by the mutator.
- if (value == kSentinelPointer || value == nullptr) continue;
+ // Slot could be updated to nullptr or kSentinelPointer by the mutator.
+ if (value == kSentinelPointer || value == nullptr) return;
-#if DEBUG
- // Check that the slot can not point to a freed object.
- HeapObjectHeader& header =
- BasePage::FromPayload(value)->ObjectHeaderFromInnerAddress(value);
- DCHECK(!header.IsFree());
-#endif
+#if defined(DEBUG)
+ // Check that the slot can not point to a freed object.
+ HeapObjectHeader& header =
+ BasePage::FromPayload(value)->ObjectHeaderFromInnerAddress(value);
+ DCHECK(!header.IsFree());
+#endif // defined(DEBUG)
+
+ marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
+}
+
+class CompressedSlotVisitor : HeapVisitor<CompressedSlotVisitor> {
+ friend class HeapVisitor<CompressedSlotVisitor>;
+
+ public:
+ CompressedSlotVisitor(HeapBase& heap, MutatorMarkingState& marking_state,
+ const std::set<void*>& slots_for_verification)
+ : heap_(heap),
+ marking_state_(marking_state),
+ remembered_slots_for_verification_(slots_for_verification) {}
- mutator_marking_state.DynamicallyMarkAddress(static_cast<Address>(value));
+ size_t Run() {
+ Traverse(heap_.raw_heap());
+ return objects_visited_;
}
+
+ private:
+ heap::base::SlotCallbackResult VisitCompressedSlot(Address slot) {
+ DCHECK(current_page_);
+ VisitSlot<SlotType::kCompressed>(heap_, *current_page_, slot,
+ marking_state_,
+ remembered_slots_for_verification_);
+ ++objects_visited_;
+ return heap::base::KEEP_SLOT;
+ }
+
+ void VisitSlotSet(SlotSet* slot_set) {
+ DCHECK(current_page_);
+
+ if (!slot_set) return;
+
+ const uintptr_t page_start = reinterpret_cast<uintptr_t>(current_page_);
+ const size_t buckets_size =
+ SlotSet::BucketsForSize(current_page_->AllocatedSize());
+
+ slot_set->Iterate(
+ page_start, 0, buckets_size,
+ [this](SlotSet::Address slot) {
+ return VisitCompressedSlot(reinterpret_cast<Address>(slot));
+ },
+ SlotSet::EmptyBucketMode::FREE_EMPTY_BUCKETS);
+ }
+
+ bool VisitNormalPage(NormalPage& page) {
+ current_page_ = &page;
+ VisitSlotSet(page.slot_set());
+ return true;
+ }
+
+ bool VisitLargePage(LargePage& page) {
+ current_page_ = &page;
+ VisitSlotSet(page.slot_set());
+ return true;
+ }
+
+ HeapBase& heap_;
+ MutatorMarkingState& marking_state_;
+ BasePage* current_page_ = nullptr;
+
+ const std::set<void*>& remembered_slots_for_verification_;
+ size_t objects_visited_ = 0u;
+};
+
+class SlotRemover : HeapVisitor<SlotRemover> {
+ friend class HeapVisitor<SlotRemover>;
+
+ public:
+ explicit SlotRemover(HeapBase& heap) : heap_(heap) {}
+
+ void Run() { Traverse(heap_.raw_heap()); }
+
+ private:
+ bool VisitNormalPage(NormalPage& page) {
+ page.ResetSlotSet();
+ return true;
+ }
+
+ bool VisitLargePage(LargePage& page) {
+ page.ResetSlotSet();
+ return true;
+ }
+
+ HeapBase& heap_;
+};
+
+// Visit remembered set that was recorded in the generational barrier.
+void VisitRememberedSlots(
+ HeapBase& heap, MutatorMarkingState& mutator_marking_state,
+ const std::set<void*>& remembered_uncompressed_slots,
+ const std::set<void*>& remembered_slots_for_verification) {
+ size_t objects_visited = 0;
+ {
+ CompressedSlotVisitor slot_visitor(heap, mutator_marking_state,
+ remembered_slots_for_verification);
+ objects_visited += slot_visitor.Run();
+ }
+ for (void* uncompressed_slot : remembered_uncompressed_slots) {
+ auto* page = BasePage::FromInnerAddress(&heap, uncompressed_slot);
+ DCHECK(page);
+ VisitSlot<SlotType::kUncompressed>(
+ heap, *page, static_cast<Address>(uncompressed_slot),
+ mutator_marking_state, remembered_slots_for_verification);
+ ++objects_visited;
+ }
+ DCHECK_EQ(remembered_slots_for_verification.size(), objects_visited);
+ USE(objects_visited);
}
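
For orientation, here is a compact stand-in for the per-page slot set the new visitor iterates: compressed slots are recorded as offsets from the page start in a bitmap and visited by walking the set bits, rather than being kept as raw pointers in one global std::set. The page size, slot granularity and names below are assumptions for illustration only, not the real BasicSlotSet.

    #include <bitset>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kPageSize = 1 << 17;                  // Assumed page size.
    constexpr size_t kSlotGranularity = sizeof(uint32_t);  // Compressed slot.

    struct PageSlots {
      uintptr_t page_start = 0;
      std::bitset<kPageSize / kSlotGranularity> bits;

      // Recording a slot stores only its offset within the page.
      void Add(uintptr_t slot) {
        bits.set((slot - page_start) / kSlotGranularity);
      }

      // Invalidation clears all offsets covered by [begin, end), e.g. when an
      // object on the page is freed or shrunk.
      void RemoveRange(uintptr_t begin, uintptr_t end) {
        for (uintptr_t a = begin; a < end; a += kSlotGranularity) {
          bits.reset((a - page_start) / kSlotGranularity);
        }
      }

      // Visiting replays every recorded slot address to a callback.
      template <typename Callback>
      void Iterate(Callback callback) const {
        for (size_t i = 0; i < bits.size(); ++i) {
          if (bits[i]) callback(page_start + i * kSlotGranularity);
        }
      }
    };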
// Visits source objects that were recorded in the generational barrier for
@@ -114,12 +259,29 @@ void VisitRememberedSourceObjects(
void OldToNewRememberedSet::AddSlot(void* slot) {
DCHECK(heap_.generational_gc_supported());
- remembered_slots_.insert(slot);
+
+ BasePage* source_page = BasePage::FromInnerAddress(&heap_, slot);
+ DCHECK(source_page);
+
+ auto& slot_set = source_page->GetOrAllocateSlotSet();
+
+ const uintptr_t slot_offset = reinterpret_cast<uintptr_t>(slot) -
+ reinterpret_cast<uintptr_t>(source_page);
+
+ slot_set.Insert<SlotSet::AccessMode::NON_ATOMIC>(
+ static_cast<size_t>(slot_offset));
+
+#if defined(DEBUG)
+ remembered_slots_for_verification_.insert(slot);
+#endif // defined(DEBUG)
}
void OldToNewRememberedSet::AddUncompressedSlot(void* uncompressed_slot) {
DCHECK(heap_.generational_gc_supported());
remembered_uncompressed_slots_.insert(uncompressed_slot);
+#if defined(DEBUG)
+ remembered_slots_for_verification_.insert(uncompressed_slot);
+#endif // defined(DEBUG)
}
void OldToNewRememberedSet::AddSourceObject(HeapObjectHeader& hoh) {
@@ -138,10 +300,11 @@ void OldToNewRememberedSet::AddWeakCallback(WeakCallbackItem item) {
void OldToNewRememberedSet::InvalidateRememberedSlotsInRange(void* begin,
void* end) {
DCHECK(heap_.generational_gc_supported());
- InvalidateRememberedSlots<SlotType::kCompressed>(remembered_slots_, begin,
- end);
- InvalidateRememberedSlots<SlotType::kUncompressed>(
- remembered_uncompressed_slots_, begin, end);
+ InvalidateCompressedRememberedSlots(heap_, begin, end,
+ remembered_slots_for_verification_);
+ InvalidateUncompressedRememberedSlots(remembered_uncompressed_slots_, begin,
+ end,
+ remembered_slots_for_verification_);
}
void OldToNewRememberedSet::InvalidateRememberedSourceObject(
@@ -153,10 +316,8 @@ void OldToNewRememberedSet::InvalidateRememberedSourceObject(
void OldToNewRememberedSet::Visit(Visitor& visitor,
MutatorMarkingState& marking_state) {
DCHECK(heap_.generational_gc_supported());
- VisitRememberedSlots<SlotType::kCompressed>(remembered_slots_, heap_,
- marking_state);
- VisitRememberedSlots<SlotType::kUncompressed>(remembered_uncompressed_slots_,
- heap_, marking_state);
+ VisitRememberedSlots(heap_, marking_state, remembered_uncompressed_slots_,
+ remembered_slots_for_verification_);
VisitRememberedSourceObjects(remembered_source_objects_, visitor);
}
@@ -174,16 +335,23 @@ void OldToNewRememberedSet::ReleaseCustomCallbacks() {
void OldToNewRememberedSet::Reset() {
DCHECK(heap_.generational_gc_supported());
- remembered_slots_.clear();
+ SlotRemover slot_remover(heap_);
+ slot_remover.Run();
remembered_uncompressed_slots_.clear();
remembered_source_objects_.clear();
+#if DEBUG
+ remembered_slots_for_verification_.clear();
+#endif // DEBUG
}
bool OldToNewRememberedSet::IsEmpty() const {
- return remembered_slots_.empty() && remembered_uncompressed_slots_.empty() &&
+ // TODO(1029379): Add visitor to check if empty.
+ return remembered_uncompressed_slots_.empty() &&
remembered_source_objects_.empty() &&
remembered_weak_callbacks_.empty();
}
} // namespace internal
} // namespace cppgc
+
+#endif // defined(CPPGC_YOUNG_GENERATION)
diff --git a/deps/v8/src/heap/cppgc/remembered-set.h b/deps/v8/src/heap/cppgc/remembered-set.h
index 24e460d438..086ba62289 100644
--- a/deps/v8/src/heap/cppgc/remembered-set.h
+++ b/deps/v8/src/heap/cppgc/remembered-set.h
@@ -5,9 +5,12 @@
#ifndef V8_HEAP_CPPGC_REMEMBERED_SET_H_
#define V8_HEAP_CPPGC_REMEMBERED_SET_H_
+#if defined(CPPGC_YOUNG_GENERATION)
+
#include <set>
#include "src/base/macros.h"
+#include "src/heap/base/basic-slot-set.h"
#include "src/heap/cppgc/marking-worklists.h"
namespace cppgc {
@@ -21,11 +24,14 @@ class HeapBase;
class HeapObjectHeader;
class MutatorMarkingState;
+class SlotSet : public ::heap::base::BasicSlotSet<kSlotSize> {};
+
+// OldToNewRememberedSet represents a per-heap set of old-to-new references.
class V8_EXPORT_PRIVATE OldToNewRememberedSet final {
public:
using WeakCallbackItem = MarkingWorklists::WeakCallbackItem;
- explicit OldToNewRememberedSet(const HeapBase& heap)
+ explicit OldToNewRememberedSet(HeapBase& heap)
: heap_(heap), remembered_weak_callbacks_(compare_parameter) {}
OldToNewRememberedSet(const OldToNewRememberedSet&) = delete;
@@ -58,15 +64,19 @@ class V8_EXPORT_PRIVATE OldToNewRememberedSet final {
}
} compare_parameter{};
- const HeapBase& heap_;
- std::set<void*> remembered_slots_;
- std::set<void*> remembered_uncompressed_slots_;
+ HeapBase& heap_;
std::set<HeapObjectHeader*> remembered_source_objects_;
std::set<WeakCallbackItem, decltype(compare_parameter)>
remembered_weak_callbacks_;
+ // Compressed slots are stored in slot-sets (per-page two-level bitmaps),
+ // whereas uncompressed are stored in std::set.
+ std::set<void*> remembered_uncompressed_slots_;
+ std::set<void*> remembered_slots_for_verification_;
};
} // namespace internal
} // namespace cppgc
+#endif // defined(CPPGC_YOUNG_GENERATION)
+
#endif // V8_HEAP_CPPGC_REMEMBERED_SET_H_
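
The header keeps a remembered_slots_for_verification_ set whose only purpose is to cross-check the fast slot-set path in debug builds. A generic sketch of that mirror-container pattern follows, assuming a DEBUG preprocessor flag; the class and its members are illustrative, not the real API.

    #include <cassert>
    #include <cstddef>
    #include <set>

    class RememberedSlots {
     public:
      void Add(void* slot) {
        // Fast path: record the slot in the per-page bitmap (omitted here).
        (void)slot;  // Only the debug mirror below uses it in this sketch.
    #ifdef DEBUG
        verification_.insert(slot);  // Mirror for cross-checking.
    #endif
      }

      // After visiting, the number of visited slots must match the mirror set.
      void VerifyVisitedCount(size_t visited) const {
    #ifdef DEBUG
        assert(visited == verification_.size());
    #else
        (void)visited;
    #endif
      }

     private:
    #ifdef DEBUG
      std::set<void*> verification_;
    #endif
    };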
diff --git a/deps/v8/src/heap/cppgc/stats-collector.cc b/deps/v8/src/heap/cppgc/stats-collector.cc
index ca01122208..f65309b6f4 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.cc
+++ b/deps/v8/src/heap/cppgc/stats-collector.cc
@@ -171,8 +171,7 @@ int64_t SumPhases(const MetricRecorder::GCCycle::Phases& phases) {
}
MetricRecorder::GCCycle GetCycleEventForMetricRecorder(
- StatsCollector::CollectionType type,
- StatsCollector::MarkingType marking_type,
+ CollectionType type, StatsCollector::MarkingType marking_type,
StatsCollector::SweepingType sweeping_type, int64_t atomic_mark_us,
int64_t atomic_weak_us, int64_t atomic_compact_us, int64_t atomic_sweep_us,
int64_t incremental_mark_us, int64_t incremental_sweep_us,
@@ -181,7 +180,7 @@ MetricRecorder::GCCycle GetCycleEventForMetricRecorder(
int64_t objects_freed_bytes, int64_t memory_before_bytes,
int64_t memory_after_bytes, int64_t memory_freed_bytes) {
MetricRecorder::GCCycle event;
- event.type = (type == StatsCollector::CollectionType::kMajor)
+ event.type = (type == CollectionType::kMajor)
? MetricRecorder::GCCycle::Type::kMajor
: MetricRecorder::GCCycle::Type::kMinor;
// MainThread.Incremental:
diff --git a/deps/v8/src/heap/cppgc/stats-collector.h b/deps/v8/src/heap/cppgc/stats-collector.h
index c78db86acf..ff040a3dcc 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.h
+++ b/deps/v8/src/heap/cppgc/stats-collector.h
@@ -33,6 +33,7 @@ namespace internal {
V(IncrementalSweep)
#define CPPGC_FOR_ALL_SCOPES(V) \
+ V(Unmark) \
V(MarkIncrementalStart) \
V(MarkIncrementalFinalize) \
V(MarkAtomicPrologue) \
@@ -52,9 +53,10 @@ namespace internal {
V(MarkVisitCrossThreadPersistents) \
V(MarkVisitStack) \
V(MarkVisitRememberedSets) \
+ V(SweepFinishIfOutOfWork) \
V(SweepInvokePreFinalizers) \
- V(SweepIdleStep) \
V(SweepInTask) \
+ V(SweepInTaskForStatistics) \
V(SweepOnAllocation) \
V(SweepFinalize)
@@ -67,12 +69,11 @@ namespace internal {
// Sink for various time and memory statistics.
class V8_EXPORT_PRIVATE StatsCollector final {
- using IsForcedGC = GarbageCollector::Config::IsForcedGC;
+ using IsForcedGC = GCConfig::IsForcedGC;
public:
- using CollectionType = GarbageCollector::Config::CollectionType;
- using MarkingType = GarbageCollector::Config::MarkingType;
- using SweepingType = GarbageCollector::Config::SweepingType;
+ using MarkingType = GCConfig::MarkingType;
+ using SweepingType = GCConfig::SweepingType;
#if defined(CPPGC_DECLARE_ENUM)
static_assert(false, "CPPGC_DECLARE_ENUM macro is already defined");
diff --git a/deps/v8/src/heap/cppgc/sweeper.cc b/deps/v8/src/heap/cppgc/sweeper.cc
index 23e684ed4d..3cb96f8baa 100644
--- a/deps/v8/src/heap/cppgc/sweeper.cc
+++ b/deps/v8/src/heap/cppgc/sweeper.cc
@@ -11,6 +11,7 @@
#include "include/cppgc/platform.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
+#include "src/base/platform/time.h"
#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-base.h"
@@ -25,13 +26,41 @@
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/task-handle.h"
-namespace cppgc {
-namespace internal {
+namespace cppgc::internal {
namespace {
+class DeadlineChecker final {
+ public:
+ explicit DeadlineChecker(v8::base::TimeTicks end) : end_(end) {}
+
+ bool Check() {
+ return (++count_ % kInterval == 0) && (end_ < v8::base::TimeTicks::Now());
+ }
+
+ private:
+ static constexpr size_t kInterval = 4;
+
+ const v8::base::TimeTicks end_;
+ size_t count_ = 0;
+};
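
DeadlineChecker only consults the clock on every fourth call so that tight sweeping loops stay cheap. Here is a standalone version using std::chrono instead of v8::base::TimeTicks, plus a hypothetical usage loop; the work-loop helper is an assumption for illustration.

    #include <chrono>
    #include <cstddef>

    class DeadlineChecker {
     public:
      explicit DeadlineChecker(std::chrono::steady_clock::time_point end)
          : end_(end) {}

      // Only reads the clock on every 4th call to keep the common case cheap.
      bool Check() {
        return (++count_ % kInterval == 0) &&
               (end_ < std::chrono::steady_clock::now());
      }

     private:
      static constexpr size_t kInterval = 4;
      const std::chrono::steady_clock::time_point end_;
      size_t count_ = 0;
    };

    // Hypothetical usage: bail out of an incremental work loop once the budget
    // is exhausted, resuming in a later task.
    bool DrainWithBudget(int& work_items, std::chrono::milliseconds budget) {
      DeadlineChecker deadline(std::chrono::steady_clock::now() + budget);
      while (work_items > 0) {
        --work_items;                        // Process one unit of work.
        if (deadline.Check()) return false;  // Deadline hit, resume later.
      }
      return true;  // All work done within the budget.
    }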
+
using v8::base::Optional;
+enum class MutatorThreadSweepingMode {
+ kOnlyFinalizers,
+ kAll,
+};
+
+constexpr const char* ToString(MutatorThreadSweepingMode sweeping_mode) {
+ switch (sweeping_mode) {
+ case MutatorThreadSweepingMode::kAll:
+ return "all";
+ case MutatorThreadSweepingMode::kOnlyFinalizers:
+ return "only-finalizers";
+ }
+}
+
enum class StickyBits : uint8_t {
kDisabled,
kEnabled,
@@ -220,6 +249,9 @@ class InlinedFinalizationBuilderBase {
bool is_empty = false;
size_t largest_new_free_list_entry = 0;
};
+
+ protected:
+ ResultType result_;
};
// Builder that finalizes objects and adds freelist entries right away.
@@ -238,10 +270,13 @@ class InlinedFinalizationBuilder final : public InlinedFinalizationBuilderBase,
void AddFreeListEntry(Address start, size_t size) {
FreeHandler::Free({start, size});
+ result_.largest_new_free_list_entry =
+ std::max(result_.largest_new_free_list_entry, size);
}
- ResultType GetResult(bool is_empty, size_t largest_new_free_list_entry) {
- return {is_empty, largest_new_free_list_entry};
+ ResultType&& GetResult(bool is_empty) {
+ result_.is_empty = is_empty;
+ return std::move(result_);
}
};
@@ -282,12 +317,13 @@ class DeferredFinalizationBuilder final : public FreeHandler {
} else {
FreeHandler::Free({start, size});
}
+ result_.largest_new_free_list_entry =
+ std::max(result_.largest_new_free_list_entry, size);
found_finalizer_ = false;
}
- ResultType&& GetResult(bool is_empty, size_t largest_new_free_list_entry) {
+ ResultType&& GetResult(bool is_empty) {
result_.is_empty = is_empty;
- result_.largest_new_free_list_entry = largest_new_free_list_entry;
return std::move(result_);
}
@@ -305,7 +341,6 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
PlatformAwareObjectStartBitmap& bitmap = page->object_start_bitmap();
- size_t largest_new_free_list_entry = 0;
size_t live_bytes = 0;
Address start_of_gap = page->PayloadStart();
@@ -346,12 +381,10 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
// The object is alive.
const Address header_address = reinterpret_cast<Address>(header);
if (start_of_gap != header_address) {
- size_t new_free_list_entry_size =
+ const size_t new_free_list_entry_size =
static_cast<size_t>(header_address - start_of_gap);
builder.AddFreeListEntry(start_of_gap, new_free_list_entry_size);
DCHECK(bitmap.CheckBit<AccessMode::kAtomic>(start_of_gap));
- largest_new_free_list_entry =
- std::max(largest_new_free_list_entry, new_free_list_entry_size);
}
StickyUnmark(header, sticky_bits);
begin += size;
@@ -368,7 +401,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
page->SetAllocatedBytesAtLastGC(live_bytes);
const bool is_empty = (start_of_gap == page->PayloadStart());
- return builder.GetResult(is_empty, largest_new_free_list_entry);
+ return builder.GetResult(is_empty);
}
// SweepFinalizer is responsible for heap/space/page finalization. Finalization
@@ -377,7 +410,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(
// - returns (unmaps) empty pages;
// - merges freelists to the space's freelist.
class SweepFinalizer final {
- using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+ using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
public:
SweepFinalizer(cppgc::Platform* platform,
@@ -397,20 +430,13 @@ class SweepFinalizer final {
}
bool FinalizeSpaceWithDeadline(SpaceState* space_state,
- double deadline_in_seconds) {
+ v8::base::TimeTicks deadline) {
DCHECK(platform_);
- static constexpr size_t kDeadlineCheckInterval = 8;
- size_t page_count = 1;
-
+ DeadlineChecker deadline_check(deadline);
while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
FinalizePage(&*page_state);
- if (page_count % kDeadlineCheckInterval == 0 &&
- deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
- return false;
- }
-
- page_count++;
+ if (deadline_check.Check()) return false;
}
return true;
@@ -488,7 +514,7 @@ class SweepFinalizer final {
class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
friend class HeapVisitor<MutatorThreadSweeper>;
- using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+ using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
public:
MutatorThreadSweeper(HeapBase* heap, SpaceStates* states,
@@ -511,25 +537,23 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
void SweepPage(BasePage& page) { Traverse(page); }
- bool SweepWithDeadline(double deadline_in_seconds) {
+ bool SweepWithDeadline(v8::base::TimeDelta max_duration,
+ MutatorThreadSweepingMode sweeping_mode) {
DCHECK(platform_);
- static constexpr double kSlackInSeconds = 0.001;
for (SpaceState& state : *states_) {
- // FinalizeSpaceWithDeadline() and SweepSpaceWithDeadline() won't check
- // the deadline until it sweeps 10 pages. So we give a small slack for
- // safety.
- const double remaining_budget = deadline_in_seconds - kSlackInSeconds -
- platform_->MonotonicallyIncreasingTime();
- if (remaining_budget <= 0.) return false;
+ const auto deadline = v8::base::TimeTicks::Now() + max_duration;
// First, prioritize finalization of pages that were swept concurrently.
SweepFinalizer finalizer(platform_, free_memory_handling_);
- if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline_in_seconds)) {
+ if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline)) {
return false;
}
+ if (sweeping_mode == MutatorThreadSweepingMode::kOnlyFinalizers)
+ return false;
+
// Help out the concurrent sweeper.
- if (!SweepSpaceWithDeadline(&state, deadline_in_seconds)) {
+ if (!SweepSpaceWithDeadline(&state, deadline)) {
return false;
}
}
@@ -541,16 +565,11 @@ class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
}
private:
- bool SweepSpaceWithDeadline(SpaceState* state, double deadline_in_seconds) {
- static constexpr size_t kDeadlineCheckInterval = 8;
- size_t page_count = 1;
+ bool SweepSpaceWithDeadline(SpaceState* state, v8::base::TimeTicks deadline) {
+ DeadlineChecker deadline_check(deadline);
while (auto page = state->unswept_pages.Pop()) {
Traverse(**page);
- if (page_count % kDeadlineCheckInterval == 0 &&
- deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
- return false;
- }
- page_count++;
+ if (deadline_check.Check()) return false;
}
return true;
@@ -603,7 +622,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
private HeapVisitor<ConcurrentSweepTask> {
friend class HeapVisitor<ConcurrentSweepTask>;
- using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+ using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
public:
ConcurrentSweepTask(HeapBase& heap, SpaceStates* states, Platform* platform,
@@ -693,8 +712,7 @@ class ConcurrentSweepTask final : public cppgc::JobTask,
class PrepareForSweepVisitor final
: protected HeapVisitor<PrepareForSweepVisitor> {
friend class HeapVisitor<PrepareForSweepVisitor>;
- using CompactableSpaceHandling =
- Sweeper::SweepingConfig::CompactableSpaceHandling;
+ using CompactableSpaceHandling = SweepingConfig::CompactableSpaceHandling;
public:
PrepareForSweepVisitor(SpaceStates* states,
@@ -746,7 +764,7 @@ class PrepareForSweepVisitor final
} // namespace
class Sweeper::SweeperImpl final {
- using FreeMemoryHandling = Sweeper::SweepingConfig::FreeMemoryHandling;
+ using FreeMemoryHandling = SweepingConfig::FreeMemoryHandling;
public:
SweeperImpl(RawHeap& heap, StatsCollector* stats_collector)
@@ -787,7 +805,8 @@ class Sweeper::SweeperImpl final {
}
}
- bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size) {
+ bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size,
+ v8::base::TimeDelta max_duration) {
if (!is_in_progress_) return false;
// Bail out for recursive sweeping calls. This can happen when finalizers
@@ -808,14 +827,19 @@ class Sweeper::SweeperImpl final {
StatsCollector::EnabledScope inner_scope(
stats_collector_, StatsCollector::kSweepOnAllocation);
MutatorThreadSweepingScope sweeping_in_progress(*this);
-
+ DeadlineChecker deadline_check(v8::base::TimeTicks::Now() + max_duration);
{
// First, process unfinalized pages as finalizing a page is faster than
// sweeping.
SweepFinalizer finalizer(platform_, config_.free_memory_handling);
while (auto page = space_state.swept_unfinalized_pages.Pop()) {
finalizer.FinalizePage(&*page);
- if (size <= finalizer.largest_new_free_list_entry()) return true;
+ if (size <= finalizer.largest_new_free_list_entry()) {
+ return true;
+ }
+ if (deadline_check.Check()) {
+ return false;
+ }
}
}
{
@@ -825,19 +849,24 @@ class Sweeper::SweeperImpl final {
config_.free_memory_handling);
while (auto page = space_state.unswept_pages.Pop()) {
sweeper.SweepPage(**page);
- if (size <= sweeper.largest_new_free_list_entry()) return true;
+ if (size <= sweeper.largest_new_free_list_entry()) {
+ return true;
+ }
+ if (deadline_check.Check()) {
+ return false;
+ }
}
}
return false;
}
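
SweepForAllocationIfRunning now works in two phases under a single deadline: first finalize pages that the concurrent sweeper already processed (cheap), then sweep remaining pages, returning as soon as a large-enough free block appears. A simplified sketch with stand-in queues and page types:

    #include <chrono>
    #include <cstddef>
    #include <deque>

    struct PageWork {
      size_t largest_free_block = 0;  // Filled in by finalizing/sweeping.
    };

    // Finalizing an already-swept page is cheaper than sweeping from scratch.
    size_t FinalizePage(PageWork& page) { return page.largest_free_block; }
    size_t SweepPage(PageWork& page) { return page.largest_free_block; }

    bool SweepForAllocation(std::deque<PageWork>& swept_unfinalized,
                            std::deque<PageWork>& unswept, size_t wanted,
                            std::chrono::microseconds budget) {
      const auto deadline = std::chrono::steady_clock::now() + budget;
      auto out_of_time = [&] {
        return std::chrono::steady_clock::now() > deadline;
      };
      // Phase 1: finalize pages the concurrent sweeper already processed.
      while (!swept_unfinalized.empty()) {
        PageWork page = swept_unfinalized.front();
        swept_unfinalized.pop_front();
        if (FinalizePage(page) >= wanted) return true;
        if (out_of_time()) return false;
      }
      // Phase 2: sweep remaining pages of the space ourselves.
      while (!unswept.empty()) {
        PageWork page = unswept.front();
        unswept.pop_front();
        if (SweepPage(page) >= wanted) return true;
        if (out_of_time()) return false;
      }
      return false;  // Nothing big enough found within the budget.
    }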
- void FinishIfRunning() {
- if (!is_in_progress_) return;
+ bool FinishIfRunning() {
+ if (!is_in_progress_) return false;
// Bail out for recursive sweeping calls. This can happen when finalizers
// allocate new memory.
- if (is_sweeping_on_mutator_thread_) return;
+ if (is_sweeping_on_mutator_thread_) return false;
{
StatsCollector::EnabledScope stats_scope(
@@ -852,12 +881,22 @@ class Sweeper::SweeperImpl final {
Finish();
}
NotifyDone();
+ return true;
+ }
+
+ bool IsConcurrentSweepingDone() const {
+ return !concurrent_sweeper_handle_ ||
+ (concurrent_sweeper_handle_->IsValid() &&
+ !concurrent_sweeper_handle_->IsActive());
}
void FinishIfOutOfWork() {
if (is_in_progress_ && !is_sweeping_on_mutator_thread_ &&
concurrent_sweeper_handle_ && concurrent_sweeper_handle_->IsValid() &&
!concurrent_sweeper_handle_->IsActive()) {
+ StatsCollector::EnabledScope stats_scope(
+ stats_collector_, StatsCollector::kSweepFinishIfOutOfWork);
+ MutatorThreadSweepingScope sweeping_in_progress(*this);
// At this point we know that the concurrent sweeping task has run
// out-of-work: all pages are swept. The main thread still needs to finish
// sweeping though.
@@ -865,8 +904,18 @@ class Sweeper::SweeperImpl final {
[](const SpaceState& state) {
return state.unswept_pages.IsEmpty();
}));
- FinishIfRunning();
+
+ // There may be unfinalized pages left. Since it's hard to estimate
+ // the actual amount of sweeping necessary, we sweep with a small
+ // deadline to see if sweeping can be fully finished.
+ MutatorThreadSweeper sweeper(heap_.heap(), &space_states_, platform_,
+ config_.free_memory_handling);
+ if (sweeper.SweepWithDeadline(v8::base::TimeDelta::FromMilliseconds(2),
+ MutatorThreadSweepingMode::kAll)) {
+ FinalizeSweep();
+ }
}
+ NotifyDoneIfNeeded();
}
void Finish() {
@@ -920,8 +969,9 @@ class Sweeper::SweeperImpl final {
bool IsSweepingInProgress() const { return is_in_progress_; }
- bool PerformSweepOnMutatorThread(double deadline_in_seconds,
- StatsCollector::ScopeId internal_scope_id) {
+ bool PerformSweepOnMutatorThread(v8::base::TimeDelta max_duration,
+ StatsCollector::ScopeId internal_scope_id,
+ MutatorThreadSweepingMode sweeping_mode) {
if (!is_in_progress_) return true;
MutatorThreadSweepingScope sweeping_in_progress(*this);
@@ -935,10 +985,10 @@ class Sweeper::SweeperImpl final {
config_.free_memory_handling);
{
StatsCollector::EnabledScope inner_stats_scope(
- stats_collector_, internal_scope_id, "deltaInSeconds",
- deadline_in_seconds - platform_->MonotonicallyIncreasingTime());
-
- sweep_complete = sweeper.SweepWithDeadline(deadline_in_seconds);
+ stats_collector_, internal_scope_id, "max_duration_ms",
+ max_duration.InMillisecondsF(), "sweeping_mode",
+ ToString(sweeping_mode));
+ sweep_complete = sweeper.SweepWithDeadline(max_duration, sweeping_mode);
}
if (sweep_complete) {
FinalizeSweep();
@@ -948,6 +998,23 @@ class Sweeper::SweeperImpl final {
return sweep_complete;
}
+ void AddMutatorThreadSweepingObserver(
+ Sweeper::SweepingOnMutatorThreadObserver* observer) {
+ DCHECK_EQ(mutator_thread_sweeping_observers_.end(),
+ std::find(mutator_thread_sweeping_observers_.begin(),
+ mutator_thread_sweeping_observers_.end(), observer));
+ mutator_thread_sweeping_observers_.push_back(observer);
+ }
+
+ void RemoveMutatorThreadSweepingObserver(
+ Sweeper::SweepingOnMutatorThreadObserver* observer) {
+ const auto it =
+ std::find(mutator_thread_sweeping_observers_.begin(),
+ mutator_thread_sweeping_observers_.end(), observer);
+ DCHECK_NE(mutator_thread_sweeping_observers_.end(), it);
+ mutator_thread_sweeping_observers_.erase(it);
+ }
+
private:
class MutatorThreadSweepingScope final {
public:
@@ -955,9 +1022,15 @@ class Sweeper::SweeperImpl final {
: sweeper_(sweeper) {
DCHECK(!sweeper_.is_sweeping_on_mutator_thread_);
sweeper_.is_sweeping_on_mutator_thread_ = true;
+ for (auto* observer : sweeper_.mutator_thread_sweeping_observers_) {
+ observer->Start();
+ }
}
~MutatorThreadSweepingScope() {
sweeper_.is_sweeping_on_mutator_thread_ = false;
+ for (auto* observer : sweeper_.mutator_thread_sweeping_observers_) {
+ observer->End();
+ }
}
MutatorThreadSweepingScope(const MutatorThreadSweepingScope&) = delete;
@@ -968,33 +1041,37 @@ class Sweeper::SweeperImpl final {
SweeperImpl& sweeper_;
};
- class IncrementalSweepTask : public cppgc::IdleTask {
+ class IncrementalSweepTask final : public cppgc::Task {
public:
using Handle = SingleThreadedHandle;
- explicit IncrementalSweepTask(SweeperImpl* sweeper)
+ explicit IncrementalSweepTask(SweeperImpl& sweeper)
: sweeper_(sweeper), handle_(Handle::NonEmptyTag{}) {}
- static Handle Post(SweeperImpl* sweeper, cppgc::TaskRunner* runner) {
+ static Handle Post(SweeperImpl& sweeper, cppgc::TaskRunner* runner) {
auto task = std::make_unique<IncrementalSweepTask>(sweeper);
auto handle = task->GetHandle();
- runner->PostIdleTask(std::move(task));
+ runner->PostTask(std::move(task));
return handle;
}
private:
- void Run(double deadline_in_seconds) override {
+ void Run() override {
if (handle_.IsCanceled()) return;
- if (!sweeper_->PerformSweepOnMutatorThread(
- deadline_in_seconds, StatsCollector::kSweepIdleStep)) {
- sweeper_->ScheduleIncrementalSweeping();
+ if (!sweeper_.PerformSweepOnMutatorThread(
+ v8::base::TimeDelta::FromMilliseconds(5),
+ StatsCollector::kSweepInTask,
+ sweeper_.IsConcurrentSweepingDone()
+ ? MutatorThreadSweepingMode::kAll
+ : MutatorThreadSweepingMode::kOnlyFinalizers)) {
+ sweeper_.ScheduleIncrementalSweeping();
}
}
Handle GetHandle() const { return handle_; }
- SweeperImpl* sweeper_;
+ SweeperImpl& sweeper_;
// TODO(chromium:1056170): Change to CancelableTask.
Handle handle_;
};
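
The incremental sweeping task switches from idle tasks to regular foreground tasks: each run gets a fixed 5 ms budget and the task reposts itself while work remains. A toy sketch of that self-rescheduling pattern, where TaskRunner and SweepStep are stand-ins rather than the cppgc API:

    #include <chrono>
    #include <functional>
    #include <queue>
    #include <utility>

    struct TaskRunner {
      std::queue<std::function<void()>> tasks;
      void PostTask(std::function<void()> task) {
        tasks.push(std::move(task));
      }
    };

    // Pretend each step sweeps up to 10 pages within the given budget and
    // reports whether all pages are done.
    bool SweepStep(int& pages_left, std::chrono::milliseconds /*budget*/) {
      pages_left = pages_left > 10 ? pages_left - 10 : 0;
      return pages_left == 0;
    }

    void ScheduleIncrementalSweep(TaskRunner& runner, int& pages_left) {
      runner.PostTask([&runner, &pages_left] {
        // Fixed 5ms budget per task, mirroring the change above.
        if (!SweepStep(pages_left, std::chrono::milliseconds(5))) {
          ScheduleIncrementalSweep(runner, pages_left);  // Not done: repost.
        }
      });
    }

    // Driving the toy runner: pop and execute tasks until the queue drains.
    void RunAll(TaskRunner& runner) {
      while (!runner.tasks.empty()) {
        auto task = std::move(runner.tasks.front());
        runner.tasks.pop();
        task();
      }
    }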
@@ -1002,10 +1079,10 @@ class Sweeper::SweeperImpl final {
void ScheduleIncrementalSweeping() {
DCHECK(platform_);
auto runner = platform_->GetForegroundTaskRunner();
- if (!runner || !runner->IdleTasksEnabled()) return;
+ if (!runner) return;
incremental_sweeper_handle_ =
- IncrementalSweepTask::Post(this, runner.get());
+ IncrementalSweepTask::Post(*this, runner.get());
}
void ScheduleConcurrentSweeping() {
@@ -1042,6 +1119,8 @@ class Sweeper::SweeperImpl final {
SweepingConfig config_;
IncrementalSweepTask::Handle incremental_sweeper_handle_;
std::unique_ptr<cppgc::JobHandle> concurrent_sweeper_handle_;
+ std::vector<Sweeper::SweepingOnMutatorThreadObserver*>
+ mutator_thread_sweeping_observers_;
// Indicates whether the sweeping phase is in progress.
bool is_in_progress_ = false;
bool notify_done_pending_ = false;
@@ -1060,14 +1139,16 @@ Sweeper::~Sweeper() = default;
void Sweeper::Start(SweepingConfig config) {
impl_->Start(config, heap_.platform());
}
-void Sweeper::FinishIfRunning() { impl_->FinishIfRunning(); }
+
+bool Sweeper::FinishIfRunning() { return impl_->FinishIfRunning(); }
void Sweeper::FinishIfOutOfWork() { impl_->FinishIfOutOfWork(); }
void Sweeper::WaitForConcurrentSweepingForTesting() {
impl_->WaitForConcurrentSweepingForTesting();
}
void Sweeper::NotifyDoneIfNeeded() { impl_->NotifyDoneIfNeeded(); }
-bool Sweeper::SweepForAllocationIfRunning(NormalPageSpace* space, size_t size) {
- return impl_->SweepForAllocationIfRunning(space, size);
+bool Sweeper::SweepForAllocationIfRunning(NormalPageSpace* space, size_t size,
+ v8::base::TimeDelta max_duration) {
+ return impl_->SweepForAllocationIfRunning(space, size, max_duration);
}
bool Sweeper::IsSweepingOnMutatorThread() const {
return impl_->IsSweepingOnMutatorThread();
@@ -1077,10 +1158,20 @@ bool Sweeper::IsSweepingInProgress() const {
return impl_->IsSweepingInProgress();
}
-bool Sweeper::PerformSweepOnMutatorThread(double deadline_in_seconds) {
- return impl_->PerformSweepOnMutatorThread(deadline_in_seconds,
- StatsCollector::kSweepInTask);
+bool Sweeper::PerformSweepOnMutatorThread(v8::base::TimeDelta max_duration,
+ StatsCollector::ScopeId scope_id) {
+ return impl_->PerformSweepOnMutatorThread(max_duration, scope_id,
+ MutatorThreadSweepingMode::kAll);
+}
+
+Sweeper::SweepingOnMutatorThreadObserver::SweepingOnMutatorThreadObserver(
+ Sweeper& sweeper)
+ : sweeper_(sweeper) {
+ sweeper_.impl_->AddMutatorThreadSweepingObserver(this);
+}
+
+Sweeper::SweepingOnMutatorThreadObserver::~SweepingOnMutatorThreadObserver() {
+ sweeper_.impl_->RemoveMutatorThreadSweepingObserver(this);
}
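
The observer registers itself in its constructor and unregisters in its destructor, so its lifetime bounds the subscription. A self-contained sketch of that RAII registration pattern with illustrative names:

    #include <algorithm>
    #include <vector>

    class Sweeper;  // Forward declaration for the toy example.

    class SweepingObserver {
     public:
      explicit SweepingObserver(Sweeper& sweeper);
      virtual ~SweepingObserver();
      virtual void Start() = 0;  // Mutator-thread sweeping begins.
      virtual void End() = 0;    // Mutator-thread sweeping ends.

     private:
      Sweeper& sweeper_;
    };

    class Sweeper {
     public:
      void Add(SweepingObserver* o) { observers_.push_back(o); }
      void Remove(SweepingObserver* o) {
        observers_.erase(std::find(observers_.begin(), observers_.end(), o));
      }
      void NotifyStart() {
        for (auto* o : observers_) o->Start();
      }
      void NotifyEnd() {
        for (auto* o : observers_) o->End();
      }

     private:
      std::vector<SweepingObserver*> observers_;
    };

    SweepingObserver::SweepingObserver(Sweeper& sweeper) : sweeper_(sweeper) {
      sweeper_.Add(this);  // Subscribe for the lifetime of the observer.
    }
    SweepingObserver::~SweepingObserver() { sweeper_.Remove(this); }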
-} // namespace internal
-} // namespace cppgc
+} // namespace cppgc::internal
diff --git a/deps/v8/src/heap/cppgc/sweeper.h b/deps/v8/src/heap/cppgc/sweeper.h
index 845dfbbfc1..95b61729b8 100644
--- a/deps/v8/src/heap/cppgc/sweeper.h
+++ b/deps/v8/src/heap/cppgc/sweeper.h
@@ -7,16 +7,13 @@
#include <memory>
-#include "include/cppgc/heap.h"
#include "src/base/macros.h"
#include "src/base/platform/time.h"
+#include "src/heap/cppgc/heap-config.h"
#include "src/heap/cppgc/memory.h"
+#include "src/heap/cppgc/stats-collector.h"
-namespace cppgc {
-
-class Platform;
-
-namespace internal {
+namespace cppgc::internal {
class HeapBase;
class ConcurrentSweeperTest;
@@ -24,15 +21,16 @@ class NormalPageSpace;
class V8_EXPORT_PRIVATE Sweeper final {
public:
- struct SweepingConfig {
- using SweepingType = cppgc::Heap::SweepingType;
- enum class CompactableSpaceHandling { kSweep, kIgnore };
- enum class FreeMemoryHandling { kDoNotDiscard, kDiscardWherePossible };
-
- SweepingType sweeping_type = SweepingType::kIncrementalAndConcurrent;
- CompactableSpaceHandling compactable_space_handling =
- CompactableSpaceHandling::kSweep;
- FreeMemoryHandling free_memory_handling = FreeMemoryHandling::kDoNotDiscard;
+ class V8_EXPORT_PRIVATE SweepingOnMutatorThreadObserver {
+ public:
+ explicit SweepingOnMutatorThreadObserver(Sweeper&);
+ virtual ~SweepingOnMutatorThreadObserver();
+
+ virtual void Start() = 0;
+ virtual void End() = 0;
+
+ private:
+ Sweeper& sweeper_;
};
static constexpr bool CanDiscardMemory() {
@@ -47,19 +45,24 @@ class V8_EXPORT_PRIVATE Sweeper final {
// Sweeper::Start assumes the heap holds no linear allocation buffers.
void Start(SweepingConfig);
- void FinishIfRunning();
+ // Returns true when sweeping was finished and false if it was not running or
+ // couldn't be finished due to being a recursive sweep call.
+ bool FinishIfRunning();
void FinishIfOutOfWork();
void NotifyDoneIfNeeded();
- // SweepForAllocationIfRunning sweeps the given |space| until a slot that can
- // fit an allocation of size |size| is found. Returns true if a slot was
- // found.
- bool SweepForAllocationIfRunning(NormalPageSpace* space, size_t size);
+ // SweepForAllocationIfRunning sweeps the given `space` until a slot that can
+ // fit an allocation of `min_wanted_size` bytes is found. Returns true if a
+ // slot was found. Aborts after `max_duration`.
+ bool SweepForAllocationIfRunning(NormalPageSpace* space,
+ size_t min_wanted_size,
+ v8::base::TimeDelta max_duration);
bool IsSweepingOnMutatorThread() const;
bool IsSweepingInProgress() const;
// Assist with sweeping. Returns true if sweeping is done.
- bool PerformSweepOnMutatorThread(double deadline_in_seconds);
+ bool PerformSweepOnMutatorThread(v8::base::TimeDelta max_duration,
+ StatsCollector::ScopeId);
private:
void WaitForConcurrentSweepingForTesting();
@@ -72,7 +75,6 @@ class V8_EXPORT_PRIVATE Sweeper final {
friend class ConcurrentSweeperTest;
};
-} // namespace internal
-} // namespace cppgc
+} // namespace cppgc::internal
#endif // V8_HEAP_CPPGC_SWEEPER_H_
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 098f950d2a..5cbec656a9 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -187,24 +187,6 @@ void WriteBarrier::CheckParams(Type expected_type, const Params& params) {
}
#endif // V8_ENABLE_CHECKS
-// static
-bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(const void* object,
- HeapHandle** handle) {
- // Large objects cannot have mixins, so we are guaranteed to always have
- // a pointer on the same page.
- const auto* page = BasePage::FromPayload(object);
- *handle = &page->heap();
- const MarkerBase* marker = page->heap().marker();
- return marker && marker->IsMarking();
-}
-
-// static
-bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(HeapHandle& heap_handle) {
- const auto& heap_base = internal::HeapBase::From(heap_handle);
- const MarkerBase* marker = heap_base.marker();
- return marker && marker->IsMarking();
-}
-
#if defined(CPPGC_YOUNG_GENERATION)
// static
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 8bb5bcad4e..ceac516f9c 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -45,9 +45,8 @@ CppHeap::GarbageCollectionFlags ConvertTraceFlags(
void LocalEmbedderHeapTracer::PrepareForTrace(
EmbedderHeapTracer::TraceFlags flags) {
if (cpp_heap_)
- cpp_heap()->InitializeTracing(
- cppgc::internal::GarbageCollector::Config::CollectionType::kMajor,
- ConvertTraceFlags(flags));
+ cpp_heap()->InitializeTracing(cppgc::internal::CollectionType::kMajor,
+ ConvertTraceFlags(flags));
}
void LocalEmbedderHeapTracer::TracePrologue(
diff --git a/deps/v8/src/heap/evacuation-allocator-inl.h b/deps/v8/src/heap/evacuation-allocator-inl.h
index b474664a62..8d83eaad1c 100644
--- a/deps/v8/src/heap/evacuation-allocator-inl.h
+++ b/deps/v8/src/heap/evacuation-allocator-inl.h
@@ -16,6 +16,7 @@ AllocationResult EvacuationAllocator::Allocate(AllocationSpace space,
int object_size,
AllocationOrigin origin,
AllocationAlignment alignment) {
+ object_size = ALIGN_TO_ALLOCATION_ALIGNMENT(object_size);
switch (space) {
case NEW_SPACE:
return AllocateInNewSpace(object_size, origin, alignment);
@@ -28,6 +29,9 @@ AllocationResult EvacuationAllocator::Allocate(AllocationSpace space,
case CODE_SPACE:
return compaction_spaces_.Get(CODE_SPACE)
->AllocateRaw(object_size, alignment, origin);
+ case SHARED_SPACE:
+ return compaction_spaces_.Get(SHARED_SPACE)
+ ->AllocateRaw(object_size, alignment, origin);
default:
UNREACHABLE();
}
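
The ALIGN_TO_ALLOCATION_ALIGNMENT calls introduced throughout this change round a requested size up to the heap's allocation granularity before it is used for allocation or bookkeeping. A generic constexpr version of that rounding, assuming an 8-byte alignment purely for illustration:

    #include <cstddef>

    constexpr size_t kAllocationAlignment = 8;  // Must be a power of two.

    constexpr size_t AlignToAllocationAlignment(size_t size) {
      return (size + kAllocationAlignment - 1) & ~(kAllocationAlignment - 1);
    }

    static_assert(AlignToAllocationAlignment(1) == 8);
    static_assert(AlignToAllocationAlignment(8) == 8);
    static_assert(AlignToAllocationAlignment(13) == 16);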
@@ -35,15 +39,19 @@ AllocationResult EvacuationAllocator::Allocate(AllocationSpace space,
void EvacuationAllocator::FreeLast(AllocationSpace space, HeapObject object,
int object_size) {
+ object_size = ALIGN_TO_ALLOCATION_ALIGNMENT(object_size);
switch (space) {
case NEW_SPACE:
FreeLastInNewSpace(object, object_size);
return;
case OLD_SPACE:
- FreeLastInOldSpace(object, object_size);
+ FreeLastInCompactionSpace(OLD_SPACE, object, object_size);
return;
case MAP_SPACE:
- FreeLastInMapSpace(object, object_size);
+ FreeLastInCompactionSpace(MAP_SPACE, object, object_size);
+ return;
+ case SHARED_SPACE:
+ FreeLastInCompactionSpace(SHARED_SPACE, object, object_size);
return;
default:
// Only new and old space supported.
@@ -59,19 +67,11 @@ void EvacuationAllocator::FreeLastInNewSpace(HeapObject object,
}
}
-void EvacuationAllocator::FreeLastInOldSpace(HeapObject object,
- int object_size) {
- if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object.address(),
- object_size)) {
- // We couldn't free the last object so we have to write a proper filler.
- heap_->CreateFillerObjectAt(object.address(), object_size);
- }
-}
-
-void EvacuationAllocator::FreeLastInMapSpace(HeapObject object,
- int object_size) {
- if (!compaction_spaces_.Get(MAP_SPACE)->TryFreeLast(object.address(),
- object_size)) {
+void EvacuationAllocator::FreeLastInCompactionSpace(AllocationSpace space,
+ HeapObject object,
+ int object_size) {
+ if (!compaction_spaces_.Get(space)->TryFreeLast(object.address(),
+ object_size)) {
// We couldn't free the last object so we have to write a proper filler.
heap_->CreateFillerObjectAt(object.address(), object_size);
}
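
The two near-identical FreeLastIn*Space helpers collapse into one function parameterized by the allocation space. A standalone sketch of that consolidation, with stand-in space and filler types:

    #include <array>
    #include <cstddef>

    enum SpaceId { kOld, kMap, kShared, kNumSpaces };

    struct Space {
      bool TryFreeLast(void* address, size_t size);  // Stubbed below.
    };

    struct CompactionSpaces {
      std::array<Space, kNumSpaces> spaces;
      Space& Get(SpaceId id) { return spaces[id]; }
    };

    void WriteFiller(void*, size_t) {}                // Stand-in filler write.
    bool Space::TryFreeLast(void*, size_t) { return false; }  // Stub.

    // One code path for all compaction spaces.
    void FreeLastInCompactionSpace(CompactionSpaces& spaces, SpaceId id,
                                   void* address, size_t size) {
      if (!spaces.Get(id).TryFreeLast(address, size)) {
        // Could not undo the allocation, so a filler keeps the page iterable.
        WriteFiller(address, size);
      }
    }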
diff --git a/deps/v8/src/heap/evacuation-allocator.h b/deps/v8/src/heap/evacuation-allocator.h
index 6dbeab1b29..14f5cb0a1f 100644
--- a/deps/v8/src/heap/evacuation-allocator.h
+++ b/deps/v8/src/heap/evacuation-allocator.h
@@ -39,6 +39,10 @@ class EvacuationAllocator {
heap_->map_space()->MergeCompactionSpace(
compaction_spaces_.Get(MAP_SPACE));
}
+ if (heap_->shared_space()) {
+ heap_->shared_space()->MergeCompactionSpace(
+ compaction_spaces_.Get(SHARED_SPACE));
+ }
// Give back remaining LAB space if this EvacuationAllocator's new space LAB
// sits right next to new space allocation top.
@@ -60,8 +64,8 @@ class EvacuationAllocator {
inline AllocationResult AllocateInLAB(int object_size,
AllocationAlignment alignment);
inline void FreeLastInNewSpace(HeapObject object, int object_size);
- inline void FreeLastInOldSpace(HeapObject object, int object_size);
- inline void FreeLastInMapSpace(HeapObject object, int object_size);
+ inline void FreeLastInCompactionSpace(AllocationSpace space,
+ HeapObject object, int object_size);
Heap* const heap_;
NewSpace* const new_space_;
diff --git a/deps/v8/src/heap/evacuation-verifier-inl.h b/deps/v8/src/heap/evacuation-verifier-inl.h
new file mode 100644
index 0000000000..cf1eee1351
--- /dev/null
+++ b/deps/v8/src/heap/evacuation-verifier-inl.h
@@ -0,0 +1,64 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_EVACUATION_VERIFIER_INL_H_
+#define V8_HEAP_EVACUATION_VERIFIER_INL_H_
+
+#include "src/heap/evacuation-verifier.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/mark-compact.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef VERIFY_HEAP
+
+void FullEvacuationVerifier::VerifyHeapObjectImpl(HeapObject heap_object) {
+ if (!ShouldVerifyObject(heap_object)) return;
+ CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
+ Heap::InToPage(heap_object));
+ CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
+}
+
+bool FullEvacuationVerifier::ShouldVerifyObject(HeapObject heap_object) {
+ const bool in_shared_heap = heap_object.InSharedWritableHeap();
+ return heap_->isolate()->is_shared_heap_isolate() ? in_shared_heap
+ : !in_shared_heap;
+}
+
+template <typename TSlot>
+void FullEvacuationVerifier::VerifyPointersImpl(TSlot start, TSlot end) {
+ for (TSlot current = start; current < end; ++current) {
+ typename TSlot::TObject object = current.load(cage_base());
+ HeapObject heap_object;
+ if (object.GetHeapObjectIfStrong(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
+ }
+ }
+}
+
+void YoungGenerationEvacuationVerifier::VerifyHeapObjectImpl(
+ HeapObject heap_object) {
+ CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
+ Heap::InToPage(heap_object));
+}
+
+template <typename TSlot>
+void YoungGenerationEvacuationVerifier::VerifyPointersImpl(TSlot start,
+ TSlot end) {
+ for (TSlot current = start; current < end; ++current) {
+ typename TSlot::TObject object = current.load(cage_base());
+ HeapObject heap_object;
+ if (object.GetHeapObject(&heap_object)) {
+ VerifyHeapObjectImpl(heap_object);
+ }
+ }
+}
+
+#endif // VERIFY_HEAP
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_EVACUATION_VERIFIER_INL_H_
diff --git a/deps/v8/src/heap/evacuation-verifier.cc b/deps/v8/src/heap/evacuation-verifier.cc
new file mode 100644
index 0000000000..2396e73f36
--- /dev/null
+++ b/deps/v8/src/heap/evacuation-verifier.cc
@@ -0,0 +1,179 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/reloc-info.h"
+#include "src/heap/evacuation-verifier-inl.h"
+#include "src/objects/map-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef VERIFY_HEAP
+
+EvacuationVerifier::EvacuationVerifier(Heap* heap)
+ : ObjectVisitorWithCageBases(heap), heap_(heap) {}
+
+void EvacuationVerifier::VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) {
+ VerifyPointers(start, end);
+}
+
+void EvacuationVerifier::VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) {
+ VerifyPointers(start, end);
+}
+
+void EvacuationVerifier::VisitCodePointer(HeapObject host,
+ CodeObjectSlot slot) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ VerifyCodePointer(slot);
+}
+
+void EvacuationVerifier::VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start,
+ FullObjectSlot end) {
+ VerifyRootPointers(start, end);
+}
+
+void EvacuationVerifier::VisitMapPointer(HeapObject object) {
+ VerifyMap(object.map(cage_base()));
+}
+void EvacuationVerifier::VerifyRoots() {
+ heap_->IterateRootsIncludingClients(this,
+ base::EnumSet<SkipRoot>{SkipRoot::kWeak});
+}
+
+void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
+ Address current = start;
+ while (current < end) {
+ HeapObject object = HeapObject::FromAddress(current);
+ if (!object.IsFreeSpaceOrFiller(cage_base())) {
+ object.Iterate(cage_base(), this);
+ }
+ current += ALIGN_TO_ALLOCATION_ALIGNMENT(object.Size(cage_base()));
+ }
+}
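
VerifyEvacuationOnPage relies on the invariant that an object's alignment-padded size is the offset to the next object, so a page can be verified with one forward scan. A minimal sketch with stub helpers standing in for the real object accessors:

    #include <cstddef>
    #include <cstdint>

    size_t AlignedSizeOf(uintptr_t /*object*/) { return 16; }         // Stub.
    bool IsFreeSpaceOrFiller(uintptr_t /*object*/) { return false; }  // Stub.
    void VerifyObject(uintptr_t /*object*/) {}                        // Stub.

    void VerifyPage(uintptr_t start, uintptr_t end) {
      for (uintptr_t current = start; current < end;
           current += AlignedSizeOf(current)) {
        // Fillers and free space are skipped; everything else is verified.
        if (!IsFreeSpaceOrFiller(current)) VerifyObject(current);
      }
    }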
+
+void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
+ if (!space) return;
+ if (v8_flags.minor_mc) {
+ VerifyEvacuation(PagedNewSpace::From(space)->paged_space());
+ return;
+ }
+ PageRange range(space->first_allocatable_address(), space->top());
+ for (auto it = range.begin(); it != range.end();) {
+ Page* page = *(it++);
+ Address current = page->area_start();
+ Address limit = it != range.end() ? page->area_end() : space->top();
+ CHECK(limit == space->top() || !page->Contains(space->top()));
+ VerifyEvacuationOnPage(current, limit);
+ }
+}
+
+void EvacuationVerifier::VerifyEvacuation(PagedSpaceBase* space) {
+ for (Page* p : *space) {
+ if (p->IsEvacuationCandidate()) continue;
+ if (p->Contains(space->top())) {
+ CodePageMemoryModificationScope memory_modification_scope(p);
+ heap_->CreateFillerObjectAt(
+ space->top(), static_cast<int>(space->limit() - space->top()));
+ }
+ VerifyEvacuationOnPage(p->area_start(), p->area_end());
+ }
+}
+
+FullEvacuationVerifier::FullEvacuationVerifier(Heap* heap)
+ : EvacuationVerifier(heap) {}
+
+void FullEvacuationVerifier::Run() {
+ DCHECK(!heap_->sweeping_in_progress());
+ VerifyRoots();
+ VerifyEvacuation(heap_->new_space());
+ VerifyEvacuation(heap_->old_space());
+ VerifyEvacuation(heap_->code_space());
+ if (heap_->shared_space()) VerifyEvacuation(heap_->shared_space());
+ if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
+}
+
+void FullEvacuationVerifier::VerifyMap(Map map) { VerifyHeapObjectImpl(map); }
+void FullEvacuationVerifier::VerifyPointers(ObjectSlot start, ObjectSlot end) {
+ VerifyPointersImpl(start, end);
+}
+void FullEvacuationVerifier::VerifyPointers(MaybeObjectSlot start,
+ MaybeObjectSlot end) {
+ VerifyPointersImpl(start, end);
+}
+void FullEvacuationVerifier::VerifyCodePointer(CodeObjectSlot slot) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ Object maybe_code = slot.load(code_cage_base());
+ HeapObject code;
+ // The slot might contain smi during CodeDataContainer creation, so skip it.
+ if (maybe_code.GetHeapObject(&code)) {
+ VerifyHeapObjectImpl(code);
+ }
+}
+void FullEvacuationVerifier::VisitCodeTarget(Code host, RelocInfo* rinfo) {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+}
+void FullEvacuationVerifier::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
+ VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
+}
+void FullEvacuationVerifier::VerifyRootPointers(FullObjectSlot start,
+ FullObjectSlot end) {
+ VerifyPointersImpl(start, end);
+}
+
+YoungGenerationEvacuationVerifier::YoungGenerationEvacuationVerifier(Heap* heap)
+ : EvacuationVerifier(heap) {}
+
+void YoungGenerationEvacuationVerifier::Run() {
+ DCHECK(!heap_->sweeping_in_progress());
+ VerifyRoots();
+ VerifyEvacuation(heap_->new_space());
+ VerifyEvacuation(heap_->old_space());
+ VerifyEvacuation(heap_->code_space());
+ if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
+}
+
+void YoungGenerationEvacuationVerifier::VerifyMap(Map map) {
+ VerifyHeapObjectImpl(map);
+}
+void YoungGenerationEvacuationVerifier::VerifyPointers(ObjectSlot start,
+ ObjectSlot end) {
+ VerifyPointersImpl(start, end);
+}
+void YoungGenerationEvacuationVerifier::VerifyPointers(MaybeObjectSlot start,
+ MaybeObjectSlot end) {
+ VerifyPointersImpl(start, end);
+}
+void YoungGenerationEvacuationVerifier::VerifyCodePointer(CodeObjectSlot slot) {
+ CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
+ Object maybe_code = slot.load(code_cage_base());
+ HeapObject code;
+ // The slot might contain smi during CodeDataContainer creation, so skip it.
+ if (maybe_code.GetHeapObject(&code)) {
+ VerifyHeapObjectImpl(code);
+ }
+}
+void YoungGenerationEvacuationVerifier::VisitCodeTarget(Code host,
+ RelocInfo* rinfo) {
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ VerifyHeapObjectImpl(target);
+}
+void YoungGenerationEvacuationVerifier::VisitEmbeddedPointer(Code host,
+ RelocInfo* rinfo) {
+ VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
+}
+void YoungGenerationEvacuationVerifier::VerifyRootPointers(FullObjectSlot start,
+ FullObjectSlot end) {
+ VerifyPointersImpl(start, end);
+}
+
+#endif // VERIFY_HEAP
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/evacuation-verifier.h b/deps/v8/src/heap/evacuation-verifier.h
new file mode 100644
index 0000000000..3aa4702eaa
--- /dev/null
+++ b/deps/v8/src/heap/evacuation-verifier.h
@@ -0,0 +1,104 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_EVACUATION_VERIFIER_H_
+#define V8_HEAP_EVACUATION_VERIFIER_H_
+
+#include "src/heap/new-spaces.h"
+#include "src/heap/paged-spaces.h"
+#include "src/objects/map.h"
+#include "src/objects/visitors.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef VERIFY_HEAP
+
+class EvacuationVerifier : public ObjectVisitorWithCageBases,
+ public RootVisitor {
+ public:
+ virtual void Run() = 0;
+
+ void VisitPointers(HeapObject host, ObjectSlot start,
+ ObjectSlot end) override;
+
+ void VisitPointers(HeapObject host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override;
+
+ void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override;
+
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) override;
+
+ void VisitMapPointer(HeapObject object) override;
+
+ protected:
+ explicit EvacuationVerifier(Heap* heap);
+
+ inline Heap* heap() { return heap_; }
+
+ virtual void VerifyMap(Map map) = 0;
+ virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
+ virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
+ virtual void VerifyCodePointer(CodeObjectSlot slot) = 0;
+ virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
+
+ void VerifyRoots();
+ void VerifyEvacuationOnPage(Address start, Address end);
+ void VerifyEvacuation(NewSpace* new_space);
+ void VerifyEvacuation(PagedSpaceBase* paged_space);
+
+ Heap* heap_;
+};
+
+class FullEvacuationVerifier : public EvacuationVerifier {
+ public:
+ explicit FullEvacuationVerifier(Heap* heap);
+
+ void Run() override;
+
+ protected:
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object);
+
+ V8_INLINE bool ShouldVerifyObject(HeapObject heap_object);
+
+ template <typename TSlot>
+ void VerifyPointersImpl(TSlot start, TSlot end);
+
+ void VerifyMap(Map map) override;
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override;
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override;
+ void VerifyCodePointer(CodeObjectSlot slot) override;
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;
+ void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override;
+};
+
+class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
+ public:
+ explicit YoungGenerationEvacuationVerifier(Heap* heap);
+
+ void Run() override;
+
+ protected:
+ V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object);
+
+ template <typename TSlot>
+ void VerifyPointersImpl(TSlot start, TSlot end);
+
+ void VerifyMap(Map map) override;
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override;
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override;
+ void VerifyCodePointer(CodeObjectSlot slot) override;
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override;
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override;
+ void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override;
+};
+
+#endif // VERIFY_HEAP
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_EVACUATION_VERIFIER_H_
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index 9533456935..0dec79f034 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -216,7 +216,7 @@ Handle<ByteArray> FactoryBase<Impl>::NewByteArray(int length,
UNREACHABLE();
}
if (length == 0) return impl()->empty_byte_array();
- int size = ByteArray::SizeFor(length);
+ int size = ALIGN_TO_ALLOCATION_ALIGNMENT(ByteArray::SizeFor(length));
HeapObject result = AllocateRawWithImmortalMap(
size, allocation, read_only_roots().byte_array_map());
DisallowGarbageCollection no_gc;
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 7afbc9b683..bcb2f6475e 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -351,7 +351,7 @@ HeapObject Factory::AllocateRawWithAllocationSite(
int size = map->instance_size();
if (!allocation_site.is_null()) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
- size += AllocationMemento::kSize;
+ size += ALIGN_TO_ALLOCATION_ALIGNMENT(AllocationMemento::kSize);
}
HeapObject result = allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
size, allocation);
@@ -360,8 +360,9 @@ HeapObject Factory::AllocateRawWithAllocationSite(
: UPDATE_WRITE_BARRIER;
result.set_map_after_allocation(*map, write_barrier_mode);
if (!allocation_site.is_null()) {
- AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(
- Object(result.ptr() + map->instance_size()));
+ int aligned_size = ALIGN_TO_ALLOCATION_ALIGNMENT(map->instance_size());
+ AllocationMemento alloc_memento =
+ AllocationMemento::unchecked_cast(Object(result.ptr() + aligned_size));
InitializeAllocationMemento(alloc_memento, *allocation_site);
}
return result;
@@ -774,6 +775,11 @@ MaybeHandle<String> NewStringFromUtf8Variant(Isolate* isolate,
MaybeHandle<String> Factory::NewStringFromUtf8(
const base::Vector<const uint8_t>& string,
unibrow::Utf8Variant utf8_variant, AllocationType allocation) {
+ if (string.size() > kMaxInt) {
+    // The Utf8Decoder can't handle longer inputs, and we couldn't create
+ // strings from them anyway.
+ THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
+ }
auto peek_bytes = [&]() -> base::Vector<const uint8_t> { return string; };
return NewStringFromUtf8Variant(isolate(), peek_bytes, utf8_variant,
allocation);
@@ -792,6 +798,8 @@ MaybeHandle<String> Factory::NewStringFromUtf8(
DCHECK_EQ(sizeof(uint8_t), array->type()->element_type().value_kind_size());
DCHECK_LE(start, end);
DCHECK_LE(end, array->length());
+ // {end - start} can never be more than what the Utf8Decoder can handle.
+ static_assert(WasmArray::MaxLength(sizeof(uint8_t)) <= kMaxInt);
auto peek_bytes = [&]() -> base::Vector<const uint8_t> {
const uint8_t* contents =
reinterpret_cast<const uint8_t*>(array->ElementAddress(0));
@@ -806,6 +814,8 @@ MaybeHandle<String> Factory::NewStringFromUtf8(
unibrow::Utf8Variant utf8_variant, AllocationType allocation) {
DCHECK_LE(start, end);
DCHECK_LE(end, array->length());
+ // {end - start} can never be more than what the Utf8Decoder can handle.
+ static_assert(ByteArray::kMaxLength <= kMaxInt);
auto peek_bytes = [&]() -> base::Vector<const uint8_t> {
const uint8_t* contents =
reinterpret_cast<const uint8_t*>(array->GetDataStartAddress());
@@ -838,6 +848,8 @@ MaybeHandle<String> Factory::NewStringFromUtf16(Handle<WasmArray> array,
DCHECK_EQ(sizeof(uint16_t), array->type()->element_type().value_kind_size());
DCHECK_LE(start, end);
DCHECK_LE(end, array->length());
+ // {end - start} can never be more than what the Utf8Decoder can handle.
+ static_assert(WasmArray::MaxLength(sizeof(uint16_t)) <= kMaxInt);
auto peek_bytes = [&]() -> base::Vector<const uint16_t> {
const uint16_t* contents =
reinterpret_cast<const uint16_t*>(array->ElementAddress(0));
@@ -2036,7 +2048,7 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
DisallowGarbageCollection no_gc;
Heap* roots = allocation_type == AllocationType::kMap
? isolate()->heap()
- : isolate()->shared_isolate()->heap();
+ : isolate()->shared_heap_isolate()->heap();
result.set_map_after_allocation(ReadOnlyRoots(roots).meta_map(),
SKIP_WRITE_BARRIER);
return handle(InitializeMap(Map::cast(result), type, instance_size,
@@ -2119,10 +2131,12 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
DCHECK(site.is_null() || AllocationSite::CanTrack(instance_type));
int object_size = map->instance_size();
- int adjusted_object_size = object_size;
+ int aligned_object_size = ALIGN_TO_ALLOCATION_ALIGNMENT(object_size);
+ int adjusted_object_size = aligned_object_size;
if (!site.is_null()) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
- adjusted_object_size += AllocationMemento::kSize;
+ adjusted_object_size +=
+ ALIGN_TO_ALLOCATION_ALIGNMENT(AllocationMemento::kSize);
}
HeapObject raw_clone =
allocator()->AllocateRawWith<HeapAllocator::kRetryOrFail>(
@@ -2142,7 +2156,7 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
}
if (!site.is_null()) {
AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(
- Object(raw_clone.ptr() + object_size));
+ Object(raw_clone.ptr() + aligned_object_size));
InitializeAllocationMemento(alloc_memento, *site);
}
@@ -2716,6 +2730,10 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
InitializeJSObjectFromMap(js_obj, *empty_fixed_array(), *map);
DCHECK(js_obj.HasFastElements() ||
+ (isolate()->bootstrapper()->IsActive() ||
+ *map == isolate()
+ ->raw_native_context()
+ .js_array_template_literal_object_map()) ||
js_obj.HasTypedArrayOrRabGsabTypedArrayElements() ||
js_obj.HasFastStringWrapperElements() ||
js_obj.HasFastArgumentsElements() || js_obj.HasDictionaryElements() ||
@@ -2788,7 +2806,9 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
AllocationType allocation) {
Handle<JSArray> array = NewJSArrayWithUnverifiedElements(
elements, elements_kind, length, allocation);
+#ifdef ENABLE_SLOW_DCHECKS
JSObject::ValidateElements(*array);
+#endif
return array;
}
@@ -2802,8 +2822,14 @@ Handle<JSArray> Factory::NewJSArrayWithUnverifiedElements(
JSFunction array_function = native_context.array_function();
map = array_function.initial_map();
}
- Handle<JSArray> array = Handle<JSArray>::cast(
- NewJSObjectFromMap(handle(map, isolate()), allocation));
+ return NewJSArrayWithUnverifiedElements(handle(map, isolate()), elements,
+ length, allocation);
+}
+
+Handle<JSArray> Factory::NewJSArrayWithUnverifiedElements(
+ Handle<Map> map, Handle<FixedArrayBase> elements, int length,
+ AllocationType allocation) {
+ auto array = Handle<JSArray>::cast(NewJSObjectFromMap(map, allocation));
DisallowGarbageCollection no_gc;
JSArray raw = *array;
raw.set_elements(*elements);
@@ -2811,6 +2837,23 @@ Handle<JSArray> Factory::NewJSArrayWithUnverifiedElements(
return array;
}
+Handle<JSArray> Factory::NewJSArrayForTemplateLiteralArray(
+ Handle<FixedArray> cooked_strings, Handle<FixedArray> raw_strings) {
+ Handle<JSArray> raw_object =
+ NewJSArrayWithElements(raw_strings, PACKED_ELEMENTS,
+ raw_strings->length(), AllocationType::kOld);
+ JSObject::SetIntegrityLevel(raw_object, FROZEN, kThrowOnError).ToChecked();
+
+ Handle<NativeContext> native_context = isolate()->native_context();
+ Handle<JSArray> template_object = NewJSArrayWithUnverifiedElements(
+ handle(native_context->js_array_template_literal_object_map(), isolate()),
+ cooked_strings, cooked_strings->length(), AllocationType::kOld);
+ TemplateLiteralObject::SetRaw(template_object, raw_object);
+ DCHECK_EQ(template_object->map(),
+ native_context->js_array_template_literal_object_map());
+ return template_object;
+}
+
void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
ArrayStorageAllocationMode mode) {
DCHECK(capacity >= length);
@@ -3011,13 +3054,14 @@ MaybeHandle<JSArrayBuffer> Factory::NewJSArrayBufferAndBackingStore(
Handle<JSArrayBuffer> Factory::NewJSSharedArrayBuffer(
std::shared_ptr<BackingStore> backing_store) {
- DCHECK_IMPLIES(backing_store->is_resizable(), v8_flags.harmony_rab_gsab);
+ DCHECK_IMPLIES(backing_store->is_resizable_by_js(),
+ v8_flags.harmony_rab_gsab);
Handle<Map> map(
isolate()->native_context()->shared_array_buffer_fun().initial_map(),
isolate());
auto result = Handle<JSArrayBuffer>::cast(
NewJSObjectFromMap(map, AllocationType::kYoung));
- ResizableFlag resizable = backing_store->is_resizable()
+ ResizableFlag resizable = backing_store->is_resizable_by_js()
? ResizableFlag::kResizable
: ResizableFlag::kNotResizable;
result->Setup(SharedFlag::kShared, resizable, std::move(backing_store));
@@ -3133,7 +3177,8 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
raw.set_length(length);
raw.SetOffHeapDataPtr(isolate(), buffer->backing_store(), byte_offset);
raw.set_is_length_tracking(false);
- raw.set_is_backed_by_rab(!buffer->is_shared() && buffer->is_resizable());
+ raw.set_is_backed_by_rab(!buffer->is_shared() &&
+ buffer->is_resizable_by_js());
return typed_array;
}
@@ -3148,7 +3193,8 @@ Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
isolate(), static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
// TODO(v8:11111): Support creating length tracking DataViews via the API.
obj->set_is_length_tracking(false);
- obj->set_is_backed_by_rab(!buffer->is_shared() && buffer->is_resizable());
+ obj->set_is_backed_by_rab(!buffer->is_shared() &&
+ buffer->is_resizable_by_js());
return obj;
}
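Several hunks above move the AllocationMemento from the raw instance size to the aligned instance size, so the memento now starts at an aligned offset behind the object it describes. A small stand-alone sketch of that address arithmetic, with made-up addresses and sizes:

#include <cassert>
#include <cstdint>

constexpr std::uintptr_t kAlignment = 8;  // assumed allocation alignment
constexpr std::uintptr_t AlignUp(std::uintptr_t value) {
  return (value + kAlignment - 1) & ~(kAlignment - 1);
}

int main() {
  const std::uintptr_t object_address = 0x1000;  // hypothetical allocation start
  const std::uintptr_t instance_size = 0x34;     // not a multiple of kAlignment
  // Previously the memento was placed at object_address + instance_size; with
  // aligned allocation sizes it has to sit at the aligned offset instead.
  const std::uintptr_t memento_address = object_address + AlignUp(instance_size);
  assert(memento_address == 0x1038);
  return 0;
}
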
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 75676b4624..6c9cc2d4d8 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -606,6 +606,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
AllocationType allocation = AllocationType::kYoung);
+ Handle<JSArray> NewJSArrayForTemplateLiteralArray(
+ Handle<FixedArray> cooked_strings, Handle<FixedArray> raw_strings);
+
void NewJSArrayStorage(
Handle<JSArray> array, int length, int capacity,
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
@@ -1141,6 +1144,9 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<JSArray> NewJSArrayWithUnverifiedElements(
Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
AllocationType allocation = AllocationType::kYoung);
+ Handle<JSArray> NewJSArrayWithUnverifiedElements(
+ Handle<Map> map, Handle<FixedArrayBase> elements, int length,
+ AllocationType allocation = AllocationType::kYoung);
// Creates the backing storage for a JSArray. This handle must be discarded
// before returning the JSArray reference to code outside Factory, which might
diff --git a/deps/v8/src/heap/gc-tracer-inl.h b/deps/v8/src/heap/gc-tracer-inl.h
index 9dc46c7431..248c3490fb 100644
--- a/deps/v8/src/heap/gc-tracer-inl.h
+++ b/deps/v8/src/heap/gc-tracer-inl.h
@@ -121,6 +121,10 @@ bool GCTracer::IsInObservablePause() const {
return 0.0 < start_of_observable_pause_;
}
+bool GCTracer::IsInAtomicPause() const {
+ return current_.state == Event::State::ATOMIC;
+}
+
bool GCTracer::IsConsistentWithCollector(GarbageCollector collector) const {
return (collector == GarbageCollector::SCAVENGER &&
current_.type == Event::SCAVENGER) ||
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 47a97f91a1..b9dc605f5d 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -76,7 +76,7 @@ const char* GCTracer::Event::TypeName(bool short_name) const {
return (short_name) ? "s" : "Scavenge";
case MARK_COMPACTOR:
case INCREMENTAL_MARK_COMPACTOR:
- return (short_name) ? "ms" : "Mark-sweep";
+ return (short_name) ? "mc" : "Mark-Compact";
case MINOR_MARK_COMPACTOR:
case INCREMENTAL_MINOR_MARK_COMPACTOR:
return (short_name) ? "mmc" : "Minor Mark-Compact";
@@ -223,14 +223,6 @@ void GCTracer::ResetForTesting() {
}
}
-void GCTracer::NotifyYoungGenerationHandling(
- YoungGenerationHandling young_generation_handling) {
- DCHECK_GE(1, start_counter_);
- DCHECK_EQ(Event::SCAVENGER, current_.type);
- heap_->isolate()->counters()->young_generation_handling()->AddSample(
- static_cast<int>(young_generation_handling));
-}
-
void GCTracer::StartObservablePause() {
DCHECK_EQ(0, start_counter_);
start_counter_++;
@@ -269,6 +261,8 @@ void GCTracer::StartCycle(GarbageCollector collector,
DCHECK_IMPLIES(young_gc_while_full_gc_,
Heap::IsYoungGenerationCollector(collector) &&
!Event::IsYoungGenerationEvent(current_.type));
+ DCHECK_IMPLIES(collector != GarbageCollector::SCAVENGER,
+ !young_gc_while_full_gc_);
Event::Type type;
switch (collector) {
@@ -468,6 +462,7 @@ void GCTracer::StopCycle(GarbageCollector collector) {
// If a young generation GC interrupted an unfinished full GC cycle, restore
// the event corresponding to the full GC cycle.
if (young_gc_while_full_gc_) {
+ DCHECK_EQ(current_.type, Event::Type::SCAVENGER);
std::swap(current_, previous_);
young_gc_while_full_gc_ = false;
}
@@ -517,7 +512,7 @@ void GCTracer::NotifySweepingCompleted() {
DCHECK((current_.type == Event::MARK_COMPACTOR ||
current_.type == Event::INCREMENTAL_MARK_COMPACTOR) &&
(current_.state == Event::State::SWEEPING ||
- (v8_flags.verify_heap && current_.state == Event::State::ATOMIC)));
+ current_.state == Event::State::ATOMIC));
} else {
DCHECK(IsSweepingInProgress());
}
@@ -762,14 +757,14 @@ void GCTracer::PrintNVP() const {
"holes_size_after=%zu "
"allocated=%zu "
"promoted=%zu "
- "semi_space_copied=%zu "
+ "new_space_survived=%zu "
"nodes_died_in_new=%d "
"nodes_copied_in_new=%d "
"nodes_promoted=%d "
"promotion_ratio=%.1f%% "
"average_survival_ratio=%.1f%% "
"promotion_rate=%.1f%% "
- "semi_space_copy_rate=%.1f%% "
+ "new_space_survive_rate_=%.1f%% "
"new_space_allocation_throughput=%.1f "
"unmapper_chunks=%d\n",
duration, spent_in_mutator, current_.TypeName(true),
@@ -800,11 +795,11 @@ void GCTracer::PrintNVP() const {
current_.end_object_size, current_.start_holes_size,
current_.end_holes_size, allocated_since_last_gc,
heap_->promoted_objects_size(),
- heap_->semi_space_copied_object_size(),
+ heap_->new_space_surviving_object_size(),
heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
heap_->nodes_promoted_, heap_->promotion_ratio_,
AverageSurvivalRatio(), heap_->promotion_rate_,
- heap_->semi_space_copied_rate_,
+ heap_->new_space_surviving_rate_,
NewSpaceAllocationThroughputInBytesPerMillisecond(),
heap_->memory_allocator()->unmapper()->NumberOfChunks());
break;
@@ -817,46 +812,92 @@ void GCTracer::PrintNVP() const {
"minor_mc=%.2f "
"time_to_safepoint=%.2f "
"mark=%.2f "
+ "mark.incremental_roots=%.2f "
+ "mark.finish_incremental=%.2f "
"mark.seed=%.2f "
- "mark.roots=%.2f "
- "mark.weak=%.2f "
+ "mark.closure_parallel=%.2f "
+ "mark.closure=%.2f "
"mark.global_handles=%.2f "
"clear=%.2f "
"clear.string_table=%.2f "
- "clear.weak_lists=%.2f "
+ "complete.sweep_array_buffers=%.2f "
"evacuate=%.2f "
+ "evacuate.clean_up=%.2f "
"evacuate.copy=%.2f "
+ "evacuate.prologue=%.2f "
+ "evacuate.epilogue=%.2f "
+ "evacuate.rebalance=%.2f "
"evacuate.update_pointers=%.2f "
"evacuate.update_pointers.slots=%.2f "
+ "evacuate.update_pointers.weak=%.2f "
+ "sweep=%.2f "
+ "sweep.new=%.2f "
+ "sweep.new_lo=%.2f "
+ "finish=%.2f "
+ "finish.sweep_array_buffers=%.2f "
"background.mark=%.2f "
+ "background.sweep=%.2f "
"background.evacuate.copy=%.2f "
"background.evacuate.update_pointers=%.2f "
"background.unmapper=%.2f "
"unmapper=%.2f "
- "update_marking_deque=%.2f "
- "reset_liveness=%.2f\n",
+ "total_size_before=%zu "
+ "total_size_after=%zu "
+ "holes_size_before=%zu "
+ "holes_size_after=%zu "
+ "allocated=%zu "
+ "promoted=%zu "
+ "new_space_survived=%zu "
+ "nodes_died_in_new=%d "
+ "nodes_copied_in_new=%d "
+ "nodes_promoted=%d "
+ "promotion_ratio=%.1f%% "
+ "average_survival_ratio=%.1f%% "
+ "promotion_rate=%.1f%% "
+ "new_space_survive_rate_=%.1f%% "
+ "new_space_allocation_throughput=%.1f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
current_scope(Scope::MINOR_MC),
current_scope(Scope::TIME_TO_SAFEPOINT),
current_scope(Scope::MINOR_MC_MARK),
- current_scope(Scope::MINOR_MC_MARK_SEED),
current_scope(Scope::MINOR_MC_MARK_ROOTS),
- current_scope(Scope::MINOR_MC_MARK_WEAK),
+ current_scope(Scope::MINOR_MC_MARK_FINISH_INCREMENTAL),
+ current_scope(Scope::MINOR_MC_MARK_SEED),
+ current_scope(Scope::MINOR_MC_MARK_CLOSURE_PARALLEL),
+ current_scope(Scope::MINOR_MC_MARK_CLOSURE),
current_scope(Scope::MINOR_MC_MARK_GLOBAL_HANDLES),
current_scope(Scope::MINOR_MC_CLEAR),
current_scope(Scope::MINOR_MC_CLEAR_STRING_TABLE),
- current_scope(Scope::MINOR_MC_CLEAR_WEAK_LISTS),
+ current_scope(Scope::MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS),
current_scope(Scope::MINOR_MC_EVACUATE),
+ current_scope(Scope::MINOR_MC_EVACUATE_CLEAN_UP),
current_scope(Scope::MINOR_MC_EVACUATE_COPY),
+ current_scope(Scope::MINOR_MC_EVACUATE_PROLOGUE),
+ current_scope(Scope::MINOR_MC_EVACUATE_EPILOGUE),
+ current_scope(Scope::MINOR_MC_EVACUATE_REBALANCE),
current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS),
current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS),
+ current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK),
+ current_scope(Scope::MINOR_MC_SWEEP),
+ current_scope(Scope::MINOR_MC_SWEEP_NEW),
+ current_scope(Scope::MINOR_MC_SWEEP_NEW_LO),
+ current_scope(Scope::MINOR_MC_FINISH),
+ current_scope(Scope::MINOR_MC_FINISH_SWEEP_ARRAY_BUFFERS),
current_scope(Scope::MINOR_MC_BACKGROUND_MARKING),
+ current_scope(Scope::MINOR_MC_BACKGROUND_SWEEPING),
current_scope(Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY),
current_scope(Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS),
current_scope(Scope::BACKGROUND_UNMAPPER),
- current_scope(Scope::UNMAPPER),
- current_scope(Scope::MINOR_MC_MARKING_DEQUE),
- current_scope(Scope::MINOR_MC_RESET_LIVENESS));
+ current_scope(Scope::UNMAPPER), current_.start_object_size,
+ current_.end_object_size, current_.start_holes_size,
+ current_.end_holes_size, allocated_since_last_gc,
+ heap_->promoted_objects_size(),
+ heap_->new_space_surviving_object_size(),
+ heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
+ heap_->nodes_promoted_, heap_->promotion_ratio_,
+ AverageSurvivalRatio(), heap_->promotion_rate_,
+ heap_->new_space_surviving_rate_,
+ NewSpaceAllocationThroughputInBytesPerMillisecond());
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
@@ -912,6 +953,8 @@ void GCTracer::PrintNVP() const {
"sweep=%.1f "
"sweep.code=%.1f "
"sweep.map=%.1f "
+ "sweep.new=%.1f "
+ "sweep.new_lo=%.1f "
"sweep.old=%.1f "
"incremental=%.1f "
"incremental.finalize=%.1f "
@@ -939,14 +982,14 @@ void GCTracer::PrintNVP() const {
"holes_size_after=%zu "
"allocated=%zu "
"promoted=%zu "
- "semi_space_copied=%zu "
+ "new_space_survived=%zu "
"nodes_died_in_new=%d "
"nodes_copied_in_new=%d "
"nodes_promoted=%d "
"promotion_ratio=%.1f%% "
"average_survival_ratio=%.1f%% "
"promotion_rate=%.1f%% "
- "semi_space_copy_rate=%.1f%% "
+ "new_space_survive_rate=%.1f%% "
"new_space_allocation_throughput=%.1f "
"unmapper_chunks=%d "
"compaction_speed=%.f\n",
@@ -996,6 +1039,8 @@ void GCTracer::PrintNVP() const {
current_scope(Scope::MC_PROLOGUE), current_scope(Scope::MC_SWEEP),
current_scope(Scope::MC_SWEEP_CODE),
current_scope(Scope::MC_SWEEP_MAP),
+ current_scope(Scope::MC_SWEEP_NEW),
+ current_scope(Scope::MC_SWEEP_NEW_LO),
current_scope(Scope::MC_SWEEP_OLD),
current_scope(Scope::MC_INCREMENTAL),
current_scope(Scope::MC_INCREMENTAL_FINALIZE),
@@ -1021,11 +1066,11 @@ void GCTracer::PrintNVP() const {
current_.end_object_size, current_.start_holes_size,
current_.end_holes_size, allocated_since_last_gc,
heap_->promoted_objects_size(),
- heap_->semi_space_copied_object_size(),
+ heap_->new_space_surviving_object_size(),
heap_->nodes_died_in_new_space_, heap_->nodes_copied_in_new_space_,
heap_->nodes_promoted_, heap_->promotion_ratio_,
AverageSurvivalRatio(), heap_->promotion_rate_,
- heap_->semi_space_copied_rate_,
+ heap_->new_space_surviving_rate_,
NewSpaceAllocationThroughputInBytesPerMillisecond(),
heap_->memory_allocator()->unmapper()->NumberOfChunks(),
CompactionSpeedInBytesPerMillisecond());
@@ -1320,29 +1365,6 @@ void GCTracer::RecordGCPhasesHistograms(RecordGCPhasesInfo::Mode mode) {
heap_->isolate()->counters()->gc_marking_sum()->AddSample(
static_cast<int>(overall_marking_time));
- // Filter out samples where
- // - we don't have high-resolution timers;
- // - size of marked objects is very small;
- // - marking time is rounded to 0;
- constexpr size_t kMinObjectSizeForReportingThroughput = 1024 * 1024;
- if (base::TimeTicks::IsHighResolution() &&
- heap_->SizeOfObjects() > kMinObjectSizeForReportingThroughput &&
- overall_marking_time > 0) {
- const double overall_v8_marking_time =
- overall_marking_time -
- current_.scopes[Scope::MC_MARK_EMBEDDER_TRACING];
- if (overall_v8_marking_time > 0) {
- const int main_thread_marking_throughput_mb_per_s =
- static_cast<int>(static_cast<double>(heap_->SizeOfObjects()) /
- overall_v8_marking_time * 1000 / 1024 / 1024);
- heap_->isolate()
- ->counters()
- ->gc_main_thread_marking_throughput()
- ->AddSample(
- static_cast<int>(main_thread_marking_throughput_mb_per_s));
- }
- }
-
DCHECK_EQ(Scope::LAST_TOP_MC_SCOPE, Scope::MC_SWEEP);
} else if (mode == RecordGCPhasesInfo::Mode::Scavenger) {
counters->gc_scavenger_scavenge_main()->AddSample(
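The PrintNVP changes above add, rename, or drop keys, but the output stays a flat line of space-separated key=value pairs per GC. A hedged sketch of consuming such a line (the sample string is invented, not actual tracer output):

#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Splits one NVP trace line into a key -> value map. Illustration of the
// format only, not code from V8.
std::map<std::string, std::string> ParseNvpLine(const std::string& line) {
  std::map<std::string, std::string> fields;
  std::istringstream stream(line);
  std::string token;
  while (stream >> token) {
    const auto eq = token.find('=');
    if (eq == std::string::npos) continue;
    fields[token.substr(0, eq)] = token.substr(eq + 1);
  }
  return fields;
}

int main() {
  const auto fields =
      ParseNvpLine("gc=mc new_space_survived=1024 promotion_rate=12.5%");
  std::cout << fields.at("new_space_survived") << "\n";  // prints 1024
  return 0;
}
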
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 9be60cf7c5..586aa86bf1 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -11,7 +11,6 @@
#include "src/base/optional.h"
#include "src/base/ring-buffer.h"
#include "src/common/globals.h"
-#include "src/heap/heap.h"
#include "src/init/heap-symbols.h"
#include "src/logging/counters.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
@@ -120,8 +119,8 @@ class V8_EXPORT_PRIVATE GCTracer {
MARK_COMPACTOR = 1,
INCREMENTAL_MARK_COMPACTOR = 2,
MINOR_MARK_COMPACTOR = 3,
- START = 4,
- INCREMENTAL_MINOR_MARK_COMPACTOR = 5,
+ INCREMENTAL_MINOR_MARK_COMPACTOR = 4,
+ START = 5,
};
// Returns true if the event corresponds to a young generation GC.
@@ -270,11 +269,9 @@ class V8_EXPORT_PRIVATE GCTracer {
void NotifyYoungCppGCRunning();
void NotifyYoungCppGCCompleted();
- void NotifyYoungGenerationHandling(
- YoungGenerationHandling young_generation_handling);
-
#ifdef DEBUG
V8_INLINE bool IsInObservablePause() const;
+ V8_INLINE bool IsInAtomicPause() const;
// Checks if the current event is consistent with a collector.
V8_INLINE bool IsConsistentWithCollector(GarbageCollector collector) const;
@@ -402,6 +399,10 @@ class V8_EXPORT_PRIVATE GCTracer {
V8_INLINE WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
#endif // defined(V8_RUNTIME_CALL_STATS)
+ bool IsCurrentGCDueToAllocationFailure() const {
+ return current_.gc_reason == GarbageCollectionReason::kAllocationFailure;
+ }
+
private:
FRIEND_TEST(GCTracer, AverageSpeed);
FRIEND_TEST(GCTracerTest, AllocationThroughput);
diff --git a/deps/v8/src/heap/global-handle-marking-visitor.cc b/deps/v8/src/heap/global-handle-marking-visitor.cc
index fc0d669fce..b466051380 100644
--- a/deps/v8/src/heap/global-handle-marking-visitor.cc
+++ b/deps/v8/src/heap/global-handle-marking-visitor.cc
@@ -4,16 +4,16 @@
#include "src/heap/global-handle-marking-visitor.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-worklist-inl.h"
namespace v8 {
namespace internal {
GlobalHandleMarkingVisitor::GlobalHandleMarkingVisitor(
- Heap& heap, MarkingState& marking_state,
- MarkingWorklists::Local& local_marking_worklist)
+ Heap& heap, MarkingWorklists::Local& local_marking_worklist)
: heap_(heap),
- marking_state_(marking_state),
+ marking_state_(*heap_.marking_state()),
local_marking_worklist_(local_marking_worklist),
traced_node_bounds_(
heap.isolate()->global_handles()->GetTracedNodeBounds()) {}
diff --git a/deps/v8/src/heap/global-handle-marking-visitor.h b/deps/v8/src/heap/global-handle-marking-visitor.h
index 1b2fbd9cbb..71e805810e 100644
--- a/deps/v8/src/heap/global-handle-marking-visitor.h
+++ b/deps/v8/src/heap/global-handle-marking-visitor.h
@@ -18,7 +18,7 @@ namespace internal {
// which requires them to be kept alive.
class GlobalHandleMarkingVisitor final : public ::heap::base::StackVisitor {
public:
- GlobalHandleMarkingVisitor(Heap&, MarkingState&, MarkingWorklists::Local&);
+ GlobalHandleMarkingVisitor(Heap&, MarkingWorklists::Local&);
~GlobalHandleMarkingVisitor() override = default;
void VisitPointer(const void*) override;
diff --git a/deps/v8/src/heap/heap-allocator-inl.h b/deps/v8/src/heap/heap-allocator-inl.h
index 0abf92fb10..06783b5ac6 100644
--- a/deps/v8/src/heap/heap-allocator-inl.h
+++ b/deps/v8/src/heap/heap-allocator-inl.h
@@ -225,6 +225,7 @@ V8_WARN_UNUSED_RESULT V8_INLINE HeapObject HeapAllocator::AllocateRawWith(
AllocationAlignment alignment) {
AllocationResult result;
HeapObject object;
+ size = ALIGN_TO_ALLOCATION_ALIGNMENT(size);
if (allocation == AllocationType::kYoung) {
result = AllocateRaw<AllocationType::kYoung>(size, origin, alignment);
if (result.To(&object)) {
diff --git a/deps/v8/src/heap/heap-allocator.cc b/deps/v8/src/heap/heap-allocator.cc
index c78098ef28..8824d45bcc 100644
--- a/deps/v8/src/heap/heap-allocator.cc
+++ b/deps/v8/src/heap/heap-allocator.cc
@@ -27,11 +27,11 @@ void HeapAllocator::Setup() {
? static_cast<PagedSpace*>(spaces_[MAP_SPACE])
: static_cast<PagedSpace*>(spaces_[OLD_SPACE]);
- shared_old_allocator_ = heap_->shared_old_allocator_.get();
+ shared_old_allocator_ = heap_->shared_space_allocator_.get();
shared_map_allocator_ = heap_->shared_map_allocator_
? heap_->shared_map_allocator_.get()
: shared_old_allocator_;
- shared_lo_space_ = heap_->shared_lo_space();
+ shared_lo_space_ = heap_->shared_lo_allocation_space();
}
void HeapAllocator::SetReadOnlySpace(ReadOnlySpace* read_only_space) {
@@ -90,7 +90,8 @@ AllocationResult HeapAllocator::AllocateRawWithLightRetrySlowPath(
// Two GCs before returning failure.
for (int i = 0; i < 2; i++) {
if (IsSharedAllocationType(allocation)) {
- heap_->CollectSharedGarbage(GarbageCollectionReason::kAllocationFailure);
+ heap_->CollectGarbageShared(heap_->main_thread_local_heap(),
+ GarbageCollectionReason::kAllocationFailure);
} else {
AllocationSpace space_to_gc = AllocationTypeToGCSpace(allocation);
if (v8_flags.minor_mc && i > 0) {
@@ -117,12 +118,13 @@ AllocationResult HeapAllocator::AllocateRawWithRetryOrFailSlowPath(
if (!result.IsFailure()) return result;
if (IsSharedAllocationType(allocation)) {
- heap_->CollectSharedGarbage(GarbageCollectionReason::kLastResort);
+ heap_->CollectGarbageShared(heap_->main_thread_local_heap(),
+ GarbageCollectionReason::kLastResort);
// We need always_allocate() to be true both on the client- and
// server-isolate. It is used in both code paths.
AlwaysAllocateScope shared_scope(
- heap_->isolate()->shared_isolate()->heap());
+ heap_->isolate()->shared_heap_isolate()->heap());
AlwaysAllocateScope client_scope(heap_);
result = AllocateRaw(size, allocation, origin, alignment);
} else {
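Both slow paths above share one shape: try the allocation, trigger a garbage collection, and retry, giving up (or escalating to a last-resort GC) after two collections. A stand-alone sketch of that retry loop with placeholder callbacks:

#include <functional>
#include <optional>

// AllocateFn returns an address on success; CollectFn runs one GC. Both are
// placeholders standing in for the heap's real allocation and GC entry points.
using AllocateFn = std::function<std::optional<void*>()>;
using CollectFn = std::function<void()>;

std::optional<void*> AllocateWithLightRetry(const AllocateFn& allocate,
                                            const CollectFn& collect_garbage) {
  if (auto result = allocate()) return result;
  // Two GCs before reporting failure, mirroring the loop in the diff above.
  for (int i = 0; i < 2; i++) {
    collect_garbage();
    if (auto result = allocate()) return result;
  }
  return std::nullopt;
}
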
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 6991a6dca5..64c075f269 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -14,7 +14,6 @@
#include "src/base/atomicops.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
-#include "src/base/sanitizer/msan.h"
#include "src/common/assert-scope.h"
#include "src/common/code-memory-access-inl.h"
#include "src/execution/isolate-data.h"
@@ -26,6 +25,7 @@
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
@@ -99,16 +99,15 @@ base::EnumSet<CodeFlushMode> Heap::GetCodeFlushMode(Isolate* isolate) {
Isolate* Heap::isolate() const { return Isolate::FromHeap(this); }
-#ifdef DEBUG
bool Heap::IsMainThread() const {
return isolate()->thread_id() == ThreadId::Current();
}
bool Heap::IsSharedMainThread() const {
- Isolate* shared_isolate = isolate()->shared_isolate();
- return shared_isolate && shared_isolate->thread_id() == ThreadId::Current();
+ if (!isolate()->has_shared_heap()) return false;
+ Isolate* shared_heap_isolate = isolate()->shared_heap_isolate();
+ return shared_heap_isolate->thread_id() == ThreadId::Current();
}
-#endif
int64_t Heap::external_memory() { return external_memory_.total(); }
@@ -123,7 +122,7 @@ PagedSpace* Heap::space_for_maps() {
ConcurrentAllocator* Heap::concurrent_allocator_for_maps() {
return V8_LIKELY(shared_map_allocator_) ? shared_map_allocator_.get()
- : shared_old_allocator_.get();
+ : shared_space_allocator_.get();
}
RootsTable& Heap::roots_table() { return isolate()->roots_table(); }
@@ -171,11 +170,12 @@ void Heap::SetPendingOptimizeForTestBytecode(Object hash_table) {
}
PagedSpace* Heap::paged_space(int idx) {
- DCHECK(idx == OLD_SPACE || idx == CODE_SPACE || idx == MAP_SPACE);
- return static_cast<PagedSpace*>(space_[idx]);
+ DCHECK(idx == OLD_SPACE || idx == CODE_SPACE || idx == MAP_SPACE ||
+ idx == SHARED_SPACE);
+ return static_cast<PagedSpace*>(space_[idx].get());
}
-Space* Heap::space(int idx) { return space_[idx]; }
+Space* Heap::space(int idx) { return space_[idx].get(); }
Address* Heap::NewSpaceAllocationTopAddress() {
return new_space_ ? new_space_->allocation_top_address() : nullptr;
@@ -353,93 +353,6 @@ void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyTagged(dst, src, static_cast<size_t>(byte_size / kTaggedSize));
}
-template <Heap::FindMementoMode mode>
-AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
- Address object_address = object.address();
- Address memento_address = object_address + object.SizeFromMap(map);
- Address last_memento_word_address = memento_address + kTaggedSize;
- // If the memento would be on another page, bail out immediately.
- if (!Page::OnSamePage(object_address, last_memento_word_address)) {
- return AllocationMemento();
- }
- HeapObject candidate = HeapObject::FromAddress(memento_address);
- ObjectSlot candidate_map_slot = candidate.map_slot();
- // This fast check may peek at an uninitialized word. However, the slow check
- // below (memento_address == top) ensures that this is safe. Mark the word as
- // initialized to silence MemorySanitizer warnings.
- MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
- if (!candidate_map_slot.contains_map_value(
- ReadOnlyRoots(this).allocation_memento_map().ptr())) {
- return AllocationMemento();
- }
-
- // Bail out if the memento is below the age mark, which can happen when
- // mementos survived because a page got moved within new space.
- Page* object_page = Page::FromAddress(object_address);
- if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
- Address age_mark =
- reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
- if (!object_page->Contains(age_mark)) {
- return AllocationMemento();
- }
- // Do an exact check in the case where the age mark is on the same page.
- if (object_address < age_mark) {
- return AllocationMemento();
- }
- }
-
- AllocationMemento memento_candidate = AllocationMemento::cast(candidate);
-
- // Depending on what the memento is used for, we might need to perform
- // additional checks.
- Address top;
- switch (mode) {
- case Heap::kForGC:
- return memento_candidate;
- case Heap::kForRuntime:
- if (memento_candidate.is_null()) return AllocationMemento();
- // Either the object is the last object in the new space, or there is
- // another object of at least word size (the header map word) following
- // it, so suffices to compare ptr and top here.
- top = NewSpaceTop();
- DCHECK(memento_address >= new_space()->limit() ||
- memento_address + AllocationMemento::kSize <= top);
- if ((memento_address != top) && memento_candidate.IsValid()) {
- return memento_candidate;
- }
- return AllocationMemento();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
-}
-
-void Heap::UpdateAllocationSite(Map map, HeapObject object,
- PretenuringFeedbackMap* pretenuring_feedback) {
- DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
-#ifdef DEBUG
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
- DCHECK_IMPLIES(chunk->IsToPage(),
- v8_flags.minor_mc ||
- chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
- DCHECK_IMPLIES(!chunk->InYoungGeneration(),
- chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
-#endif
- if (!v8_flags.allocation_site_pretenuring ||
- !AllocationSite::CanTrack(map.instance_type())) {
- return;
- }
- AllocationMemento memento_candidate =
- FindAllocationMemento<kForGC>(map, object);
- if (memento_candidate.is_null()) return;
-
- // Entering cached feedback is used in the parallel case. We are not allowed
- // to dereference the allocation site and rather have to postpone all checks
- // till actually merging the data.
- Address key = memento_candidate.GetAllocationSiteUnchecked();
- (*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
-}
-
bool Heap::IsPendingAllocationInternal(HeapObject object) {
DCHECK(deserialization_complete());
@@ -485,6 +398,8 @@ bool Heap::IsPendingAllocationInternal(HeapObject object) {
return addr == large_space->pending_object();
}
+ case SHARED_SPACE:
+ case SHARED_LO_SPACE:
case RO_SPACE:
UNREACHABLE();
}
diff --git a/deps/v8/src/heap/heap-verifier.cc b/deps/v8/src/heap/heap-verifier.cc
index 28061588c4..2c4e3fa870 100644
--- a/deps/v8/src/heap/heap-verifier.cc
+++ b/deps/v8/src/heap/heap-verifier.cc
@@ -200,7 +200,6 @@ class SlotVerifyingVisitor : public ObjectVisitorWithCageBases {
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(InTypedSet(SlotType::kEmbeddedObjectFull, rinfo->pc()) ||
InTypedSet(SlotType::kEmbeddedObjectCompressed, rinfo->pc()) ||
- InTypedSet(SlotType::kEmbeddedObjectData, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
InTypedSet(SlotType::kConstPoolEmbeddedObjectCompressed,
rinfo->constant_pool_entry_address())) ||
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index 5423eaaadb..e56924cb9c 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -115,7 +115,7 @@ inline void CombinedWriteBarrierInternal(HeapObject host, HeapObjectSlot slot,
}
// Marking barrier: mark value & record slots when marking is on.
- if (is_marking) {
+ if (V8_UNLIKELY(is_marking)) {
#ifdef V8_EXTERNAL_CODE_SPACE
// CodePageHeaderModificationScope is not required because the only case
// when a Code value is stored somewhere is during creation of a new Code
@@ -259,7 +259,7 @@ base::Optional<Heap*> WriteBarrier::GetHeapIfMarking(HeapObject object) {
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return {};
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
- if (!chunk->IsMarking()) return {};
+ if (V8_LIKELY(!chunk->IsMarking())) return {};
return chunk->GetHeap();
}
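The write-barrier hunks above wrap the marking checks in V8_UNLIKELY/V8_LIKELY so the compiler treats the marking path as the cold branch. On GCC and Clang such macros are conventionally built on __builtin_expect; the definitions below are illustrative assumptions, not V8's:

#if defined(__GNUC__) || defined(__clang__)
#define LIKELY(condition) __builtin_expect(!!(condition), 1)
#define UNLIKELY(condition) __builtin_expect(!!(condition), 0)
#else
#define LIKELY(condition) (condition)
#define UNLIKELY(condition) (condition)
#endif

inline bool IsMarking() { return false; }  // placeholder for chunk->IsMarking()
inline void MarkValueAndRecordSlot() {}    // placeholder for the marking path

inline void WriteBarrierSketch() {
  // Most writes happen while marking is off, so the marking branch is cold.
  if (UNLIKELY(IsMarking())) {
    MarkValueAndRecordSlot();
  }
}
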
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index d966d979c8..c607af9880 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -16,6 +16,7 @@
#include "src/base/bits.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
+#include "src/base/macros.h"
#include "src/base/once.h"
#include "src/base/platform/memory.h"
#include "src/base/platform/mutex.h"
@@ -33,6 +34,7 @@
#include "src/execution/microtask-queue.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
+#include "src/flags/flags.h"
#include "src/handles/global-handles-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/base/stack.h"
@@ -46,6 +48,7 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/heap/embedder-tracing.h"
+#include "src/heap/evacuation-verifier-inl.h"
#include "src/heap/finalization-registry-cleanup-task.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer-inl.h"
@@ -62,6 +65,8 @@
#include "src/heap/mark-compact.h"
#include "src/heap/marking-barrier-inl.h"
#include "src/heap/marking-barrier.h"
+#include "src/heap/marking-state-inl.h"
+#include "src/heap/marking-state.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-measurement.h"
@@ -72,6 +77,7 @@
#include "src/heap/objects-visiting.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/parked-scope.h"
+#include "src/heap/pretenuring-handler.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
@@ -199,7 +205,7 @@ class MinorMCTaskObserver final : public AllocationObserver {
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
- if (v8_flags.concurrent_minor_mc) {
+ if (v8_flags.concurrent_minor_mc_marking) {
if (heap_->incremental_marking()->IsMinorMarking()) {
heap_->concurrent_marking()->RescheduleJobIfNeeded(
GarbageCollector::MINOR_MARK_COMPACTOR);
@@ -217,13 +223,16 @@ Heap::Heap()
: isolate_(isolate()),
heap_allocator_(this),
memory_pressure_level_(MemoryPressureLevel::kNone),
- global_pretenuring_feedback_(kInitialFeedbackCapacity),
safepoint_(std::make_unique<IsolateSafepoint>(this)),
external_string_table_(this),
allocation_type_for_in_place_internalizable_strings_(
isolate()->OwnsStringTables() ? AllocationType::kOld
: AllocationType::kSharedOld),
- collection_barrier_(new CollectionBarrier(this)) {
+ collection_barrier_(new CollectionBarrier(this)),
+ marking_state_(isolate_),
+ non_atomic_marking_state_(isolate_),
+ atomic_marking_state_(isolate_),
+ pretenuring_handler_(this) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size() & (Page::kPageSize - 1));
@@ -1066,8 +1075,8 @@ void Heap::GarbageCollectionPrologue(
// Reset GC statistics.
promoted_objects_size_ = 0;
- previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
- semi_space_copied_object_size_ = 0;
+ previous_new_space_surviving_object_size_ = new_space_surviving_object_size_;
+ new_space_surviving_object_size_ = 0;
nodes_died_in_new_space_ = 0;
nodes_copied_in_new_space_ = 0;
nodes_promoted_ = 0;
@@ -1082,9 +1091,9 @@ void Heap::GarbageCollectionPrologue(
#endif // DEBUG
if (new_space_ && new_space_->IsAtMaximumCapacity()) {
- maximum_size_scavenges_++;
+ maximum_size_minor_gcs_++;
} else {
- maximum_size_scavenges_ = 0;
+ maximum_size_minor_gcs_ = 0;
}
memory_allocator()->unmapper()->PrepareForGC();
}
@@ -1126,31 +1135,6 @@ size_t Heap::UsedGlobalHandlesSize() {
return isolate_->global_handles()->UsedSize();
}
-void Heap::MergeAllocationSitePretenuringFeedback(
- const PretenuringFeedbackMap& local_pretenuring_feedback) {
- PtrComprCageBase cage_base(isolate());
- AllocationSite site;
- for (auto& site_and_count : local_pretenuring_feedback) {
- site = site_and_count.first;
- MapWord map_word = site.map_word(cage_base, kRelaxedLoad);
- if (map_word.IsForwardingAddress()) {
- site = AllocationSite::cast(map_word.ToForwardingAddress());
- }
-
- // We have not validated the allocation site yet, since we have not
- // dereferenced the site during collecting information.
- // This is an inlined check of AllocationMemento::IsValid.
- if (!site.IsAllocationSite() || site.IsZombie()) continue;
-
- const int value = static_cast<int>(site_and_count.second);
- DCHECK_LT(0, value);
- if (site.IncrementMementoFoundCount(value)) {
- // For sites in the global map the count is accessed through the site.
- global_pretenuring_feedback_.insert(std::make_pair(site, 0));
- }
- }
-}
-
void Heap::AddAllocationObserversToAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
@@ -1192,197 +1176,6 @@ void Heap::PublishPendingAllocations() {
code_lo_space_->ResetPendingObject();
}
-namespace {
-inline bool MakePretenureDecision(
- AllocationSite site, AllocationSite::PretenureDecision current_decision,
- double ratio, bool maximum_size_scavenge) {
- // Here we just allow state transitions from undecided or maybe tenure
- // to don't tenure, maybe tenure, or tenure.
- if ((current_decision == AllocationSite::kUndecided ||
- current_decision == AllocationSite::kMaybeTenure)) {
- if (ratio >= AllocationSite::kPretenureRatio) {
- // We just transition into tenure state when the semi-space was at
- // maximum capacity.
- if (maximum_size_scavenge) {
- site.set_deopt_dependent_code(true);
- site.set_pretenure_decision(AllocationSite::kTenure);
- // Currently we just need to deopt when we make a state transition to
- // tenure.
- return true;
- }
- site.set_pretenure_decision(AllocationSite::kMaybeTenure);
- } else {
- site.set_pretenure_decision(AllocationSite::kDontTenure);
- }
- }
- return false;
-}
-
-// Clear feedback calculation fields until the next gc.
-inline void ResetPretenuringFeedback(AllocationSite site) {
- site.set_memento_found_count(0);
- site.set_memento_create_count(0);
-}
-
-inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
- bool maximum_size_scavenge) {
- bool deopt = false;
- int create_count = site.memento_create_count();
- int found_count = site.memento_found_count();
- bool minimum_mementos_created =
- create_count >= AllocationSite::kPretenureMinimumCreated;
- double ratio =
- minimum_mementos_created || v8_flags.trace_pretenuring_statistics
- ? static_cast<double>(found_count) / create_count
- : 0.0;
- AllocationSite::PretenureDecision current_decision =
- site.pretenure_decision();
-
- if (minimum_mementos_created) {
- deopt = MakePretenureDecision(site, current_decision, ratio,
- maximum_size_scavenge);
- }
-
- if (v8_flags.trace_pretenuring_statistics) {
- PrintIsolate(isolate,
- "pretenuring: AllocationSite(%p): (created, found, ratio) "
- "(%d, %d, %f) %s => %s\n",
- reinterpret_cast<void*>(site.ptr()), create_count, found_count,
- ratio, site.PretenureDecisionName(current_decision),
- site.PretenureDecisionName(site.pretenure_decision()));
- }
-
- ResetPretenuringFeedback(site);
- return deopt;
-}
-
-bool PretenureAllocationSiteManually(Isolate* isolate, AllocationSite site) {
- AllocationSite::PretenureDecision current_decision =
- site.pretenure_decision();
- bool deopt = true;
- if (current_decision == AllocationSite::kUndecided ||
- current_decision == AllocationSite::kMaybeTenure) {
- site.set_deopt_dependent_code(true);
- site.set_pretenure_decision(AllocationSite::kTenure);
- } else {
- deopt = false;
- }
- if (v8_flags.trace_pretenuring_statistics) {
- PrintIsolate(isolate,
- "pretenuring manually requested: AllocationSite(%p): "
- "%s => %s\n",
- reinterpret_cast<void*>(site.ptr()),
- site.PretenureDecisionName(current_decision),
- site.PretenureDecisionName(site.pretenure_decision()));
- }
-
- ResetPretenuringFeedback(site);
- return deopt;
-}
-
-} // namespace
-
-void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
- global_pretenuring_feedback_.erase(site);
-}
-
-bool Heap::DeoptMaybeTenuredAllocationSites() {
- return new_space_ && new_space_->IsAtMaximumCapacity() &&
- maximum_size_scavenges_ == 0;
-}
-
-void Heap::ProcessPretenuringFeedback() {
- bool trigger_deoptimization = false;
- if (v8_flags.allocation_site_pretenuring) {
- int tenure_decisions = 0;
- int dont_tenure_decisions = 0;
- int allocation_mementos_found = 0;
- int allocation_sites = 0;
- int active_allocation_sites = 0;
-
- AllocationSite site;
-
- // Step 1: Digest feedback for recorded allocation sites.
- bool maximum_size_scavenge = MaximumSizeScavenge();
- for (auto& site_and_count : global_pretenuring_feedback_) {
- allocation_sites++;
- site = site_and_count.first;
- // Count is always access through the site.
- DCHECK_EQ(0, site_and_count.second);
- int found_count = site.memento_found_count();
- // An entry in the storage does not imply that the count is > 0 because
- // allocation sites might have been reset due to too many objects dying
- // in old space.
- if (found_count > 0) {
- DCHECK(site.IsAllocationSite());
- active_allocation_sites++;
- allocation_mementos_found += found_count;
- if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
- trigger_deoptimization = true;
- }
- if (site.GetAllocationType() == AllocationType::kOld) {
- tenure_decisions++;
- } else {
- dont_tenure_decisions++;
- }
- }
- }
-
- // Step 2: Pretenure allocation sites for manual requests.
- if (allocation_sites_to_pretenure_) {
- while (!allocation_sites_to_pretenure_->empty()) {
- auto pretenure_site = allocation_sites_to_pretenure_->Pop();
- if (PretenureAllocationSiteManually(isolate_, pretenure_site)) {
- trigger_deoptimization = true;
- }
- }
- allocation_sites_to_pretenure_.reset();
- }
-
- // Step 3: Deopt maybe tenured allocation sites if necessary.
- bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
- if (deopt_maybe_tenured) {
- ForeachAllocationSite(
- allocation_sites_list(),
- [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
- DCHECK(site.IsAllocationSite());
- allocation_sites++;
- if (site.IsMaybeTenure()) {
- site.set_deopt_dependent_code(true);
- trigger_deoptimization = true;
- }
- });
- }
-
- if (trigger_deoptimization) {
- isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
- }
-
- if (v8_flags.trace_pretenuring_statistics &&
- (allocation_mementos_found > 0 || tenure_decisions > 0 ||
- dont_tenure_decisions > 0)) {
- PrintIsolate(isolate(),
- "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
- "active_sites=%d "
- "mementos=%d tenured=%d not_tenured=%d\n",
- deopt_maybe_tenured ? 1 : 0, allocation_sites,
- active_allocation_sites, allocation_mementos_found,
- tenure_decisions, dont_tenure_decisions);
- }
-
- global_pretenuring_feedback_.clear();
- global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
- }
-}
-
-void Heap::PretenureAllocationSiteOnNextCollection(AllocationSite site) {
- if (!allocation_sites_to_pretenure_) {
- allocation_sites_to_pretenure_.reset(
- new GlobalHandleVector<AllocationSite>(this));
- }
- allocation_sites_to_pretenure_->Push(site);
-}
-
void Heap::InvalidateCodeDeoptimizationData(Code code) {
CodePageMemoryModificationScope modification_scope(code);
code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
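The large block removed above (memento digestion plus the tenure/don't-tenure decision) moves into the new PretenuringHandler. Its core decision reduces to a ratio test, restated here as a compact stand-alone sketch; the threshold value is an assumption (V8 defines kPretenureRatio on AllocationSite):

// found_count / create_count >= kPretenureRatio moves a site towards tenuring,
// but the transition to kTenure is only committed when new space was at
// maximum capacity, mirroring MakePretenureDecision above.
enum class Decision { kUndecided, kDontTenure, kMaybeTenure, kTenure };

constexpr double kPretenureRatio = 0.85;  // assumed threshold

Decision Digest(int found_count, int create_count, bool new_space_at_capacity,
                Decision current) {
  if (create_count == 0) return current;
  if (current != Decision::kUndecided && current != Decision::kMaybeTenure)
    return current;
  const double ratio = static_cast<double>(found_count) / create_count;
  if (ratio < kPretenureRatio) return Decision::kDontTenure;
  return new_space_at_capacity ? Decision::kTenure : Decision::kMaybeTenure;
}
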
@@ -1485,6 +1278,10 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
+ if (!v8_flags.minor_mc) {
+ SemiSpaceNewSpace::From(new_space())->MakeAllPagesInFromSpaceIterable();
+ }
+
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
new_space()->ClearUnusedObjectStartBitmaps();
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
@@ -1587,7 +1384,7 @@ size_t Heap::MinorMCTaskTriggerSize() const {
}
void Heap::StartMinorMCIncrementalMarkingIfNeeded() {
- if (v8_flags.concurrent_minor_mc && !IsTearingDown() &&
+ if (v8_flags.concurrent_minor_mc_marking && !IsTearingDown() &&
!incremental_marking()->IsMarking() &&
incremental_marking()->CanBeStarted() && V8_LIKELY(!v8_flags.gc_global) &&
(new_space()->Size() >= MinorMCTaskTriggerSize())) {
@@ -2035,16 +1832,26 @@ void Heap::StartIncrementalMarking(int gc_flags,
GarbageCollector collector) {
DCHECK(incremental_marking()->IsStopped());
- // Sweeping needs to be completed such that markbits are all cleared before
- // starting marking again.
- CompleteSweepingFull();
+ if (IsYoungGenerationCollector(collector)) {
+ CompleteSweepingYoung(collector);
+ } else {
+ // Sweeping needs to be completed such that markbits are all cleared before
+ // starting marking again.
+ CompleteSweepingFull();
+ }
+ base::Optional<GlobalSafepointScope> global_safepoint_scope;
base::Optional<SafepointScope> safepoint_scope;
{
AllowGarbageCollection allow_shared_gc;
IgnoreLocalGCRequests ignore_gc_requests(this);
- safepoint_scope.emplace(this);
+
+ if (isolate()->is_shared_heap_isolate()) {
+ global_safepoint_scope.emplace(isolate());
+ } else {
+ safepoint_scope.emplace(this);
+ }
}
#ifdef DEBUG
@@ -2062,11 +1869,13 @@ void Heap::StartIncrementalMarking(int gc_flags,
}
void Heap::CompleteSweepingFull() {
- array_buffer_sweeper()->EnsureFinished();
- mark_compact_collector()->EnsureSweepingCompleted(
- MarkCompactCollector::SweepingForcedFinalizationMode::kUnifiedHeap);
+ {
+ TRACE_GC(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
+ array_buffer_sweeper()->EnsureFinished();
+ }
+ EnsureSweepingCompleted(SweepingForcedFinalizationMode::kUnifiedHeap);
- DCHECK(!mark_compact_collector()->sweeping_in_progress());
+ DCHECK(!sweeping_in_progress());
DCHECK_IMPLIES(cpp_heap(),
!CppHeap::From(cpp_heap())->sweeper().IsSweepingInProgress());
DCHECK(!tracer()->IsSweepingInProgress());
@@ -2078,7 +1887,7 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
// Do not start incremental marking while invoking GC callbacks.
// Heap::CollectGarbage already decided which GC is going to be invoked. In
// case it chose a young-gen GC, starting an incremental full GC during
- // callbacks would break the seperate GC phases guarantee.
+ // callbacks would break the separate GC phases guarantee.
return;
}
if (incremental_marking()->IsStopped()) {
@@ -2230,12 +2039,26 @@ void Heap::CheckCollectionRequested() {
#if V8_ENABLE_WEBASSEMBLY
void Heap::EnsureWasmCanonicalRttsSize(int length) {
+ HandleScope scope(isolate());
+
Handle<WeakArrayList> current_rtts = handle(wasm_canonical_rtts(), isolate_);
if (length <= current_rtts->length()) return;
- Handle<WeakArrayList> result = WeakArrayList::EnsureSpace(
+ Handle<WeakArrayList> new_rtts = WeakArrayList::EnsureSpace(
isolate(), current_rtts, length, AllocationType::kOld);
- result->set_length(length);
- set_wasm_canonical_rtts(*result);
+ new_rtts->set_length(length);
+ set_wasm_canonical_rtts(*new_rtts);
+
+  // Wrappers are indexed by canonical type index, plus an additional boolean
+  // indicating whether the corresponding function is imported or not.
+ int required_wrapper_length = 2 * length;
+ Handle<WeakArrayList> current_wrappers =
+ handle(js_to_wasm_wrappers(), isolate_);
+ if (required_wrapper_length <= current_wrappers->length()) return;
+ Handle<WeakArrayList> new_wrappers =
+ WeakArrayList::EnsureSpace(isolate(), current_wrappers,
+ required_wrapper_length, AllocationType::kOld);
+ new_wrappers->set_length(required_wrapper_length);
+ set_js_to_wasm_wrappers(*new_wrappers);
}
#endif
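Per the comment in the hunk above, the wrapper cache needs 2 * length entries because each canonical type gets one slot per is-imported flavor. One plausible index computation consistent with that sizing (illustrative only, not V8's accessor):

constexpr int WrapperSlot(int canonical_type_index, bool is_import) {
  // Two consecutive slots per canonical type: [not imported, imported].
  return 2 * canonical_type_index + (is_import ? 1 : 0);
}

static_assert(WrapperSlot(0, false) == 0, "first slot of type 0");
static_assert(WrapperSlot(0, true) == 1, "import flavor of type 0");
static_assert(WrapperSlot(3, true) == 7, "stays below 2 * length for index < length");
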
@@ -2245,19 +2068,19 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
static_cast<double>(start_new_space_size) * 100);
- if (previous_semi_space_copied_object_size_ > 0) {
+ if (previous_new_space_surviving_object_size_ > 0) {
promotion_rate_ =
(static_cast<double>(promoted_objects_size_) /
- static_cast<double>(previous_semi_space_copied_object_size_) * 100);
+ static_cast<double>(previous_new_space_surviving_object_size_) * 100);
} else {
promotion_rate_ = 0;
}
- semi_space_copied_rate_ =
- (static_cast<double>(semi_space_copied_object_size_) /
+ new_space_surviving_rate_ =
+ (static_cast<double>(new_space_surviving_object_size_) /
static_cast<double>(start_new_space_size) * 100);
- double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
+ double survival_rate = promotion_ratio_ + new_space_surviving_rate_;
tracer()->AddSurvivalRatio(survival_rate);
}
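UpdateSurvivalStatistics, switched above from semi-space to new-space terminology, derives its percentages from the new-space size at the start of the cycle. A worked stand-alone example with made-up byte counts:

#include <cassert>

int main() {
  const double start_new_space_size = 8.0 * 1024 * 1024;      // 8 MB before GC
  const double promoted_objects_size = 1.0 * 1024 * 1024;     // 1 MB promoted
  const double new_space_surviving_size = 2.0 * 1024 * 1024;  // 2 MB survived in place

  const double promotion_ratio =
      promoted_objects_size / start_new_space_size * 100;     // 12.5%
  const double new_space_surviving_rate =
      new_space_surviving_size / start_new_space_size * 100;  // 25.0%
  const double survival_rate = promotion_ratio + new_space_surviving_rate;

  assert(survival_rate == 37.5);  // exact for these power-of-two inputs
  return 0;
}
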
@@ -2319,18 +2142,31 @@ size_t Heap::PerformGarbageCollection(
DCHECK(tracer()->IsConsistentWithCollector(collector));
TRACE_GC_EPOCH(tracer(), CollectorScopeId(collector), ThreadKind::kMain);
- base::Optional<SafepointScope> safepoint_scope;
+ base::Optional<GlobalSafepointScope> global_safepoint_scope;
+ base::Optional<SafepointScope> isolate_safepoint_scope;
{
AllowGarbageCollection allow_shared_gc;
IgnoreLocalGCRequests ignore_gc_requests(this);
- safepoint_scope.emplace(this);
+
+ if (isolate()->is_shared_heap_isolate()) {
+ global_safepoint_scope.emplace(isolate());
+ } else {
+ isolate_safepoint_scope.emplace(this);
+ }
}
collection_barrier_->StopTimeToCollectionTimer();
HeapVerifier::VerifyHeapIfEnabled(this);
+ if (isolate()->is_shared_heap_isolate()) {
+ isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
+ if (client->is_shared_heap_isolate()) return;
+ HeapVerifier::VerifyHeapIfEnabled(client->heap());
+ });
+ }
+
tracer()->StartInSafepoint();
GarbageCollectionPrologueInSafepoint();
@@ -2349,7 +2185,7 @@ size_t Heap::PerformGarbageCollection(
Scavenge();
}
- ProcessPretenuringFeedback();
+ pretenuring_handler_.ProcessPretenuringFeedback();
UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size));
ConfigureInitialOldGenerationSize();
@@ -2395,8 +2231,9 @@ size_t Heap::PerformGarbageCollection(
if (cpp_heap() && IsYoungGenerationCollector(collector)) {
const bool with_stack = (gc_reason != GarbageCollectionReason::kTask);
CppHeap::From(cpp_heap())
- ->RunMinorGC(with_stack ? CppHeap::StackState::kMayContainHeapPointers
- : CppHeap::StackState::kNoHeapPointers);
+ ->RunMinorGCIfNeeded(with_stack
+ ? CppHeap::StackState::kMayContainHeapPointers
+ : CppHeap::StackState::kNoHeapPointers);
}
#endif // defined(CPPGC_YOUNG_GENERATION)
@@ -2408,16 +2245,59 @@ size_t Heap::PerformGarbageCollection(
HeapVerifier::VerifyHeapIfEnabled(this);
+ if (isolate()->is_shared_heap_isolate()) {
+ isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
+ if (client->is_shared_heap_isolate()) return;
+ HeapVerifier::VerifyHeapIfEnabled(client->heap());
+ });
+ }
+
return freed_global_handles;
}
-void Heap::CollectSharedGarbage(GarbageCollectionReason gc_reason) {
+bool Heap::CollectGarbageShared(LocalHeap* local_heap,
+ GarbageCollectionReason gc_reason) {
CHECK(deserialization_complete());
- DCHECK(!IsShared());
- DCHECK_NOT_NULL(isolate()->shared_isolate());
+ DCHECK(isolate()->has_shared_heap());
+
+ if (v8_flags.shared_space) {
+ Isolate* shared_space_isolate = isolate()->shared_space_isolate();
+ return shared_space_isolate->heap()->CollectGarbageFromAnyThread(local_heap,
+ gc_reason);
+
+ } else {
+ DCHECK(!IsShared());
+ DCHECK_NOT_NULL(isolate()->shared_isolate());
+
+ isolate()->shared_isolate()->heap()->PerformSharedGarbageCollection(
+ isolate(), gc_reason);
+ return true;
+ }
+}
+
+bool Heap::CollectGarbageFromAnyThread(LocalHeap* local_heap,
+ GarbageCollectionReason gc_reason) {
+ DCHECK(local_heap->IsRunning());
+
+ if (isolate() == local_heap->heap()->isolate() &&
+ local_heap->is_main_thread()) {
+ CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
+ return true;
+ } else {
+ if (!collection_barrier_->TryRequestGC()) return false;
+
+ const LocalHeap::ThreadState old_state =
+ main_thread_local_heap()->state_.SetCollectionRequested();
- isolate()->shared_isolate()->heap()->PerformSharedGarbageCollection(
- isolate(), gc_reason);
+ if (old_state.IsRunning()) {
+ const bool performed_gc =
+ collection_barrier_->AwaitCollectionBackground(local_heap);
+ return performed_gc;
+ } else {
+ DCHECK(old_state.IsParked());
+ return false;
+ }
+ }
}
void Heap::PerformSharedGarbageCollection(Isolate* initiator,
@@ -2486,19 +2366,29 @@ void Heap::CompleteSweepingYoung(GarbageCollector collector) {
array_buffer_sweeper()->EnsureFinished();
}
- // If sweeping is in progress and there are no sweeper tasks running, finish
- // the sweeping here, to avoid having to pause and resume during the young
- // generation GC.
- mark_compact_collector()->FinishSweepingIfOutOfWork();
+ if (v8_flags.minor_mc) {
+ DCHECK(v8_flags.separate_gc_phases);
+ // Do not interleave sweeping.
+ EnsureSweepingCompleted(SweepingForcedFinalizationMode::kV8Only);
+ } else {
+ // If sweeping is in progress and there are no sweeper tasks running, finish
+ // the sweeping here, to avoid having to pause and resume during the young
+ // generation GC.
+ FinishSweepingIfOutOfWork();
+ }
#if defined(CPPGC_YOUNG_GENERATION)
// Always complete sweeping if young generation is enabled.
- if (cpp_heap()) CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
+ if (cpp_heap()) {
+ if (auto* iheap = CppHeap::From(cpp_heap());
+ iheap->generational_gc_supported())
+ iheap->FinishSweepingIfRunning();
+ }
#endif // defined(CPPGC_YOUNG_GENERATION)
}
-void Heap::EnsureSweepingCompleted(HeapObject object) {
- if (!mark_compact_collector()->sweeping_in_progress()) return;
+void Heap::EnsureSweepingCompletedForObject(HeapObject object) {
+ if (!sweeping_in_progress()) return;
BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromHeapObject(object);
if (basic_chunk->InReadOnlySpace()) return;
@@ -2510,7 +2400,7 @@ void Heap::EnsureSweepingCompleted(HeapObject object) {
DCHECK(!chunk->IsLargePage());
Page* page = Page::cast(chunk);
- mark_compact_collector()->EnsurePageIsSwept(page);
+ sweeper()->EnsurePageIsSwept(page);
}
void Heap::RecomputeLimits(GarbageCollector collector) {
@@ -2700,9 +2590,6 @@ void Heap::Scavenge() {
"[IncrementalMarking] Scavenge during marking.\n");
}
- tracer()->NotifyYoungGenerationHandling(
- YoungGenerationHandling::kRegularScavenge);
-
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
base::MutexGuard guard(relocation_mutex());
// Young generation garbage collection is orthogonal from full GC marking. It
@@ -3043,7 +2930,8 @@ void Heap::ResetAllAllocationSitesDependentCode(AllocationType allocation) {
site.ResetPretenureDecision();
site.set_deopt_dependent_code(true);
marked = true;
- RemoveAllocationSitePretenuringFeedback(site);
+ pretenuring_handler_
+ .RemoveAllocationSitePretenuringFeedback(site);
return;
}
});
@@ -3114,6 +3002,7 @@ static_assert(!USE_ALLOCATION_ALIGNMENT_BOOL ||
(HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
+ if (V8_COMPRESS_POINTERS_8GB_BOOL) return 0;
switch (alignment) {
case kTaggedAligned:
return 0;
@@ -3127,10 +3016,12 @@ int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
// static
int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
+ if (V8_COMPRESS_POINTERS_8GB_BOOL) return 0;
if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0)
return kTaggedSize;
- if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0)
+ if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0) {
return kDoubleSize - kTaggedSize; // No fill if double is always aligned.
+ }
return 0;
}
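
For illustration, a minimal sketch (not part of the patch) of how the fill sizes above come out, assuming pointer compression (kTaggedSize == 4, kDoubleSize == 8) and V8_COMPRESS_POINTERS_8GB_BOOL disabled:

void AlignmentFillExample() {
  // Already on an 8-byte boundary: a double-aligned allocation needs no fill.
  CHECK_EQ(0, Heap::GetFillToAlign(static_cast<Address>(0x1000), kDoubleAligned));
  // Off by one tagged slot: a single kTaggedSize filler restores alignment.
  CHECK_EQ(kTaggedSize,
           Heap::GetFillToAlign(static_cast<Address>(0x1004), kDoubleAligned));
  // kDoubleUnaligned asks for the payload to start off the 8-byte boundary,
  // so an aligned address gets a kDoubleSize - kTaggedSize fill.
  CHECK_EQ(kDoubleSize - kTaggedSize,
           Heap::GetFillToAlign(static_cast<Address>(0x1000), kDoubleUnaligned));
}
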
@@ -3229,6 +3120,12 @@ namespace {
void CreateFillerObjectAtImpl(Heap* heap, Address addr, int size,
ClearFreedMemoryMode clear_memory_mode) {
if (size == 0) return;
+ DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
+ IsAligned(addr, kObjectAlignment8GbHeap));
+ DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
+ IsAligned(size, kObjectAlignment8GbHeap));
+ // TODO(v8:13070): Filler sizes are irrelevant for 8GB+ heaps. Adding them
+ // should be avoided in this mode.
HeapObject filler = HeapObject::FromAddress(addr);
ReadOnlyRoots roots(heap);
if (size == kTaggedSize) {
@@ -3569,6 +3466,7 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
if (MayContainRecordedSlots(object)) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
+ DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_SHARED>(object));
DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
}
#endif
@@ -3587,9 +3485,9 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
if (incremental_marking()->black_allocation() &&
- incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
+ marking_state()->IsBlackOrGrey(filler)) {
Page* page = Page::FromAddress(new_end);
- incremental_marking()->marking_state()->bitmap(page)->ClearRange(
+ marking_state()->bitmap(page)->ClearRange(
page->AddressToMarkbitIndex(new_end),
page->AddressToMarkbitIndex(new_end + bytes_to_trim));
}
@@ -3613,8 +3511,7 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
}
void Heap::MakeHeapIterable() {
- mark_compact_collector()->EnsureSweepingCompleted(
- MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only);
+ EnsureSweepingCompleted(SweepingForcedFinalizationMode::kV8Only);
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MakeLinearAllocationAreaIterable();
@@ -3626,6 +3523,9 @@ void Heap::MakeHeapIterable() {
space->MakeLinearAllocationAreaIterable();
}
+ if (v8_flags.shared_space && shared_space_allocator_) {
+ shared_space_allocator_->MakeLinearAllocationAreaIterable();
+ }
if (new_space()) new_space()->MakeLinearAllocationAreaIterable();
}
@@ -3633,17 +3533,26 @@ void Heap::FreeLinearAllocationAreas() {
safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
+ if (isolate()->is_shared_space_isolate()) {
+ isolate()->global_safepoint()->IterateClientIsolates([](Isolate* client) {
+ client->heap()->FreeSharedLinearAllocationAreas();
+ });
+ }
+
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
space->FreeLinearAllocationArea();
}
+ if (v8_flags.shared_space && shared_space_allocator_) {
+ shared_space_allocator_->FreeLinearAllocationArea();
+ }
if (new_space()) new_space()->FreeLinearAllocationArea();
}
void Heap::FreeSharedLinearAllocationAreas() {
- if (!isolate()->shared_isolate()) return;
+ if (!isolate()->has_shared_heap()) return;
safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->FreeSharedLinearAllocationArea();
});
@@ -3651,12 +3560,34 @@ void Heap::FreeSharedLinearAllocationAreas() {
}
void Heap::FreeMainThreadSharedLinearAllocationAreas() {
- if (!isolate()->shared_isolate()) return;
- shared_old_allocator_->FreeLinearAllocationArea();
+ if (!isolate()->has_shared_heap()) return;
+ shared_space_allocator_->FreeLinearAllocationArea();
if (shared_map_allocator_) shared_map_allocator_->FreeLinearAllocationArea();
main_thread_local_heap()->FreeSharedLinearAllocationArea();
}
+void Heap::MarkSharedLinearAllocationAreasBlack() {
+ DCHECK(v8_flags.shared_space);
+ if (shared_space_allocator_) {
+ shared_space_allocator_->MarkLinearAllocationAreaBlack();
+ }
+ safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+ local_heap->MarkSharedLinearAllocationAreaBlack();
+ });
+ main_thread_local_heap()->MarkSharedLinearAllocationAreaBlack();
+}
+
+void Heap::UnmarkSharedLinearAllocationAreas() {
+ DCHECK(v8_flags.shared_space);
+ if (shared_space_allocator_) {
+ shared_space_allocator_->UnmarkLinearAllocationArea();
+ }
+ safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
+    local_heap->UnmarkSharedLinearAllocationArea();
+  });
+  main_thread_local_heap()->UnmarkSharedLinearAllocationArea();
+}
+
namespace {
double ComputeMutatorUtilizationImpl(double mutator_speed, double gc_speed) {
@@ -3789,19 +3720,25 @@ void Heap::ActivateMemoryReducerIfNeeded() {
}
}
-void Heap::ReduceNewSpaceSize() {
+bool Heap::ShouldReduceNewSpaceSize() const {
static const size_t kLowAllocationThroughput = 1000;
+
+ if (v8_flags.predictable) return false;
+
const double allocation_throughput =
- tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
+ tracer_->CurrentAllocationThroughputInBytesPerMillisecond();
- if (v8_flags.predictable) return;
+ return ShouldReduceMemory() ||
+ ((allocation_throughput != 0) &&
+ (allocation_throughput < kLowAllocationThroughput));
+}
- if (ShouldReduceMemory() ||
- ((allocation_throughput != 0) &&
- (allocation_throughput < kLowAllocationThroughput))) {
- new_space_->Shrink();
- new_lo_space_->SetCapacity(new_space_->Capacity());
- }
+void Heap::ReduceNewSpaceSize() {
+ if (!ShouldReduceNewSpaceSize()) return;
+
+ // MinorMC shrinks new space as part of sweeping.
+ if (!v8_flags.minor_mc) new_space_->Shrink();
+ new_lo_space_->SetCapacity(new_space_->Capacity());
}
size_t Heap::NewSpaceSize() { return new_space() ? new_space()->Size() : 0; }
@@ -3874,22 +3811,46 @@ void Heap::NotifyObjectLayoutChange(
#endif
}
-void Heap::NotifyObjectSizeChange(HeapObject object, int old_size, int new_size,
- ClearRecordedSlots clear_recorded_slots) {
+void Heap::NotifyObjectSizeChange(
+ HeapObject object, int old_size, int new_size,
+ ClearRecordedSlots clear_recorded_slots,
+ enum UpdateInvalidatedObjectSize update_invalidated_object_size) {
+ old_size = ALIGN_TO_ALLOCATION_ALIGNMENT(old_size);
+ new_size = ALIGN_TO_ALLOCATION_ALIGNMENT(new_size);
DCHECK_LE(new_size, old_size);
if (new_size == old_size) return;
- UpdateInvalidatedObjectSize(object, new_size);
+ const bool is_main_thread = LocalHeap::Current() == nullptr;
- const bool is_background = LocalHeap::Current() != nullptr;
- DCHECK_IMPLIES(is_background,
+ DCHECK_IMPLIES(!is_main_thread,
clear_recorded_slots == ClearRecordedSlots::kNo);
+ DCHECK_IMPLIES(!is_main_thread, update_invalidated_object_size ==
+ UpdateInvalidatedObjectSize::kNo);
+
+ if (update_invalidated_object_size == UpdateInvalidatedObjectSize::kYes) {
+ UpdateInvalidatedObjectSize(object, new_size);
+ } else {
+ DCHECK_EQ(update_invalidated_object_size, UpdateInvalidatedObjectSize::kNo);
- const VerifyNoSlotsRecorded verify_no_slots_recorded =
- is_background ? VerifyNoSlotsRecorded::kNo : VerifyNoSlotsRecorded::kYes;
+#if DEBUG
+ if (is_main_thread) {
+ // When running on the main thread we can actually DCHECK that this object
+ // wasn't recorded in the invalidated_slots map yet.
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
+ DCHECK(
+ !chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_SHARED>(object));
+ DCHECK_IMPLIES(
+ incremental_marking()->IsCompacting(),
+ !chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
+ }
+#endif
+ }
+
+ const auto verify_no_slots_recorded =
+ is_main_thread ? VerifyNoSlotsRecorded::kYes : VerifyNoSlotsRecorded::kNo;
- const ClearFreedMemoryMode clear_memory_mode =
- ClearFreedMemoryMode::kDontClearFreedMemory;
+ const auto clear_memory_mode = ClearFreedMemoryMode::kDontClearFreedMemory;
const Address filler = object.address() + new_size;
const int filler_size = old_size - new_size;
@@ -3900,7 +3861,12 @@ void Heap::NotifyObjectSizeChange(HeapObject object, int old_size, int new_size,
void Heap::UpdateInvalidatedObjectSize(HeapObject object, int new_size) {
if (!MayContainRecordedSlots(object)) return;
- if (incremental_marking()->IsCompacting()) {
+  // Updating invalidated_slots is unsynchronized and thus needs to happen on the
+ // main thread.
+ DCHECK_NULL(LocalHeap::Current());
+ DCHECK_EQ(isolate()->thread_id(), ThreadId::Current());
+
+ if (incremental_marking()->IsCompacting() || gc_state() == MARK_COMPACT) {
MemoryChunk::FromHeapObject(object)
->UpdateInvalidatedObjectSize<OLD_TO_OLD>(object, new_size);
}
@@ -4298,12 +4264,16 @@ bool Heap::Contains(HeapObject value) const {
if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
- return HasBeenSetUp() &&
- ((new_space_ && new_space_->Contains(value)) ||
- old_space_->Contains(value) || code_space_->Contains(value) ||
- (map_space_ && map_space_->Contains(value)) ||
- lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
- (new_lo_space_ && new_lo_space_->Contains(value)));
+
+ if (!HasBeenSetUp()) return false;
+
+ return (new_space_ && new_space_->Contains(value)) ||
+ old_space_->Contains(value) || code_space_->Contains(value) ||
+ (map_space_ && map_space_->Contains(value)) ||
+ (shared_space_ && shared_space_->Contains(value)) ||
+ lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
+ (new_lo_space_ && new_lo_space_->Contains(value)) ||
+ (shared_lo_space_ && shared_lo_space_->Contains(value));
}
bool Heap::ContainsCode(HeapObject value) const {
@@ -4319,9 +4289,14 @@ bool Heap::ContainsCode(HeapObject value) const {
}
bool Heap::SharedHeapContains(HeapObject value) const {
- if (shared_old_space_)
- return shared_old_space_->Contains(value) ||
- (shared_map_space_ && shared_map_space_->Contains(value));
+ if (shared_allocation_space_) {
+ if (shared_allocation_space_->Contains(value)) return true;
+ if (shared_lo_allocation_space_->Contains(value)) return true;
+ if (shared_map_allocation_space_ &&
+ shared_map_allocation_space_->Contains(value))
+ return true;
+ }
+
return false;
}
@@ -4352,19 +4327,27 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
case MAP_SPACE:
DCHECK(map_space_);
return map_space_->Contains(value);
+ case SHARED_SPACE:
+ return shared_space_->Contains(value);
case LO_SPACE:
return lo_space_->Contains(value);
case CODE_LO_SPACE:
return code_lo_space_->Contains(value);
case NEW_LO_SPACE:
return new_lo_space_->Contains(value);
+ case SHARED_LO_SPACE:
+ return shared_lo_space_->Contains(value);
case RO_SPACE:
return ReadOnlyHeap::Contains(value);
}
UNREACHABLE();
}
-bool Heap::IsShared() { return isolate()->is_shared(); }
+bool Heap::IsShared() const { return isolate()->is_shared(); }
+
+bool Heap::ShouldMarkSharedHeap() const {
+ return isolate()->is_shared() || isolate()->is_shared_space_isolate();
+}
bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
@@ -4382,12 +4365,16 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
case MAP_SPACE:
DCHECK(map_space_);
return map_space_->ContainsSlow(addr);
+ case SHARED_SPACE:
+ return shared_space_->ContainsSlow(addr);
case LO_SPACE:
return lo_space_->ContainsSlow(addr);
case CODE_LO_SPACE:
return code_lo_space_->ContainsSlow(addr);
case NEW_LO_SPACE:
return new_lo_space_->ContainsSlow(addr);
+ case SHARED_LO_SPACE:
+ return shared_lo_space_->ContainsSlow(addr);
case RO_SPACE:
return read_only_space_->ContainsSlow(addr);
}
@@ -4400,9 +4387,11 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
case OLD_SPACE:
case CODE_SPACE:
case MAP_SPACE:
+ case SHARED_SPACE:
case LO_SPACE:
case NEW_LO_SPACE:
case CODE_LO_SPACE:
+ case SHARED_LO_SPACE:
case RO_SPACE:
return true;
default:
@@ -4426,7 +4415,7 @@ void Heap::VerifyCountersBeforeConcurrentSweeping() {
// We need to refine the counters on pages that are already swept and have
// not been moved over to the actual space. Otherwise, the AccountingStats
// are just an over approximation.
- space->RefillFreeList(mark_compact_collector()->sweeper());
+ space->RefillFreeList();
space->VerifyCountersBeforeConcurrentSweeping();
}
}
@@ -4689,13 +4678,15 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
SerializerDeserializer::IterateStartupObjectCache(isolate_, v);
v->Synchronize(VisitorSynchronization::kStartupObjectCache);
- // When shared_isolate() is null, isolate_ is either an unshared (instead of
- // a client) Isolate or the shared Isolate. In both cases isolate_ owns its
- // shared heap object cache and should iterate it.
+ // Iterate over shared heap object cache when the isolate owns this data
+ // structure. Isolates which own the shared heap object cache are:
+ // * Shared isolate
+ // * Shared space/main isolate
+ // * All isolates which do not use the shared heap feature.
//
- // When shared_isolate() is not null, isolate_ is a client Isolate, does not
- // own its shared heap object cache, and should not iterate it.
- if (isolate_->shared_isolate() == nullptr) {
+ // However, worker/client isolates do not own the shared heap object cache
+ // and should not iterate it.
+ if (isolate_->is_shared_heap_isolate() || !isolate_->has_shared_heap()) {
SerializerDeserializer::IterateSharedHeapObjectCache(isolate_, v);
v->Synchronize(VisitorSynchronization::kSharedHeapObjectCache);
}
@@ -4706,13 +4697,56 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
}
}
+class ClientRootVisitor : public RootVisitor {
+ public:
+ explicit ClientRootVisitor(RootVisitor* actual_visitor)
+ : actual_visitor_(actual_visitor) {}
+
+ void VisitRootPointers(Root root, const char* description,
+ FullObjectSlot start, FullObjectSlot end) final {
+ for (FullObjectSlot p = start; p < end; ++p) {
+ MaybeForwardSlot(root, description, p);
+ }
+ }
+
+ void VisitRootPointers(Root root, const char* description,
+ OffHeapObjectSlot start, OffHeapObjectSlot end) final {
+ actual_visitor_->VisitRootPointers(root, description, start, end);
+ }
+
+ void VisitRunningCode(FullObjectSlot slot) final {
+#if DEBUG
+ HeapObject object = HeapObject::cast(*slot);
+ DCHECK(!object.InSharedWritableHeap());
+#endif
+ }
+
+ void Synchronize(VisitorSynchronization::SyncTag tag) final {
+ actual_visitor_->Synchronize(tag);
+ }
+
+ private:
+ void MaybeForwardSlot(Root root, const char* description,
+ FullObjectSlot slot) {
+ Object object = *slot;
+ if (!object.IsHeapObject()) return;
+ HeapObject heap_object = HeapObject::cast(object);
+ if (heap_object.InSharedWritableHeap()) {
+ actual_visitor_->VisitRootPointer(root, description, slot);
+ }
+ }
+
+ RootVisitor* const actual_visitor_;
+};
+
void Heap::IterateRootsIncludingClients(RootVisitor* v,
base::EnumSet<SkipRoot> options) {
IterateRoots(v, options);
- if (isolate()->is_shared()) {
+ if (isolate()->is_shared_heap_isolate()) {
+ ClientRootVisitor client_root_visitor(v);
isolate()->global_safepoint()->IterateClientIsolates(
- [v, options](Isolate* client) {
+ [v = &client_root_visitor, options](Isolate* client) {
client->heap()->IterateRoots(v, options);
});
}
@@ -4720,9 +4754,12 @@ void Heap::IterateRootsIncludingClients(RootVisitor* v,
void Heap::IterateRootsFromStackIncludingClient(RootVisitor* v) {
IterateStackRoots(v);
- if (isolate()->is_shared()) {
+ if (isolate()->is_shared_heap_isolate()) {
+ ClientRootVisitor client_root_visitor(v);
isolate()->global_safepoint()->IterateClientIsolates(
- [v](Isolate* client) { client->heap()->IterateStackRoots(v); });
+ [v = &client_root_visitor](Isolate* client) {
+ client->heap()->IterateStackRoots(v);
+ });
}
}
@@ -4915,7 +4952,7 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
DCHECK(kMaxRegularHeapObjectSize >=
(JSArray::kHeaderSize +
FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
- AllocationMemento::kSize));
+ ALIGN_TO_ALLOCATION_ALIGNMENT(AllocationMemento::kSize)));
code_range_size_ = constraints.code_range_size_in_bytes();
@@ -5344,6 +5381,8 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) {
memory_allocator_.reset(
new MemoryAllocator(isolate_, code_page_allocator, MaxReserved()));
+ sweeper_.reset(new Sweeper(this));
+
mark_compact_collector_.reset(new MarkCompactCollector(this));
scavenger_collector_.reset(new ScavengerCollector(this));
@@ -5359,10 +5398,6 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) {
concurrent_marking_.reset(new ConcurrentMarking(this, nullptr));
}
- for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
- space_[i] = nullptr;
- }
-
// Set up layout tracing callback.
if (V8_UNLIKELY(v8_flags.trace_gc_heap_layout)) {
v8::GCType gc_type = kGCTypeMarkSweepCompact;
@@ -5381,7 +5416,7 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
DCHECK_NOT_NULL(ro_heap);
DCHECK_IMPLIES(read_only_space_ != nullptr,
read_only_space_ == ro_heap->read_only_space());
- space_[RO_SPACE] = nullptr;
+ DCHECK_NULL(space_[RO_SPACE].get());
read_only_space_ = ro_heap->read_only_space();
heap_allocator_.SetReadOnlySpace(read_only_space_);
}
@@ -5424,24 +5459,50 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
const bool has_young_gen = !v8_flags.single_generation && !IsShared();
if (has_young_gen) {
if (v8_flags.minor_mc) {
- space_[NEW_SPACE] = new_space_ =
- new PagedNewSpace(this, initial_semispace_size_, max_semi_space_size_,
- new_allocation_info);
+ space_[NEW_SPACE] = std::make_unique<PagedNewSpace>(
+ this, initial_semispace_size_, max_semi_space_size_,
+ new_allocation_info);
} else {
- space_[NEW_SPACE] = new_space_ =
- new SemiSpaceNewSpace(this, initial_semispace_size_,
- max_semi_space_size_, new_allocation_info);
+ space_[NEW_SPACE] = std::make_unique<SemiSpaceNewSpace>(
+ this, initial_semispace_size_, max_semi_space_size_,
+ new_allocation_info);
}
- space_[NEW_LO_SPACE] = new_lo_space_ =
- new NewLargeObjectSpace(this, NewSpaceCapacity());
+ new_space_ = static_cast<NewSpace*>(space_[NEW_SPACE].get());
+
+ space_[NEW_LO_SPACE] =
+ std::make_unique<NewLargeObjectSpace>(this, NewSpaceCapacity());
+ new_lo_space_ =
+ static_cast<NewLargeObjectSpace*>(space_[NEW_LO_SPACE].get());
}
- space_[OLD_SPACE] = old_space_ = new OldSpace(this, old_allocation_info);
- space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
+
+ space_[OLD_SPACE] = std::make_unique<OldSpace>(this, old_allocation_info);
+ old_space_ = static_cast<OldSpace*>(space_[OLD_SPACE].get());
+
+ space_[CODE_SPACE] = std::make_unique<CodeSpace>(this);
+ code_space_ = static_cast<CodeSpace*>(space_[CODE_SPACE].get());
+
if (v8_flags.use_map_space) {
- space_[MAP_SPACE] = map_space_ = new MapSpace(this);
+ space_[MAP_SPACE] = std::make_unique<MapSpace>(this);
+ map_space_ = static_cast<MapSpace*>(space_[MAP_SPACE].get());
+ }
+
+ if (isolate()->is_shared_space_isolate()) {
+ space_[SHARED_SPACE] = std::make_unique<SharedSpace>(this);
+ shared_space_ = static_cast<SharedSpace*>(space_[SHARED_SPACE].get());
+ }
+
+ space_[LO_SPACE] = std::make_unique<OldLargeObjectSpace>(this);
+ lo_space_ = static_cast<OldLargeObjectSpace*>(space_[LO_SPACE].get());
+
+ space_[CODE_LO_SPACE] = std::make_unique<CodeLargeObjectSpace>(this);
+ code_lo_space_ =
+ static_cast<CodeLargeObjectSpace*>(space_[CODE_LO_SPACE].get());
+
+ if (isolate()->is_shared_space_isolate()) {
+ space_[SHARED_LO_SPACE] = std::make_unique<SharedLargeObjectSpace>(this);
+ shared_lo_space_ =
+ static_cast<SharedLargeObjectSpace*>(space_[SHARED_LO_SPACE].get());
}
- space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
- space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
i++) {
@@ -5475,12 +5536,12 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
}
if (new_space()) {
- if (v8_flags.concurrent_minor_mc) {
+ if (v8_flags.concurrent_minor_mc_marking) {
// TODO(v8:13012): Atomic MinorMC should not use ScavengeJob. Instead, we
// should schedule MinorMC tasks at a soft limit, which are used by atomic
// MinorMC, and to finalize concurrent MinorMC. The condition
- // v8_flags.concurrent_minor_mc can then be changed to v8_flags.minor_mc
- // (here and at the RemoveAllocationObserver call site).
+ // v8_flags.concurrent_minor_mc_marking can then be changed to
+ // v8_flags.minor_mc (here and at the RemoveAllocationObserver call site).
minor_mc_task_observer_.reset(
new MinorMCTaskObserver(this, MinorMCTaskObserver::kStepSize));
new_space()->AddAllocationObserver(minor_mc_task_observer_.get());
@@ -5516,19 +5577,33 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
}
#endif // V8_HEAP_USE_PKU_JIT_WRITE_PROTECT
- if (isolate()->shared_isolate()) {
+ if (isolate()->shared_space_isolate()) {
+ Heap* heap = isolate()->shared_space_isolate()->heap();
+
+ shared_space_allocator_ = std::make_unique<ConcurrentAllocator>(
+ main_thread_local_heap(), heap->shared_space_);
+
+ DCHECK_NULL(shared_map_allocator_.get());
+
+ shared_allocation_space_ = heap->shared_space_;
+ shared_lo_allocation_space_ = heap->shared_lo_space_;
+ DCHECK(!v8_flags.use_map_space);
+ DCHECK_NULL(shared_map_allocation_space_);
+
+ } else if (isolate()->shared_isolate()) {
Heap* shared_heap = isolate()->shared_isolate()->heap();
- shared_old_space_ = shared_heap->old_space();
- shared_lo_space_ = shared_heap->lo_space();
- shared_old_allocator_.reset(
- new ConcurrentAllocator(main_thread_local_heap(), shared_old_space_));
+ shared_space_allocator_ = std::make_unique<ConcurrentAllocator>(
+ main_thread_local_heap(), shared_heap->old_space());
if (shared_heap->map_space()) {
- shared_map_space_ = shared_heap->map_space();
- shared_map_allocator_.reset(
- new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
+ shared_map_allocator_ = std::make_unique<ConcurrentAllocator>(
+ main_thread_local_heap(), shared_heap->map_space());
}
+
+ shared_allocation_space_ = shared_heap->old_space();
+ shared_lo_allocation_space_ = shared_heap->lo_space();
+ shared_map_allocation_space_ = shared_heap->map_space();
}
main_thread_local_heap()->SetUpMainThread();
@@ -5668,8 +5743,8 @@ void Heap::AttachCppHeap(v8::CppHeap* cpp_heap) {
void Heap::DetachCppHeap() {
CppHeap::From(cpp_heap_)->DetachIsolate();
- cpp_heap_ = nullptr;
local_embedder_heap_tracer()->SetCppHeap(nullptr);
+ cpp_heap_ = nullptr;
}
const cppgc::EmbedderStackState* Heap::overriden_stack_state() const {
@@ -5738,6 +5813,17 @@ void Heap::StartTearDown() {
}
}
+void Heap::TearDownWithSharedHeap() {
+ DCHECK_EQ(gc_state(), TEAR_DOWN);
+
+ // Assert that there are no background threads left and no executable memory
+ // chunks are unprotected.
+ safepoint()->AssertMainThreadIsOnlyThread();
+
+ // Might use the external pointer which might be in the shared heap.
+ external_string_table_.TearDown();
+}
+
void Heap::TearDown() {
DCHECK_EQ(gc_state(), TEAR_DOWN);
@@ -5763,9 +5849,11 @@ void Heap::TearDown() {
}
if (new_space()) {
- if (v8_flags.concurrent_minor_mc) {
+ if (minor_mc_task_observer_) {
+ DCHECK_NULL(scavenge_task_observer_);
new_space()->RemoveAllocationObserver(minor_mc_task_observer_.get());
} else {
+ DCHECK_NOT_NULL(scavenge_task_observer_);
new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
}
}
@@ -5804,6 +5892,9 @@ void Heap::TearDown() {
minor_mark_compact_collector_.reset();
}
+ sweeper_->TearDown();
+ sweeper_.reset();
+
scavenger_collector_.reset();
array_buffer_sweeper_.reset();
incremental_marking_.reset();
@@ -5830,16 +5921,11 @@ void Heap::TearDown() {
cpp_heap_ = nullptr;
}
- external_string_table_.TearDown();
-
tracer_.reset();
- allocation_sites_to_pretenure_.reset();
-
- shared_old_space_ = nullptr;
- shared_old_allocator_.reset();
+ pretenuring_handler_.reset();
- shared_map_space_ = nullptr;
+ shared_space_allocator_.reset();
shared_map_allocator_.reset();
{
@@ -5847,8 +5933,7 @@ void Heap::TearDown() {
"Deletion of CODE_SPACE and CODE_LO_SPACE requires write access to "
"Code page headers");
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
- delete space_[i];
- space_[i] = nullptr;
+ space_[i].reset();
}
}
@@ -6060,6 +6145,7 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
// No need to update old-to-old here since that remembered set is gone
// after a full GC and not re-recorded until sweeping is finished.
RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
+ RememberedSet<OLD_TO_SHARED>::Remove(page, slot.address());
}
}
#endif
@@ -6090,6 +6176,7 @@ void Heap::VerifySlotRangeHasNoRecordedSlots(Address start, Address end) {
#ifndef V8_DISABLE_WRITE_BARRIERS
Page* page = Page::FromAddress(start);
RememberedSet<OLD_TO_NEW>::CheckNoneInRange(page, start, end);
+ RememberedSet<OLD_TO_SHARED>::CheckNoneInRange(page, start, end);
#endif
}
#endif
@@ -6104,6 +6191,8 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
if (!page->SweepingDone()) {
RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_SHARED>::RemoveRange(page, start, end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
}
}
#endif
@@ -6567,7 +6656,7 @@ void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
set_dirty_js_finalization_registries_list_tail(prev);
}
-void Heap::KeepDuringJob(Handle<JSReceiver> target) {
+void Heap::KeepDuringJob(Handle<HeapObject> target) {
DCHECK(weak_refs_keep_during_job().IsUndefined() ||
weak_refs_keep_during_job().IsOrderedHashSet());
Handle<OrderedHashSet> table;
@@ -6773,9 +6862,12 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
return dst == MAP_SPACE && type == MAP_TYPE;
+ case SHARED_SPACE:
+ return dst == SHARED_SPACE;
case LO_SPACE:
case CODE_LO_SPACE:
case NEW_LO_SPACE:
+ case SHARED_LO_SPACE:
case RO_SPACE:
return false;
}
@@ -7214,6 +7306,69 @@ void Heap::set_allocation_timeout(int allocation_timeout) {
}
#endif // V8_ENABLE_ALLOCATION_TIMEOUT
+void Heap::FinishSweepingIfOutOfWork() {
+ if (sweeper()->sweeping_in_progress() && v8_flags.concurrent_sweeping &&
+ !sweeper()->AreSweeperTasksRunning()) {
+ // At this point we know that all concurrent sweeping tasks have run
+ // out of work and quit: all pages are swept. The main thread still needs
+ // to complete sweeping though.
+ EnsureSweepingCompleted(SweepingForcedFinalizationMode::kV8Only);
+ }
+ if (cpp_heap()) {
+ // Ensure that sweeping is also completed for the C++ managed heap, if one
+ // exists and it's out of work.
+ CppHeap::From(cpp_heap())->FinishSweepingIfOutOfWork();
+ }
+}
+
+void Heap::EnsureSweepingCompleted(SweepingForcedFinalizationMode mode) {
+ if (sweeper()->sweeping_in_progress()) {
+ TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
+ ThreadKind::kMain);
+
+ sweeper()->EnsureCompleted();
+ old_space()->RefillFreeList();
+ {
+ CodePageHeaderModificationScope rwx_write_scope(
+ "Updating per-page stats stored in page headers requires write "
+ "access to Code page headers");
+ code_space()->RefillFreeList();
+ }
+ if (shared_space()) {
+ shared_space()->RefillFreeList();
+ }
+ if (map_space()) {
+ map_space()->RefillFreeList();
+ map_space()->SortFreeList();
+ }
+
+ tracer()->NotifySweepingCompleted();
+
+#ifdef VERIFY_HEAP
+ if (v8_flags.verify_heap && !evacuation()) {
+ FullEvacuationVerifier verifier(this);
+ verifier.Run();
+ }
+#endif
+ }
+
+ if (mode == SweepingForcedFinalizationMode::kUnifiedHeap && cpp_heap()) {
+ // Ensure that sweeping is also completed for the C++ managed heap, if one
+ // exists.
+ CppHeap::From(cpp_heap())->FinishSweepingIfRunning();
+ DCHECK(!CppHeap::From(cpp_heap())->sweeper().IsSweepingInProgress());
+ }
+
+ DCHECK_IMPLIES(
+ mode == SweepingForcedFinalizationMode::kUnifiedHeap || !cpp_heap(),
+ !tracer()->IsSweepingInProgress());
+}
+
+void Heap::DrainSweepingWorklistForSpace(AllocationSpace space) {
+ if (!sweeper()->sweeping_in_progress()) return;
+ sweeper()->DrainSweepingWorklistForSpace(space);
+}
+
EmbedderStackStateScope::EmbedderStackStateScope(Heap* heap, Origin origin,
StackState stack_state)
: local_tracer_(heap->local_embedder_heap_tracer()),
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 6e270f246d..0cf23b5ef4 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -31,6 +31,9 @@
#include "src/heap/base/stack.h"
#include "src/heap/gc-callbacks.h"
#include "src/heap/heap-allocator.h"
+#include "src/heap/marking-state.h"
+#include "src/heap/pretenuring-handler.h"
+#include "src/heap/sweeper.h"
#include "src/init/heap-symbols.h"
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
@@ -98,8 +101,6 @@ class CppHeap;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
-template <typename T>
-class GlobalHandleVector;
class IsolateSafepoint;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
@@ -127,7 +128,9 @@ class SafepointScope;
class ScavengeJob;
class Scavenger;
class ScavengerCollector;
+class SharedLargeObjectSpace;
class SharedReadOnlySpace;
+class SharedSpace;
class Space;
class StressScavengeObserver;
class TimedHistogram;
@@ -140,6 +143,8 @@ enum ArrayStorageAllocationMode {
enum class ClearRecordedSlots { kYes, kNo };
+enum class UpdateInvalidatedObjectSize { kYes, kNo };
+
enum class InvalidateRecordedSlots { kYes, kNo };
enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
@@ -148,46 +153,6 @@ enum ExternalBackingStoreType { kArrayBuffer, kExternalString, kNumTypes };
enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
-// These values are persisted to logs. Entries should not be renumbered and
-// numeric values should never be reused. If you add new items here, update
-// src/tools/metrics/histograms/enums.xml in chromium.
-enum class GarbageCollectionReason : int {
- kUnknown = 0,
- kAllocationFailure = 1,
- kAllocationLimit = 2,
- kContextDisposal = 3,
- kCountersExtension = 4,
- kDebugger = 5,
- kDeserializer = 6,
- kExternalMemoryPressure = 7,
- kFinalizeMarkingViaStackGuard = 8,
- kFinalizeMarkingViaTask = 9,
- kFullHashtable = 10,
- kHeapProfiler = 11,
- kTask = 12,
- kLastResort = 13,
- kLowMemoryNotification = 14,
- kMakeHeapIterable = 15,
- kMemoryPressure = 16,
- kMemoryReducer = 17,
- kRuntime = 18,
- kSamplingProfiler = 19,
- kSnapshotCreator = 20,
- kTesting = 21,
- kExternalFinalize = 22,
- kGlobalAllocationLimit = 23,
- kMeasureMemory = 24,
- kBackgroundAllocationFailure = 25,
- kFinalizeMinorMC = 26,
- kCppHeapAllocationFailure = 27,
-
- kLastReason = kCppHeapAllocationFailure,
-};
-
-static_assert(kGarbageCollectionReasonMaxValue ==
- static_cast<int>(GarbageCollectionReason::kLastReason),
- "The value of kGarbageCollectionReasonMaxValue is inconsistent.");
-
enum class YoungGenerationHandling {
kRegularScavenge = 0,
kFastPromotionDuringScavenge = 1,
@@ -254,7 +219,6 @@ class Heap {
// and the key of the entry is in new-space. Such keys do not appear in the
// usual OLD_TO_NEW remembered set.
EphemeronRememberedSet ephemeron_remembered_set_;
- enum FindMementoMode { kForRuntime, kForGC };
enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault };
@@ -334,9 +298,6 @@ class Heap {
std::atomic<int64_t> low_since_mark_compact_{0};
};
- using PretenuringFeedbackMap =
- std::unordered_map<AllocationSite, size_t, Object::Hasher>;
-
// Taking this mutex prevents the GC from entering a phase that relocates
// object references.
base::Mutex* relocation_mutex() { return &relocation_mutex_; }
@@ -695,11 +656,6 @@ class Heap {
bool IsGCWithStack() const;
- // If an object has an AllocationMemento trailing it, return it, otherwise
- // return a null AllocationMemento.
- template <FindMementoMode mode>
- inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
-
// Performs GC after background allocation failure.
void CollectGarbageForBackground(LocalHeap* local_heap);
@@ -818,9 +774,9 @@ class Heap {
}
#if V8_ENABLE_WEBASSEMBLY
- // TODO(manoskouk): Inline this if STRONG_MUTABLE_MOVABLE_ROOT_LIST setters
- // become public.
- void EnsureWasmCanonicalRttsSize(int length);
+ // TODO(manoskouk): Consider inlining/moving this if
+ // STRONG_MUTABLE_MOVABLE_ROOT_LIST setters become public.
+ V8_EXPORT_PRIVATE void EnsureWasmCanonicalRttsSize(int length);
#endif
// ===========================================================================
@@ -861,6 +817,9 @@ class Heap {
// Sets the TearDown state, so no new GC tasks get posted.
void StartTearDown();
+ // Destroys all data that might require the shared heap.
+ void TearDownWithSharedHeap();
+
// Destroys all memory allocated by the heap.
void TearDown();
@@ -876,16 +835,23 @@ class Heap {
NewSpace* new_space() const { return new_space_; }
inline PagedNewSpace* paged_new_space() const;
OldSpace* old_space() const { return old_space_; }
- OldSpace* shared_old_space() const { return shared_old_space_; }
CodeSpace* code_space() const { return code_space_; }
+ SharedSpace* shared_space() const { return shared_space_; }
MapSpace* map_space() const { return map_space_; }
inline PagedSpace* space_for_maps();
OldLargeObjectSpace* lo_space() const { return lo_space_; }
- OldLargeObjectSpace* shared_lo_space() const { return shared_lo_space_; }
CodeLargeObjectSpace* code_lo_space() const { return code_lo_space_; }
+ SharedLargeObjectSpace* shared_lo_space() const { return shared_lo_space_; }
NewLargeObjectSpace* new_lo_space() const { return new_lo_space_; }
ReadOnlySpace* read_only_space() const { return read_only_space_; }
+ PagedSpace* shared_allocation_space() const {
+ return shared_allocation_space_;
+ }
+ OldLargeObjectSpace* shared_lo_allocation_space() const {
+ return shared_lo_allocation_space_;
+ }
+
inline PagedSpace* paged_space(int idx);
inline Space* space(int idx);
@@ -904,13 +870,11 @@ class Heap {
inline Isolate* isolate() const;
-#ifdef DEBUG
// Check if we run on isolate's main thread.
inline bool IsMainThread() const;
// Check if we run on the current main thread of the shared isolate during
// shared GC.
inline bool IsSharedMainThread() const;
-#endif
MarkCompactCollector* mark_compact_collector() {
return mark_compact_collector_.get();
@@ -920,6 +884,8 @@ class Heap {
return minor_mark_compact_collector_.get();
}
+ Sweeper* sweeper() { return sweeper_.get(); }
+
ArrayBufferSweeper* array_buffer_sweeper() {
return array_buffer_sweeper_.get();
}
@@ -991,7 +957,7 @@ class Heap {
return is_finalization_registry_cleanup_task_posted_;
}
- V8_EXPORT_PRIVATE void KeepDuringJob(Handle<JSReceiver> target);
+ V8_EXPORT_PRIVATE void KeepDuringJob(Handle<HeapObject> target);
void ClearKeptObjects();
// ===========================================================================
@@ -1030,8 +996,14 @@ class Heap {
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Performs garbage collection operation for the shared heap.
- V8_EXPORT_PRIVATE void CollectSharedGarbage(
- GarbageCollectionReason gc_reason);
+ V8_EXPORT_PRIVATE bool CollectGarbageShared(
+ LocalHeap* local_heap, GarbageCollectionReason gc_reason);
+
+ // Requests garbage collection from some other thread.
+ V8_EXPORT_PRIVATE bool CollectGarbageFromAnyThread(
+ LocalHeap* local_heap,
+ GarbageCollectionReason gc_reason =
+ GarbageCollectionReason::kBackgroundAllocationFailure);
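
A minimal sketch of how a background thread might react to a failed shared allocation using the entry point above; the allocation helper is hypothetical and only illustrates the retry shape:

bool AllocateSharedWithRetry(Heap* heap, LocalHeap* local_heap, size_t bytes) {
  for (int attempt = 0; attempt < 2; ++attempt) {
    if (TryAllocateShared(local_heap, bytes)) return true;  // hypothetical helper
    // Request a shared GC; returns false if the GC could not be performed
    // (e.g. the requesting thread was parked).
    if (!heap->CollectGarbageShared(
            local_heap, GarbageCollectionReason::kAllocationFailure)) {
      return false;
    }
  }
  return false;
}
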
// Reports an external memory pressure event; either performs a major GC or
// completes incremental marking in order to free external resources.
@@ -1125,7 +1097,7 @@ class Heap {
void CompleteSweepingYoung(GarbageCollector collector);
// Ensures that sweeping is finished for that object's page.
- void EnsureSweepingCompleted(HeapObject object);
+ void EnsureSweepingCompletedForObject(HeapObject object);
IncrementalMarking* incremental_marking() const {
return incremental_marking_.get();
@@ -1151,8 +1123,11 @@ class Heap {
// The runtime uses this function to inform the GC of object size changes. The
// GC will fill this area with a filler object and might clear recorded slots
// in that area.
- void NotifyObjectSizeChange(HeapObject, int old_size, int new_size,
- ClearRecordedSlots clear_recorded_slots);
+ void NotifyObjectSizeChange(
+ HeapObject, int old_size, int new_size,
+ ClearRecordedSlots clear_recorded_slots,
+ UpdateInvalidatedObjectSize update_invalidated_object_size =
+ UpdateInvalidatedObjectSize::kYes);
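
A hedged sketch (not part of the patch) of the typical main-thread caller shape when shrinking an object in place, e.g. while right-trimming:

void ShrinkObjectInPlace(Heap* heap, HeapObject object, int old_size,
                         int new_size) {
  // The freed tail becomes a filler; recorded slots inside it are cleared and,
  // with the default argument, the invalidated-object size is updated too.
  heap->NotifyObjectSizeChange(object, old_size, new_size,
                               ClearRecordedSlots::kYes);
}
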
// ===========================================================================
// Deoptimization support API. ===============================================
@@ -1169,8 +1144,6 @@ class Heap {
void DeoptMarkedAllocationSites();
- bool DeoptMaybeTenuredAllocationSites();
-
// ===========================================================================
// Embedder heap tracer support. =============================================
// ===========================================================================
@@ -1202,7 +1175,7 @@ class Heap {
V8_EXPORT_PRIVATE void SetStackStart(void* stack_start);
- ::heap::base::Stack& stack();
+ V8_EXPORT_PRIVATE ::heap::base::Stack& stack();
// ===========================================================================
// Embedder roots optimizations. =============================================
@@ -1274,7 +1247,8 @@ class Heap {
V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space) const;
// Returns true when this heap is shared.
- V8_EXPORT_PRIVATE bool IsShared();
+ V8_EXPORT_PRIVATE bool IsShared() const;
+ V8_EXPORT_PRIVATE bool ShouldMarkSharedHeap() const;
// Slow methods that can be used for verification as they can also be used
// with off-heap Addresses.
@@ -1393,15 +1367,15 @@ class Heap {
}
inline size_t promoted_objects_size() { return promoted_objects_size_; }
- inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
- semi_space_copied_object_size_ += object_size;
+ inline void IncrementNewSpaceSurvivingObjectSize(size_t object_size) {
+ new_space_surviving_object_size_ += object_size;
}
- inline size_t semi_space_copied_object_size() {
- return semi_space_copied_object_size_;
+ inline size_t new_space_surviving_object_size() {
+ return new_space_surviving_object_size_;
}
inline size_t SurvivedYoungObjectSize() {
- return promoted_objects_size_ + semi_space_copied_object_size_;
+ return promoted_objects_size_ + new_space_surviving_object_size_;
}
inline void IncrementNodesDiedInNewSpace(int count) {
@@ -1413,7 +1387,6 @@ class Heap {
inline void IncrementNodesPromoted() { nodes_promoted_++; }
inline void IncrementYoungSurvivorsCounter(size_t survived) {
- survived_last_scavenge_ = survived;
survived_since_last_expansion_ += survived;
}
@@ -1532,27 +1505,6 @@ class Heap {
const std::function<void*(size_t)>& allocate, size_t byte_length);
// ===========================================================================
- // Allocation site tracking. =================================================
- // ===========================================================================
-
- // Updates the AllocationSite of a given {object}. The entry (including the
- // count) is cached on the local pretenuring feedback.
- inline void UpdateAllocationSite(
- Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback);
-
- // Merges local pretenuring feedback into the global one. Note that this
- // method needs to be called after evacuation, as allocation sites may be
- // evacuated and this method resolves forward pointers accordingly.
- void MergeAllocationSitePretenuringFeedback(
- const PretenuringFeedbackMap& local_pretenuring_feedback);
-
- // Adds an allocation site to the list of sites to be pretenured during the
- // next collection. Added allocation sites are pretenured independent of
- // their feedback.
- V8_EXPORT_PRIVATE void PretenureAllocationSiteOnNextCollection(
- AllocationSite site);
-
- // ===========================================================================
// Allocation tracking. ======================================================
// ===========================================================================
@@ -1627,6 +1579,28 @@ class Heap {
// it supports a forwarded map. Fails if the map is not the code map.
Map GcSafeMapOfCodeSpaceObject(HeapObject object);
+ // ===========================================================================
+ // Sweeping. =================================================================
+ // ===========================================================================
+
+ bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }
+
+ void FinishSweepingIfOutOfWork();
+
+ enum class SweepingForcedFinalizationMode { kUnifiedHeap, kV8Only };
+
+ // Ensures that sweeping is finished.
+ //
+ // Note: Can only be called safely from main thread.
+ V8_EXPORT_PRIVATE void EnsureSweepingCompleted(
+ SweepingForcedFinalizationMode mode);
+
+ void DrainSweepingWorklistForSpace(AllocationSpace space);
+
+ void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
+
+ bool evacuation() const { return evacuation_; }
+
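
A minimal sketch, mirroring what MakeHeapIterable does in heap.cc, of forcing sweeping to finish from the main thread via the API above:

void FinishSweepingBeforeIteration(Heap* heap) {
  if (heap->sweeping_in_progress()) {
    // Main thread only: blocks until the V8 sweeper is done and free lists
    // have been refilled.
    heap->EnsureSweepingCompleted(
        Heap::SweepingForcedFinalizationMode::kV8Only);
  }
  DCHECK(!heap->sweeping_in_progress());
}
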
// =============================================================================
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
@@ -1683,6 +1657,16 @@ class Heap {
return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
}
+ MarkingState* marking_state() { return &marking_state_; }
+
+ NonAtomicMarkingState* non_atomic_marking_state() {
+ return &non_atomic_marking_state_;
+ }
+
+ AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }
+
+ PretenturingHandler* pretenuring_handler() { return &pretenuring_handler_; }
+
private:
class AllocationTrackerForDebugging;
@@ -1763,8 +1747,6 @@ class Heap {
static const int kMaxMarkCompactsInIdleRound = 7;
- static const int kInitialFeedbackCapacity = 256;
-
Heap();
~Heap();
@@ -1802,6 +1784,10 @@ class Heap {
// Free all shared LABs of main thread.
void FreeMainThreadSharedLinearAllocationAreas();
+ // Enables/Disables black allocation in shared LABs.
+ void MarkSharedLinearAllocationAreasBlack();
+ void UnmarkSharedLinearAllocationAreas();
+
// Performs garbage collection in a safepoint.
// Returns the number of freed global handles.
size_t PerformGarbageCollection(
@@ -1869,6 +1855,7 @@ class Heap {
bool HasLowOldGenerationAllocationRate();
bool HasLowEmbedderAllocationRate();
+ bool ShouldReduceNewSpaceSize() const;
void ReduceNewSpaceSize();
GCIdleTimeHeapState ComputeHeapState();
@@ -1901,18 +1888,6 @@ class Heap {
void InvokeIncrementalMarkingEpilogueCallbacks();
// ===========================================================================
- // Pretenuring. ==============================================================
- // ===========================================================================
-
- // Pretenuring decisions are made based on feedback collected during new space
- // evacuation. Note that between feedback collection and calling this method
- // object in old space must not move.
- void ProcessPretenuringFeedback();
-
- // Removes an entry from the global pretenuring storage.
- void RemoveAllocationSitePretenuringFeedback(AllocationSite site);
-
- // ===========================================================================
// Actual GC. ================================================================
// ===========================================================================
@@ -1962,7 +1937,7 @@ class Heap {
void UpdateTotalGCTime(double duration);
- bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
+ bool MaximumSizeMinorGC() { return maximum_size_minor_gcs_ > 0; }
bool IsIneffectiveMarkCompact(size_t old_generation_size,
double mutator_utilization);
@@ -2173,9 +2148,6 @@ class Heap {
// scavenge since last new space expansion.
size_t survived_since_last_expansion_ = 0;
- // ... and since the last scavenge.
- size_t survived_last_scavenge_ = 0;
-
// This is not the depth of nested AlwaysAllocateScope's but rather a single
// count, as scopes can be acquired from multiple tasks (read: threads).
std::atomic<size_t> always_allocate_scope_count_{0};
@@ -2190,24 +2162,30 @@ class Heap {
// For keeping track of context disposals.
int contexts_disposed_ = 0;
+ // Spaces owned by this heap through space_.
NewSpace* new_space_ = nullptr;
OldSpace* old_space_ = nullptr;
CodeSpace* code_space_ = nullptr;
MapSpace* map_space_ = nullptr;
+ SharedSpace* shared_space_ = nullptr;
OldLargeObjectSpace* lo_space_ = nullptr;
CodeLargeObjectSpace* code_lo_space_ = nullptr;
NewLargeObjectSpace* new_lo_space_ = nullptr;
+ SharedLargeObjectSpace* shared_lo_space_ = nullptr;
ReadOnlySpace* read_only_space_ = nullptr;
- OldSpace* shared_old_space_ = nullptr;
- OldLargeObjectSpace* shared_lo_space_ = nullptr;
- MapSpace* shared_map_space_ = nullptr;
+  // Either pointers to shared spaces owned by this heap or pointers to shared
+  // spaces owned by another isolate.
+ PagedSpace* shared_allocation_space_ = nullptr;
+ OldLargeObjectSpace* shared_lo_allocation_space_ = nullptr;
+ PagedSpace* shared_map_allocation_space_ = nullptr;
- std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
+ // Allocators for the shared spaces.
+ std::unique_ptr<ConcurrentAllocator> shared_space_allocator_;
std::unique_ptr<ConcurrentAllocator> shared_map_allocator_;
// Map from the space id to the space.
- Space* space_[LAST_SPACE + 1];
+ std::unique_ptr<Space> space_[LAST_SPACE + 1];
LocalHeap* main_thread_local_heap_ = nullptr;
@@ -2284,9 +2262,9 @@ class Heap {
size_t promoted_objects_size_ = 0;
double promotion_ratio_ = 0.0;
double promotion_rate_ = 0.0;
- size_t semi_space_copied_object_size_ = 0;
- size_t previous_semi_space_copied_object_size_ = 0;
- double semi_space_copied_rate_ = 0.0;
+ size_t new_space_surviving_object_size_ = 0;
+ size_t previous_new_space_surviving_object_size_ = 0;
+ double new_space_surviving_rate_ = 0.0;
int nodes_died_in_new_space_ = 0;
int nodes_copied_in_new_space_ = 0;
int nodes_promoted_ = 0;
@@ -2295,7 +2273,7 @@ class Heap {
// tenure state. When we switched to the maximum new space size we deoptimize
// the code that belongs to the allocation site and derive the lifetime
// of the allocation site.
- unsigned int maximum_size_scavenges_ = 0;
+ unsigned int maximum_size_minor_gcs_ = 0;
// Total time spent in GC.
double total_gc_time_ms_ = 0.0;
@@ -2304,6 +2282,7 @@ class Heap {
double last_gc_time_ = 0.0;
std::unique_ptr<GCTracer> tracer_;
+ std::unique_ptr<Sweeper> sweeper_;
std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
std::unique_ptr<MinorMarkCompactCollector> minor_mark_compact_collector_;
std::unique_ptr<ScavengerCollector> scavenger_collector_;
@@ -2359,16 +2338,6 @@ class Heap {
// The size of global memory after the last MarkCompact GC.
size_t global_memory_at_last_gc_ = 0;
- // The feedback storage is used to store allocation sites (keys) and how often
- // they have been visited (values) by finding a memento behind an object. The
- // storage is only alive temporary during a GC. The invariant is that all
- // pointers in this map are already fixed, i.e., they do not point to
- // forwarding pointers.
- PretenuringFeedbackMap global_pretenuring_feedback_;
-
- std::unique_ptr<GlobalHandleVector<AllocationSite>>
- allocation_sites_to_pretenure_;
-
char trace_ring_buffer_[kTraceRingBufferSize];
// If it's not full then the data is from 0 to ring_buffer_end_. If it's
@@ -2433,12 +2402,22 @@ class Heap {
bool is_finalization_registry_cleanup_task_posted_ = false;
+ bool evacuation_ = false;
+
std::unique_ptr<third_party_heap::Heap> tp_heap_;
+ MarkingState marking_state_;
+ NonAtomicMarkingState non_atomic_marking_state_;
+ AtomicMarkingState atomic_marking_state_;
+
+ PretenturingHandler pretenuring_handler_;
+
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
friend class ArrayBufferCollector;
friend class ArrayBufferSweeper;
+ friend class CollectorBase;
+ friend class ConcurrentAllocator;
friend class ConcurrentMarking;
friend class EvacuateVisitorBase;
friend class GCCallbacksScope;
@@ -2468,6 +2447,7 @@ class Heap {
friend class ObjectStatsCollector;
friend class Page;
friend class PagedSpaceBase;
+ friend class PretenturingHandler;
friend class ReadOnlyRoots;
friend class Scavenger;
friend class ScavengerCollector;
@@ -2887,6 +2867,18 @@ class V8_NODISCARD CppClassNamesAsHeapObjectNameScope final {
std::unique_ptr<cppgc::internal::ClassNameAsHeapObjectNameScope> scope_;
};
+class V8_NODISCARD EvacuationScope {
+ public:
+ explicit EvacuationScope(Heap* heap) : heap_(heap) {
+ heap_->set_evacuation(true);
+ }
+
+ ~EvacuationScope() { heap_->set_evacuation(false); }
+
+ private:
+ Heap* const heap_;
+};
+
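
A hedged usage sketch for the new RAII helper above (illustrative only; the actual call sites are outside this excerpt):

void RunEvacuationPhase(Heap* heap) {
  EvacuationScope evacuation_scope(heap);
  DCHECK(heap->evacuation());
  // ... evacuate pages and update pointers; the destructor clears the flag,
  // so heap->evacuation() is false again after this function returns.
}
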
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index 797026352d..cc413f2b82 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -8,6 +8,7 @@
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/marking-state-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 780bf08da7..6f633f0733 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -58,9 +58,8 @@ IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
incremental_marking_job_(heap),
new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
old_generation_observer_(this, kOldGenerationAllocatedThreshold),
- marking_state_(heap->isolate()),
- atomic_marking_state_(heap->isolate()),
- non_atomic_marking_state_(heap->isolate()) {}
+ marking_state_(heap->marking_state()),
+ atomic_marking_state_(heap->atomic_marking_state()) {}
void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
HeapObject obj) {
@@ -131,7 +130,7 @@ bool IncrementalMarking::IsBelowActivationThresholds() const {
void IncrementalMarking::Start(GarbageCollector garbage_collector,
GarbageCollectionReason gc_reason) {
- DCHECK(!major_collector_->sweeping_in_progress());
+ DCHECK(!heap_->sweeping_in_progress());
DCHECK(!heap_->IsShared());
if (v8_flags.trace_incremental_marking) {
@@ -160,15 +159,22 @@ void IncrementalMarking::Start(GarbageCollector garbage_collector,
Counters* counters = heap_->isolate()->counters();
- counters->incremental_marking_reason()->AddSample(
- static_cast<int>(gc_reason));
+ const bool is_major = garbage_collector == GarbageCollector::MARK_COMPACTOR;
+ if (is_major) {
+ // Reasons are only reported for major GCs
+ counters->incremental_marking_reason()->AddSample(
+ static_cast<int>(gc_reason));
+ }
NestedTimedHistogramScope incremental_marking_scope(
- counters->gc_incremental_marking_start());
- TRACE_EVENT1(
- "v8", "V8.GCIncrementalMarkingStart", "epoch",
- heap_->tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL_START));
- TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START,
- ThreadKind::kMain);
+ is_major ? counters->gc_incremental_marking_start()
+ : counters->gc_minor_incremental_marking_start());
+ const auto scope_id = is_major ? GCTracer::Scope::MC_INCREMENTAL_START
+ : GCTracer::Scope::MINOR_MC_INCREMENTAL_START;
+ TRACE_EVENT1("v8",
+ is_major ? "V8.GCIncrementalMarkingStart"
+ : "V8.GCMinorIncrementalMarkingStart",
+ "epoch", heap_->tracer()->CurrentEpoch(scope_id));
+ TRACE_GC_EPOCH(heap()->tracer(), scope_id, ThreadKind::kMain);
heap_->tracer()->NotifyIncrementalMarkingStart();
start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
@@ -181,7 +187,7 @@ void IncrementalMarking::Start(GarbageCollector garbage_collector,
schedule_update_time_ms_ = start_time_ms_;
bytes_marked_concurrently_ = 0;
- if (garbage_collector == GarbageCollector::MARK_COMPACTOR) {
+ if (is_major) {
current_collector_ = CurrentCollector::kMajorMC;
StartMarkingMajor();
heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
@@ -266,7 +272,20 @@ void IncrementalMarking::MarkRoots() {
heap()->isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
&visitor);
- // TODO(v8:13012): Do PageMarkingItem processing.
+
+ std::vector<PageMarkingItem> marking_items;
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap_, [&marking_items](MemoryChunk* chunk) {
+ marking_items.emplace_back(chunk);
+ });
+
+ V8::GetCurrentPlatform()
+ ->CreateJob(
+ v8::TaskPriority::kUserBlocking,
+ std::make_unique<YoungGenerationMarkingJob>(
+ heap_->isolate(), heap_, minor_collector_->marking_worklists(),
+ std::move(marking_items), YoungMarkingJobType::kIncremental))
+ ->Join();
}
}
@@ -389,6 +408,13 @@ void IncrementalMarking::StartBlackAllocation() {
"Marking Code objects requires write access to the Code page header");
heap()->code_space()->MarkLinearAllocationAreaBlack();
}
+ if (heap()->isolate()->is_shared_heap_isolate()) {
+ DCHECK_EQ(heap()->shared_space()->top(), kNullAddress);
+ heap()->isolate()->global_safepoint()->IterateClientIsolates(
+ [](Isolate* client) {
+ client->heap()->MarkSharedLinearAllocationAreasBlack();
+ });
+ }
heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
local_heap->MarkLinearAllocationAreaBlack();
});
@@ -407,6 +433,13 @@ void IncrementalMarking::PauseBlackAllocation() {
"Marking Code objects requires write access to the Code page header");
heap()->code_space()->UnmarkLinearAllocationArea();
}
+ if (heap()->isolate()->is_shared_heap_isolate()) {
+ DCHECK_EQ(heap()->shared_space()->top(), kNullAddress);
+ heap()->isolate()->global_safepoint()->IterateClientIsolates(
+ [](Isolate* client) {
+ client->heap()->UnmarkSharedLinearAllocationAreas();
+ });
+ }
heap()->safepoint()->IterateLocalHeaps(
[](LocalHeap* local_heap) { local_heap->UnmarkLinearAllocationArea(); });
if (v8_flags.trace_incremental_marking) {
@@ -433,14 +466,13 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
- MarkingState* minor_marking_state =
- heap()->minor_mark_compact_collector()->marking_state();
+ MarkingState* marking_state = heap()->marking_state();
major_collector_->local_marking_worklists()->Publish();
MarkingBarrier::PublishAll(heap());
PtrComprCageBase cage_base(heap_->isolate());
- major_collector_->marking_worklists()->Update([this, minor_marking_state,
- cage_base, filler_map](
+ major_collector_->marking_worklists()->Update([this, marking_state, cage_base,
+ filler_map](
HeapObject obj,
HeapObject* out) -> bool {
DCHECK(obj.IsHeapObject());
@@ -458,7 +490,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
}
HeapObject dest = map_word.ToForwardingAddress();
USE(this);
- DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFreeSpaceOrFiller());
+ DCHECK_IMPLIES(marking_state->IsWhite(obj), obj.IsFreeSpaceOrFiller());
if (dest.InSharedHeap()) {
// Object got promoted into the shared heap. Drop it from the client
// heap marking worklist.
@@ -476,7 +508,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
DCHECK_IMPLIES(
v8_flags.minor_mc,
!obj.map_word(cage_base, kRelaxedLoad).IsForwardingAddress());
- if (minor_marking_state->IsWhite(obj)) {
+ if (marking_state->IsWhite(obj)) {
return false;
}
// Either a large object or an object marked by the minor
@@ -488,13 +520,13 @@ void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
// Only applicable during minor MC garbage collections.
if (!Heap::IsLargeObject(obj) &&
Page::FromHeapObject(obj)->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
- if (minor_marking_state->IsWhite(obj)) {
+ if (marking_state->IsWhite(obj)) {
return false;
}
*out = obj;
return true;
}
- DCHECK_IMPLIES(marking_state()->IsWhite(obj),
+ DCHECK_IMPLIES(marking_state->IsWhite(obj),
obj.IsFreeSpaceOrFiller(cage_base));
// Skip one word filler objects that appear on the
// stack when we perform in place array shift.
@@ -730,6 +762,7 @@ void IncrementalMarking::AdvanceAndFinalizeIfComplete() {
}
void IncrementalMarking::AdvanceAndFinalizeIfNecessary() {
+ if (!IsMajorMarking()) return;
DCHECK(!heap_->always_allocate());
AdvanceOnAllocation();
@@ -746,7 +779,7 @@ void IncrementalMarking::AdvanceForTesting(double max_step_size_in_ms) {
void IncrementalMarking::AdvanceOnAllocation() {
DCHECK_EQ(heap_->gc_state(), Heap::NOT_IN_GC);
DCHECK(v8_flags.incremental_marking);
- DCHECK(IsMarking());
+ DCHECK(IsMajorMarking());
// Code using an AlwaysAllocateScope assumes that the GC state does not
// change; that implies that no marking steps must be performed.
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index de99330123..a3fb20a0af 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -86,12 +86,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
IncrementalMarking(Heap* heap, WeakObjects* weak_objects);
- MarkingState* marking_state() { return &marking_state_; }
- AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }
- NonAtomicMarkingState* non_atomic_marking_state() {
- return &non_atomic_marking_state_;
- }
-
void NotifyLeftTrimming(HeapObject from, HeapObject to);
bool IsStopped() const { return !IsMarking(); }
@@ -169,6 +163,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
}
private:
+ MarkingState* marking_state() { return marking_state_; }
+ AtomicMarkingState* atomic_marking_state() { return atomic_marking_state_; }
+
class IncrementalMarkingRootMarkingVisitor;
class Observer : public AllocationObserver {
@@ -270,9 +267,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
Observer new_generation_observer_;
Observer old_generation_observer_;
- MarkingState marking_state_;
- AtomicMarkingState atomic_marking_state_;
- NonAtomicMarkingState non_atomic_marking_state_;
+ MarkingState* const marking_state_;
+ AtomicMarkingState* const atomic_marking_state_;
base::Mutex background_live_bytes_mutex_;
std::unordered_map<MemoryChunk*, intptr_t> background_live_bytes_;
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 5c776ff3fb..3ab25a0c7e 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -7,6 +7,7 @@
#include "src/base/logging.h"
#include "src/heap/invalidated-slots.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"
#include "src/objects/objects-inl.h"
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index 696b7f318a..d17394cc2c 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -34,7 +34,10 @@ InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToShared(
InvalidatedSlotsFilter::InvalidatedSlotsFilter(
MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
- RememberedSetType remembered_set_type, LivenessCheck liveness_check) {
+ RememberedSetType remembered_set_type, LivenessCheck liveness_check)
+ : marking_state_(liveness_check == LivenessCheck::kYes
+ ? chunk->heap()->non_atomic_marking_state()
+ : nullptr) {
USE(remembered_set_type);
invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;
@@ -42,14 +45,6 @@ InvalidatedSlotsFilter::InvalidatedSlotsFilter(
iterator_end_ = invalidated_slots->end();
sentinel_ = chunk->area_end();
- if (liveness_check == LivenessCheck::kYes) {
- marking_state_ =
- chunk->heap()->mark_compact_collector()->non_atomic_marking_state();
- } else {
- DCHECK_EQ(LivenessCheck::kNo, liveness_check);
- marking_state_ = nullptr;
- }
-
// Invoke NextInvalidatedObject twice, to initialize
// invalidated_start_ to the first invalidated object and
// next_invalidated_object_ to the second one.
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index cace4202c6..1215664575 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -64,7 +64,7 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
Address sentinel_;
InvalidatedObjectInfo current_{kNullAddress, 0, false};
InvalidatedObjectInfo next_{kNullAddress, 0, false};
- NonAtomicMarkingState* marking_state_;
+ NonAtomicMarkingState* const marking_state_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
index 74c621e81f..20697a2fd4 100644
--- a/deps/v8/src/heap/large-spaces.cc
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -11,6 +11,7 @@
#include "src/heap/combined-heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/list.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
@@ -132,6 +133,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
+ object_size = ALIGN_TO_ALLOCATION_ALIGNMENT(object_size);
DCHECK(!v8_flags.enable_third_party_heap);
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
@@ -150,11 +152,10 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
heap()->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
if (heap()->incremental_marking()->black_allocation()) {
- heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
+ heap()->marking_state()->WhiteToBlack(object);
}
- DCHECK_IMPLIES(
- heap()->incremental_marking()->black_allocation(),
- heap()->incremental_marking()->marking_state()->IsBlack(object));
+ DCHECK_IMPLIES(heap()->incremental_marking()->black_allocation(),
+ heap()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
heap()->NotifyOldGenerationExpansion(identity(), page);
AdvanceAndInvokeAllocationObservers(object.address(),
@@ -169,6 +170,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
AllocationResult OldLargeObjectSpace::AllocateRawBackground(
LocalHeap* local_heap, int object_size, Executability executable) {
+ object_size = ALIGN_TO_ALLOCATION_ALIGNMENT(object_size);
DCHECK(!v8_flags.enable_third_party_heap);
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
@@ -183,11 +185,10 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(
HeapObject object = page->GetObject();
heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
if (heap()->incremental_marking()->black_allocation()) {
- heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
+ heap()->marking_state()->WhiteToBlack(object);
}
- DCHECK_IMPLIES(
- heap()->incremental_marking()->black_allocation(),
- heap()->incremental_marking()->marking_state()->IsBlack(object));
+ DCHECK_IMPLIES(heap()->incremental_marking()->black_allocation(),
+ heap()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
if (identity() == CODE_LO_SPACE) {
heap()->isolate()->AddCodeMemoryChunk(page);
@@ -478,6 +479,7 @@ NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
capacity_(capacity) {}
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
+ object_size = ALIGN_TO_ALLOCATION_ALIGNMENT(object_size);
DCHECK(!v8_flags.enable_third_party_heap);
// Do not allocate more objects if promoting the existing object would exceed
// the old generation capacity.
@@ -501,10 +503,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
page->SetFlag(MemoryChunk::TO_PAGE);
UpdatePendingObject(result);
if (v8_flags.minor_mc) {
- heap()
- ->minor_mark_compact_collector()
- ->non_atomic_marking_state()
- ->ClearLiveness(page);
+ heap()->non_atomic_marking_state()->ClearLiveness(page);
}
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
@@ -582,5 +581,15 @@ void CodeLargeObjectSpace::RemovePage(LargePage* page) {
OldLargeObjectSpace::RemovePage(page);
}
+SharedLargeObjectSpace::SharedLargeObjectSpace(Heap* heap)
+ : OldLargeObjectSpace(heap, SHARED_LO_SPACE) {}
+
+AllocationResult SharedLargeObjectSpace::AllocateRawBackground(
+ LocalHeap* local_heap, int object_size) {
+ DCHECK(!v8_flags.enable_third_party_heap);
+ return OldLargeObjectSpace::AllocateRawBackground(local_heap, object_size,
+ NOT_EXECUTABLE);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/large-spaces.h b/deps/v8/src/heap/large-spaces.h
index 70c55833e1..576c672fff 100644
--- a/deps/v8/src/heap/large-spaces.h
+++ b/deps/v8/src/heap/large-spaces.h
@@ -190,6 +190,14 @@ class OldLargeObjectSpace : public LargeObjectSpace {
LocalHeap* local_heap, int object_size, Executability executable);
};
+class SharedLargeObjectSpace : public OldLargeObjectSpace {
+ public:
+ explicit SharedLargeObjectSpace(Heap* heap);
+
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRawBackground(LocalHeap* local_heap, int object_size);
+};
+
class NewLargeObjectSpace : public LargeObjectSpace {
public:
NewLargeObjectSpace(Heap* heap, size_t capacity);
diff --git a/deps/v8/src/heap/linear-allocation-area.h b/deps/v8/src/heap/linear-allocation-area.h
index 2b9b3a9132..873dd31f7f 100644
--- a/deps/v8/src/heap/linear-allocation-area.h
+++ b/deps/v8/src/heap/linear-allocation-area.h
@@ -98,7 +98,11 @@ class LinearAllocationArea final {
#ifdef DEBUG
SLOW_DCHECK(start_ <= top_);
SLOW_DCHECK(top_ <= limit_);
- SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
+ if (V8_COMPRESS_POINTERS_8GB_BOOL) {
+ SLOW_DCHECK(IsAligned(top_, kObjectAlignment8GbHeap));
+ } else {
+ SLOW_DCHECK(IsAligned(top_, kObjectAlignment));
+ }
#endif // DEBUG
}
diff --git a/deps/v8/src/heap/local-heap-inl.h b/deps/v8/src/heap/local-heap-inl.h
index cb6210fd57..401b7a4903 100644
--- a/deps/v8/src/heap/local-heap-inl.h
+++ b/deps/v8/src/heap/local-heap-inl.h
@@ -64,7 +64,8 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK_EQ(type, AllocationType::kSharedOld);
if (large_object) {
- return heap()->code_lo_space()->AllocateRawBackground(this, size_in_bytes);
+ return heap()->shared_lo_allocation_space()->AllocateRawBackground(
+ this, size_in_bytes);
} else {
return shared_old_space_allocator()->AllocateRaw(size_in_bytes, alignment,
origin);
@@ -74,6 +75,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
Address LocalHeap::AllocateRawOrFail(int object_size, AllocationType type,
AllocationOrigin origin,
AllocationAlignment alignment) {
+ object_size = ALIGN_TO_ALLOCATION_ALIGNMENT(object_size);
DCHECK(!v8_flags.enable_third_party_heap);
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
HeapObject object;
diff --git a/deps/v8/src/heap/local-heap.cc b/deps/v8/src/heap/local-heap.cc
index 611537708b..1541683ee5 100644
--- a/deps/v8/src/heap/local-heap.cc
+++ b/deps/v8/src/heap/local-heap.cc
@@ -85,6 +85,7 @@ LocalHeap::~LocalHeap() {
heap_->safepoint()->RemoveLocalHeap(this, [this] {
FreeLinearAllocationArea();
+ FreeSharedLinearAllocationArea();
if (!is_main_thread()) {
CodePageHeaderModificationScope rwx_write_scope(
@@ -120,9 +121,9 @@ void LocalHeap::SetUp() {
std::make_unique<ConcurrentAllocator>(this, heap_->code_space());
DCHECK_NULL(shared_old_space_allocator_);
- if (heap_->isolate()->shared_isolate()) {
- shared_old_space_allocator_ =
- std::make_unique<ConcurrentAllocator>(this, heap_->shared_old_space());
+ if (heap_->isolate()->has_shared_heap()) {
+ shared_old_space_allocator_ = std::make_unique<ConcurrentAllocator>(
+ this, heap_->shared_allocation_space());
}
DCHECK_NULL(marking_barrier_);
@@ -347,7 +348,9 @@ void LocalHeap::FreeLinearAllocationArea() {
}
void LocalHeap::FreeSharedLinearAllocationArea() {
- shared_old_space_allocator_->FreeLinearAllocationArea();
+ if (shared_old_space_allocator_) {
+ shared_old_space_allocator_->FreeLinearAllocationArea();
+ }
}
void LocalHeap::MakeLinearAllocationAreaIterable() {
@@ -365,26 +368,15 @@ void LocalHeap::UnmarkLinearAllocationArea() {
code_space_allocator_->UnmarkLinearAllocationArea();
}
-bool LocalHeap::TryPerformCollection() {
- if (is_main_thread()) {
- heap_->CollectGarbageForBackground(this);
- return true;
- } else {
- DCHECK(IsRunning());
- if (!heap_->collection_barrier_->TryRequestGC()) return false;
-
- LocalHeap* main_thread = heap_->main_thread_local_heap();
-
- const ThreadState old_state = main_thread->state_.SetCollectionRequested();
+void LocalHeap::MarkSharedLinearAllocationAreaBlack() {
+ if (shared_old_space_allocator_) {
+ shared_old_space_allocator_->MarkLinearAllocationAreaBlack();
+ }
+}
- if (old_state.IsRunning()) {
- const bool performed_gc =
- heap_->collection_barrier_->AwaitCollectionBackground(this);
- return performed_gc;
- } else {
- DCHECK(old_state.IsParked());
- return false;
- }
+void LocalHeap::UnmarkSharedLinearAllocationArea() {
+ if (shared_old_space_allocator_) {
+ shared_old_space_allocator_->UnmarkLinearAllocationArea();
}
}
@@ -395,21 +387,34 @@ Address LocalHeap::PerformCollectionAndAllocateAgain(
CHECK(!main_thread_parked_);
allocation_failed_ = true;
static const int kMaxNumberOfRetries = 3;
+ int failed_allocations = 0;
+ int parked_allocations = 0;
for (int i = 0; i < kMaxNumberOfRetries; i++) {
- if (!TryPerformCollection()) {
+ if (!heap_->CollectGarbageFromAnyThread(this)) {
main_thread_parked_ = true;
+ parked_allocations++;
}
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
- if (!result.IsFailure()) {
+ if (result.IsFailure()) {
+ failed_allocations++;
+ } else {
allocation_failed_ = false;
main_thread_parked_ = false;
return result.ToObjectChecked().address();
}
}
+ if (v8_flags.trace_gc) {
+ heap_->isolate()->PrintWithTimestamp(
+ "Background allocation failure: "
+ "allocations=%d"
+ "allocations.parked=%d",
+ failed_allocations, parked_allocations);
+ }
+
heap_->FatalProcessOutOfMemory("LocalHeap: allocation failed");
}
@@ -433,9 +438,11 @@ void LocalHeap::InvokeGCEpilogueCallbacksInSafepoint(GCType gc_type,
void LocalHeap::NotifyObjectSizeChange(
HeapObject object, int old_size, int new_size,
- ClearRecordedSlots clear_recorded_slots) {
+ ClearRecordedSlots clear_recorded_slots,
+ UpdateInvalidatedObjectSize update_invalidated_object_size) {
heap()->NotifyObjectSizeChange(object, old_size, new_size,
- clear_recorded_slots);
+ clear_recorded_slots,
+ update_invalidated_object_size);
}
} // namespace internal
diff --git a/deps/v8/src/heap/local-heap.h b/deps/v8/src/heap/local-heap.h
index a4f0e49b07..4e6437669a 100644
--- a/deps/v8/src/heap/local-heap.h
+++ b/deps/v8/src/heap/local-heap.h
@@ -115,6 +115,11 @@ class V8_EXPORT_PRIVATE LocalHeap {
void MarkLinearAllocationAreaBlack();
void UnmarkLinearAllocationArea();
+ // Mark/Unmark linear allocation areas in shared heap black. Used for black
+ // allocation.
+ void MarkSharedLinearAllocationAreaBlack();
+ void UnmarkSharedLinearAllocationArea();
+
// Give up linear allocation areas. Used for mark-compact GC.
void FreeLinearAllocationArea();
@@ -149,8 +154,11 @@ class V8_EXPORT_PRIVATE LocalHeap {
AllocationOrigin origin = AllocationOrigin::kRuntime,
AllocationAlignment alignment = kTaggedAligned);
- void NotifyObjectSizeChange(HeapObject object, int old_size, int new_size,
- ClearRecordedSlots clear_recorded_slots);
+ void NotifyObjectSizeChange(
+ HeapObject object, int old_size, int new_size,
+ ClearRecordedSlots clear_recorded_slots,
+ UpdateInvalidatedObjectSize update_invalidated_object_size =
+ UpdateInvalidatedObjectSize::kYes);
bool is_main_thread() const { return is_main_thread_; }
bool deserialization_complete() const {
@@ -158,9 +166,6 @@ class V8_EXPORT_PRIVATE LocalHeap {
}
ReadOnlySpace* read_only_space() { return heap_->read_only_space(); }
- // Requests GC and blocks until the collection finishes.
- bool TryPerformCollection();
-
// Adds a callback that is invoked with the given |data| after each GC.
// The callback is invoked on the main thread before any background thread
// resumes. The callback must not allocate or make any other calls that
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 2ce0abfe9a..1a14b388a0 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -9,7 +9,9 @@
#include "src/codegen/assembler-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/index-generator.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-worklist-inl.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/objects-visiting-inl.h"
@@ -42,7 +44,7 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
if (Heap::InYoungGeneration(obj) &&
- non_atomic_marking_state_.WhiteToGrey(obj)) {
+ non_atomic_marking_state()->WhiteToGrey(obj)) {
local_marking_worklists_->Push(obj);
}
}
@@ -88,6 +90,16 @@ void MarkCompactCollector::AddTransitionArray(TransitionArray array) {
local_weak_objects()->transition_arrays_local.Push(array);
}
+bool MarkCompactCollector::ShouldMarkObject(HeapObject object) const {
+ if (V8_LIKELY(!uses_shared_heap_)) return true;
+ if (v8_flags.shared_space) {
+ if (is_shared_heap_isolate_) return true;
+ return !object.InSharedHeap();
+ } else {
+ return is_shared_heap_isolate_ == object.InSharedHeap();
+ }
+}
+
template <typename MarkingState>
template <typename T, typename TBodyDescriptor>
int MainMarkingVisitor<MarkingState>::VisitJSObjectSubclass(Map map, T object) {
@@ -199,8 +211,9 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// Map might be forwarded during GC.
DCHECK(MarkCompactCollector::IsMapOrForwarded(map));
size = black_object.SizeFromMap(map);
- CHECK_LE(addr + size, chunk_->area_end());
- Address end = addr + size - kTaggedSize;
+ int aligned_size = ALIGN_TO_ALLOCATION_ALIGNMENT(size);
+ CHECK_LE(addr + aligned_size, chunk_->area_end());
+ Address end = addr + aligned_size - kTaggedSize;
// One word filler objects do not borrow the second mark bit. We have
// to jump over the advancing and clearing part.
// Note that we know that we are at a one word filler when
@@ -231,7 +244,8 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
map = Map::cast(map_object);
DCHECK(map.IsMap(cage_base));
size = object.SizeFromMap(map);
- CHECK_LE(addr + size, chunk_->area_end());
+ CHECK_LE(addr + ALIGN_TO_ALLOCATION_ALIGNMENT(size),
+ chunk_->area_end());
}
// We found a live object.
@@ -281,6 +295,64 @@ typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::end() {
Isolate* CollectorBase::isolate() { return heap()->isolate(); }
+class YoungGenerationMarkingTask;
+
+class PageMarkingItem : public ParallelWorkItem {
+ public:
+ explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
+ ~PageMarkingItem() = default;
+
+ void Process(YoungGenerationMarkingTask* task);
+
+ private:
+ inline Heap* heap() { return chunk_->heap(); }
+
+ void MarkUntypedPointers(YoungGenerationMarkingTask* task);
+
+ void MarkTypedPointers(YoungGenerationMarkingTask* task);
+
+ template <typename TSlot>
+ V8_INLINE SlotCallbackResult
+ CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot);
+
+ MemoryChunk* chunk_;
+};
+
+enum class YoungMarkingJobType { kAtomic, kIncremental };
+
+class YoungGenerationMarkingJob : public v8::JobTask {
+ public:
+ YoungGenerationMarkingJob(Isolate* isolate, Heap* heap,
+ MarkingWorklists* global_worklists,
+ std::vector<PageMarkingItem> marking_items,
+ YoungMarkingJobType young_marking_job_type)
+ : isolate_(isolate),
+ heap_(heap),
+ global_worklists_(global_worklists),
+ marking_items_(std::move(marking_items)),
+ remaining_marking_items_(marking_items_.size()),
+ generator_(marking_items_.size()),
+ young_marking_job_type_(young_marking_job_type) {}
+
+ void Run(JobDelegate* delegate) override;
+ size_t GetMaxConcurrency(size_t worker_count) const override;
+ bool incremental() const {
+ return young_marking_job_type_ == YoungMarkingJobType::kIncremental;
+ }
+
+ private:
+ void ProcessItems(JobDelegate* delegate);
+ void ProcessMarkingItems(YoungGenerationMarkingTask* task);
+
+ Isolate* isolate_;
+ Heap* heap_;
+ MarkingWorklists* global_worklists_;
+ std::vector<PageMarkingItem> marking_items_;
+ std::atomic_size_t remaining_marking_items_{0};
+ IndexGenerator generator_;
+ YoungMarkingJobType young_marking_job_type_;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 5fd704b622..180aabb5f5 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -19,12 +19,14 @@
#include "src/execution/isolate-utils-inl.h"
#include "src/execution/isolate-utils.h"
#include "src/execution/vm-state-inl.h"
+#include "src/flags/flags.h"
#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/evacuation-allocator-inl.h"
+#include "src/heap/evacuation-verifier-inl.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/global-handle-marking-visitor.h"
@@ -36,6 +38,7 @@
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-barrier.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/memory-chunk-layout.h"
@@ -46,6 +49,8 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/parallel-work-item.h"
+#include "src/heap/pretenuring-handler-inl.h"
+#include "src/heap/pretenuring-handler.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/remembered-set.h"
@@ -218,8 +223,7 @@ class FullMarkingVerifier : public MarkingVerifier {
public:
explicit FullMarkingVerifier(Heap* heap)
: MarkingVerifier(heap),
- marking_state_(
- heap->mark_compact_collector()->non_atomic_marking_state()) {}
+ marking_state_(heap->non_atomic_marking_state()) {}
void Run() override {
VerifyRoots();
@@ -227,9 +231,11 @@ class FullMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->new_lo_space());
VerifyMarking(heap_->old_space());
VerifyMarking(heap_->code_space());
+ if (heap_->shared_space()) VerifyMarking(heap_->shared_space());
if (heap_->map_space()) VerifyMarking(heap_->map_space());
VerifyMarking(heap_->lo_space());
VerifyMarking(heap_->code_lo_space());
+ if (heap_->shared_lo_space()) VerifyMarking(heap_->shared_lo_space());
}
protected:
@@ -281,9 +287,7 @@ class FullMarkingVerifier : public MarkingVerifier {
private:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
- if (heap_->IsShared() !=
- BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
- return;
+ if (!ShouldVerifyObject(heap_object)) return;
if (heap_->MustBeInSharedOldSpace(heap_object)) {
CHECK(heap_->SharedHeapContains(heap_object));
@@ -292,6 +296,12 @@ class FullMarkingVerifier : public MarkingVerifier {
CHECK(marking_state_->IsBlack(heap_object));
}
+ V8_INLINE bool ShouldVerifyObject(HeapObject heap_object) {
+ const bool in_shared_heap = heap_object.InSharedWritableHeap();
+ return heap_->isolate()->is_shared_heap_isolate() ? in_shared_heap
+ : !in_shared_heap;
+ }
+
template <typename TSlot>
V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
for (TSlot slot = start; slot < end; ++slot) {
@@ -303,170 +313,15 @@ class FullMarkingVerifier : public MarkingVerifier {
}
}
- NonAtomicMarkingState* marking_state_;
-};
-
-class EvacuationVerifier : public ObjectVisitorWithCageBases,
- public RootVisitor {
- public:
- virtual void Run() = 0;
-
- void VisitPointers(HeapObject host, ObjectSlot start,
- ObjectSlot end) override {
- VerifyPointers(start, end);
- }
-
- void VisitPointers(HeapObject host, MaybeObjectSlot start,
- MaybeObjectSlot end) override {
- VerifyPointers(start, end);
- }
-
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- VerifyCodePointer(slot);
- }
-
- void VisitRootPointers(Root root, const char* description,
- FullObjectSlot start, FullObjectSlot end) override {
- VerifyRootPointers(start, end);
- }
-
- void VisitMapPointer(HeapObject object) override {
- VerifyMap(object.map(cage_base()));
- }
-
- protected:
- explicit EvacuationVerifier(Heap* heap)
- : ObjectVisitorWithCageBases(heap), heap_(heap) {}
-
- inline Heap* heap() { return heap_; }
-
- virtual void VerifyMap(Map map) = 0;
- virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
- virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
- virtual void VerifyCodePointer(CodeObjectSlot slot) = 0;
- virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
-
- void VerifyRoots();
- void VerifyEvacuationOnPage(Address start, Address end);
- void VerifyEvacuation(NewSpace* new_space);
- void VerifyEvacuation(PagedSpaceBase* paged_space);
-
- Heap* heap_;
-};
-
-void EvacuationVerifier::VerifyRoots() {
- heap_->IterateRootsIncludingClients(this,
- base::EnumSet<SkipRoot>{SkipRoot::kWeak});
-}
-
-void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
- Address current = start;
- while (current < end) {
- HeapObject object = HeapObject::FromAddress(current);
- if (!object.IsFreeSpaceOrFiller(cage_base())) {
- object.Iterate(cage_base(), this);
- }
- current += object.Size(cage_base());
- }
-}
-
-void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
- if (!space) return;
- if (v8_flags.minor_mc) {
- VerifyEvacuation(PagedNewSpace::From(space)->paged_space());
- return;
- }
- PageRange range(space->first_allocatable_address(), space->top());
- for (auto it = range.begin(); it != range.end();) {
- Page* page = *(it++);
- Address current = page->area_start();
- Address limit = it != range.end() ? page->area_end() : space->top();
- CHECK(limit == space->top() || !page->Contains(space->top()));
- VerifyEvacuationOnPage(current, limit);
- }
-}
-
-void EvacuationVerifier::VerifyEvacuation(PagedSpaceBase* space) {
- for (Page* p : *space) {
- if (p->IsEvacuationCandidate()) continue;
- if (p->Contains(space->top())) {
- CodePageMemoryModificationScope memory_modification_scope(p);
- heap_->CreateFillerObjectAt(
- space->top(), static_cast<int>(space->limit() - space->top()));
- }
- VerifyEvacuationOnPage(p->area_start(), p->area_end());
- }
-}
-
-class FullEvacuationVerifier : public EvacuationVerifier {
- public:
- explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
-
- void Run() override {
- DCHECK(!heap_->mark_compact_collector()->sweeping_in_progress());
- VerifyRoots();
- VerifyEvacuation(heap_->new_space());
- VerifyEvacuation(heap_->old_space());
- VerifyEvacuation(heap_->code_space());
- if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
- }
-
- protected:
- V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
- if (heap_->IsShared() !=
- BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
- return;
-
- CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
- Heap::InToPage(heap_object));
- CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
- }
-
- template <typename TSlot>
- void VerifyPointersImpl(TSlot start, TSlot end) {
- for (TSlot current = start; current < end; ++current) {
- typename TSlot::TObject object = current.load(cage_base());
- HeapObject heap_object;
- if (object.GetHeapObjectIfStrong(&heap_object)) {
- VerifyHeapObjectImpl(heap_object);
- }
- }
- }
- void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
- void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
- VerifyPointersImpl(start, end);
- }
- void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
- VerifyPointersImpl(start, end);
- }
- void VerifyCodePointer(CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- Object maybe_code = slot.load(code_cage_base());
- HeapObject code;
- // The slot might contain smi during CodeDataContainer creation, so skip it.
- if (maybe_code.GetHeapObject(&code)) {
- VerifyHeapObjectImpl(code);
- }
- }
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- VerifyHeapObjectImpl(target);
- }
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
- }
- void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
- VerifyPointersImpl(start, end);
- }
+ NonAtomicMarkingState* const marking_state_;
};
} // namespace
#endif // VERIFY_HEAP
-// =============================================================================
-// MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
-// =============================================================================
+// ==================================================================
+// CollectorBase, MinorMarkCompactCollector, MarkCompactCollector
+// ==================================================================
namespace {
@@ -492,8 +347,8 @@ int NumberOfParallelCompactionTasks(Heap* heap) {
CollectorBase::CollectorBase(Heap* heap, GarbageCollector collector)
: heap_(heap),
garbage_collector_(collector),
- marking_state_(heap->isolate()),
- non_atomic_marking_state_(heap->isolate()) {
+ marking_state_(heap_->marking_state()),
+ non_atomic_marking_state_(heap_->non_atomic_marking_state()) {
DCHECK_NE(GarbageCollector::SCAVENGER, garbage_collector_);
}
@@ -501,16 +356,120 @@ bool CollectorBase::IsMajorMC() {
return !heap_->IsYoungGenerationCollector(garbage_collector_);
}
+void CollectorBase::StartSweepSpace(PagedSpace* space) {
+ DCHECK_NE(NEW_SPACE, space->identity());
+ space->ClearAllocatorState();
+
+ int will_be_swept = 0;
+ bool unused_page_present = false;
+
+ Sweeper* sweeper = heap()->sweeper();
+
+ // Loop needs to support deletion if live bytes == 0 for a page.
+ for (auto it = space->begin(); it != space->end();) {
+ Page* p = *(it++);
+ DCHECK(p->SweepingDone());
+
+ if (p->IsEvacuationCandidate()) {
+ DCHECK_NE(NEW_SPACE, space->identity());
+ // Will be processed in Evacuate.
+ continue;
+ }
+
+ // One unused page is kept, all further are released before sweeping them.
+ if (non_atomic_marking_state()->live_bytes(p) == 0) {
+ if (unused_page_present) {
+ if (v8_flags.gc_verbose) {
+ PrintIsolate(isolate(), "sweeping: released page: %p",
+ static_cast<void*>(p));
+ }
+ space->ReleasePage(p);
+ continue;
+ }
+ unused_page_present = true;
+ }
+
+ sweeper->AddPage(space->identity(), p, Sweeper::REGULAR);
+ will_be_swept++;
+ }
+
+ if (v8_flags.gc_verbose) {
+ PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
+ space->name(), will_be_swept);
+ }
+}
+
+void CollectorBase::StartSweepNewSpace() {
+ PagedSpaceForNewSpace* paged_space = heap()->paged_new_space()->paged_space();
+ paged_space->ClearAllocatorState();
+
+ int will_be_swept = 0;
+
+ if (heap()->ShouldReduceNewSpaceSize()) {
+ paged_space->StartShrinking();
+ is_new_space_shrinking_ = true;
+ }
+
+ Sweeper* sweeper = heap()->sweeper();
+
+ for (auto it = paged_space->begin(); it != paged_space->end();) {
+ Page* p = *(it++);
+ DCHECK(p->SweepingDone());
+
+ if (non_atomic_marking_state()->live_bytes(p) > 0) {
+ // Non-empty pages will be evacuated/promoted.
+ continue;
+ }
+
+ if (is_new_space_shrinking_ && paged_space->ShouldReleasePage()) {
+ paged_space->ReleasePage(p);
+ } else {
+ sweeper->AddNewSpacePage(p);
+ }
+ will_be_swept++;
+ }
+
+ if (v8_flags.gc_verbose) {
+ PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
+ paged_space->name(), will_be_swept);
+ }
+}
+
+void CollectorBase::SweepLargeSpace(LargeObjectSpace* space) {
+ auto* marking_state = heap()->non_atomic_marking_state();
+ PtrComprCageBase cage_base(heap()->isolate());
+ size_t surviving_object_size = 0;
+ for (auto it = space->begin(); it != space->end();) {
+ LargePage* current = *(it++);
+ HeapObject object = current->GetObject();
+ DCHECK(!marking_state->IsGrey(object));
+ if (!marking_state->IsBlack(object)) {
+ // Object is dead and page can be released.
+ space->RemovePage(current);
+ heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
+ current);
+
+ continue;
+ }
+ Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
+ current->ProgressBar().ResetIfEnabled();
+ non_atomic_marking_state()->SetLiveBytes(current, 0);
+ surviving_object_size += static_cast<size_t>(object.Size(cage_base));
+ }
+ space->set_objects_size(surviving_object_size);
+}
+
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: CollectorBase(heap, GarbageCollector::MARK_COMPACTOR),
#ifdef DEBUG
state_(IDLE),
#endif
- is_shared_heap_(heap->IsShared()),
- sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
+ uses_shared_heap_(isolate()->has_shared_heap() || isolate()->is_shared()),
+ is_shared_heap_isolate_(isolate()->is_shared_heap_isolate()),
+ sweeper_(heap_->sweeper()) {
}
-MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
+MarkCompactCollector::~MarkCompactCollector() = default;
void MarkCompactCollector::SetUp() {
DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
@@ -529,7 +488,6 @@ void MarkCompactCollector::TearDown() {
local_weak_objects()->Publish();
weak_objects()->Clear();
}
- sweeper()->TearDown();
}
// static
@@ -585,6 +543,10 @@ bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
CollectEvacuationCandidates(heap()->map_space());
}
+ if (heap()->shared_space()) {
+ CollectEvacuationCandidates(heap()->shared_space());
+ }
+
if (v8_flags.compact_code_space &&
(!heap()->IsGCWithStack() || v8_flags.compact_code_space_with_stack)) {
CollectEvacuationCandidates(heap()->code_space());
@@ -702,74 +664,6 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
#endif // VERIFY_HEAP
-void MarkCompactCollector::FinishSweepingIfOutOfWork() {
- if (sweeper()->sweeping_in_progress() && v8_flags.concurrent_sweeping &&
- !sweeper()->AreSweeperTasksRunning()) {
- // At this point we know that all concurrent sweeping tasks have run
- // out of work and quit: all pages are swept. The main thread still needs
- // to complete sweeping though.
- EnsureSweepingCompleted(SweepingForcedFinalizationMode::kV8Only);
- }
- if (heap()->cpp_heap()) {
- // Ensure that sweeping is also completed for the C++ managed heap, if one
- // exists and it's out of work.
- CppHeap::From(heap()->cpp_heap())->FinishSweepingIfOutOfWork();
- }
-}
-
-void MarkCompactCollector::EnsureSweepingCompleted(
- SweepingForcedFinalizationMode mode) {
- if (sweeper()->sweeping_in_progress()) {
- TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
- ThreadKind::kMain);
-
- sweeper()->EnsureCompleted();
- heap()->old_space()->RefillFreeList(sweeper());
- {
- CodePageHeaderModificationScope rwx_write_scope(
- "Updating per-page stats stored in page headers requires write "
- "access to Code page headers");
- heap()->code_space()->RefillFreeList(sweeper());
- }
- if (heap()->map_space()) {
- heap()->map_space()->RefillFreeList(sweeper());
- heap()->map_space()->SortFreeList();
- }
-
- heap()->tracer()->NotifySweepingCompleted();
-
-#ifdef VERIFY_HEAP
- if (v8_flags.verify_heap && !evacuation()) {
- FullEvacuationVerifier verifier(heap());
- verifier.Run();
- }
-#endif
- }
-
- if (mode == SweepingForcedFinalizationMode::kUnifiedHeap &&
- heap()->cpp_heap()) {
- // Ensure that sweeping is also completed for the C++ managed heap, if one
- // exists.
- CppHeap::From(heap()->cpp_heap())->FinishSweepingIfRunning();
- DCHECK(
- !CppHeap::From(heap()->cpp_heap())->sweeper().IsSweepingInProgress());
- }
-
- DCHECK_IMPLIES(mode == SweepingForcedFinalizationMode::kUnifiedHeap ||
- !heap()->cpp_heap(),
- !heap()->tracer()->IsSweepingInProgress());
-}
-
-void MarkCompactCollector::EnsurePageIsSwept(Page* page) {
- sweeper()->EnsurePageIsSwept(page);
-}
-
-void MarkCompactCollector::DrainSweepingWorklistForSpace(
- AllocationSpace space) {
- if (!sweeper()->sweeping_in_progress()) return;
- sweeper()->DrainSweepingWorklistForSpace(space);
-}
-
void MarkCompactCollector::ComputeEvacuationHeuristics(
size_t area_size, int* target_fragmentation_percent,
size_t* max_evacuated_bytes) {
@@ -820,7 +714,7 @@ void MarkCompactCollector::ComputeEvacuationHeuristics(
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
- space->identity() == MAP_SPACE);
+ space->identity() == MAP_SPACE || space->identity() == SHARED_SPACE);
int number_of_pages = space->CountTotalPages();
size_t area_size = space->AreaSize();
@@ -854,7 +748,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
CodePageHeaderModificationScope rwx_write_scope(
"Modification of Code page header flags requires write access");
- DCHECK(!sweeping_in_progress());
+ DCHECK(!sweeper()->sweeping_in_progress());
Page* owner_of_linear_allocation_area =
space->top() == space->limit()
? nullptr
@@ -1002,7 +896,7 @@ void MarkCompactCollector::Prepare() {
state_ = PREPARE_GC;
#endif
- DCHECK(!sweeping_in_progress());
+ DCHECK(!sweeper()->sweeping_in_progress());
// Unmapper tasks needs to be stopped during the GC, otherwise pages queued
// for freeing might get unmapped during the GC.
@@ -1031,22 +925,8 @@ void MarkCompactCollector::Prepare() {
heap_->FreeLinearAllocationAreas();
- PagedSpaceIterator spaces(heap());
- for (PagedSpace* space = spaces.Next(); space != nullptr;
- space = spaces.Next()) {
- space->PrepareForMarkCompact();
- }
-
- // All objects are guaranteed to be initialized in atomic pause
- if (heap()->new_lo_space()) {
- heap()->new_lo_space()->ResetPendingObject();
- }
-
NewSpace* new_space = heap()->new_space();
if (new_space) {
- if (v8_flags.minor_mc) {
- PagedNewSpace::From(new_space)->paged_space()->PrepareForMarkCompact();
- }
DCHECK_EQ(new_space->top(), new_space->original_top_acquire());
}
}
@@ -1081,6 +961,7 @@ void MarkCompactCollector::VerifyMarking() {
heap()->old_space()->VerifyLiveBytes();
if (heap()->map_space()) heap()->map_space()->VerifyLiveBytes();
heap()->code_space()->VerifyLiveBytes();
+ if (heap()->shared_space()) heap()->shared_space()->VerifyLiveBytes();
if (v8_flags.minor_mc && heap()->paged_new_space())
heap()->paged_new_space()->paged_space()->VerifyLiveBytes();
}
@@ -1105,16 +986,34 @@ void ShrinkPagesToObjectSizes(Heap* heap, OldLargeObjectSpace* space) {
} // namespace
void MarkCompactCollector::Finish() {
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
+ if (heap()->new_lo_space()) {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP_NEW_LO);
+ SweepLargeSpace(heap()->new_lo_space());
+ }
+
+ if (v8_flags.minor_mc && heap()->new_space()) {
+ // Keep new space sweeping atomic.
+ GCTracer::Scope sweep_scope(heap()->tracer(),
+ GCTracer::Scope::MC_SWEEP_FINISH_NEW,
+ ThreadKind::kMain);
+ sweeper()->ParallelSweepSpace(NEW_SPACE,
+ Sweeper::SweepingMode::kEagerDuringGC, 0);
+ heap()->paged_new_space()->paged_space()->RefillFreeList();
+ }
+
+#ifdef DEBUG
+ heap()->VerifyCountersBeforeConcurrentSweeping();
+#endif
+ }
+
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
heap()->isolate()->global_handles()->ClearListOfYoungNodes();
SweepArrayBufferExtensions();
-#ifdef DEBUG
- heap()->VerifyCountersBeforeConcurrentSweeping();
-#endif
-
marking_visitor_.reset();
local_marking_worklists_.reset();
marking_worklists_.ReleaseContextWorklists();
@@ -1126,23 +1025,6 @@ void MarkCompactCollector::Finish() {
local_weak_objects_.reset();
weak_objects_.next_ephemerons.Clear();
- if (heap()->new_lo_space()) {
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_FINISH_SWEEP_NEW_LO,
- ThreadKind::kMain);
- SweepLargeSpace(heap()->new_lo_space());
- }
-
- if (v8_flags.minor_mc && heap()->new_space()) {
- // Keep new space sweeping atomic.
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_FINISH_SWEEP_NEW,
- ThreadKind::kMain);
- sweeper()->ParallelSweepSpace(NEW_SPACE,
- Sweeper::SweepingMode::kEagerDuringGC, 0);
- heap()->paged_new_space()->paged_space()->RefillFreeList(sweeper());
- }
-
sweeper()->StartSweeperTasks();
// Ensure unmapper tasks are stopped such that queued pages aren't freed
@@ -1175,7 +1057,7 @@ void MarkCompactCollector::SweepArrayBufferExtensions() {
class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
public:
explicit RootMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector), is_shared_heap_(collector->is_shared_heap()) {}
+ : collector_(collector) {}
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) final {
@@ -1231,14 +1113,11 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
Object object = *p;
if (!object.IsHeapObject()) return;
HeapObject heap_object = HeapObject::cast(object);
- BasicMemoryChunk* target_page =
- BasicMemoryChunk::FromHeapObject(heap_object);
- if (is_shared_heap_ != target_page->InSharedHeap()) return;
+ if (!collector_->ShouldMarkObject(heap_object)) return;
collector_->MarkRootObject(root, heap_object);
}
MarkCompactCollector* const collector_;
- const bool is_shared_heap_;
};
// This visitor is used to visit the body of special objects held alive by
@@ -1300,12 +1179,7 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
V8_INLINE void MarkObject(HeapObject host, Object object) {
if (!object.IsHeapObject()) return;
HeapObject heap_object = HeapObject::cast(object);
- // We use this visitor both in client and shared GCs. The client GC should
- // not mark objects in the shared heap. In shared GCs we are marking each
- // client's top stack frame, so it is actually legal to encounter references
- // into the client heap here in a shared GC. We need to bail out in these
- // cases as well.
- if (collector_->is_shared_heap() != heap_object.InSharedHeap()) return;
+ if (!collector_->ShouldMarkObject(heap_object)) return;
collector_->MarkObject(host, heap_object);
}
@@ -1394,7 +1268,7 @@ class InternalizedStringTableCleaner final : public RootVisitor {
OffHeapObjectSlot end) override {
DCHECK_EQ(root, Root::kStringTable);
// Visit all HeapObject pointers in [start, end).
- auto* marking_state = heap_->mark_compact_collector()->marking_state();
+ auto* marking_state = heap_->marking_state();
Isolate* isolate = heap_->isolate();
for (OffHeapObjectSlot p = start; p < end; ++p) {
Object o = p.load(isolate);
@@ -1424,8 +1298,7 @@ class ExternalStringTableCleaner : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
- NonAtomicMarkingState* marking_state =
- heap_->mark_compact_collector()->non_atomic_marking_state();
+ NonAtomicMarkingState* marking_state = heap_->non_atomic_marking_state();
Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
for (FullObjectSlot p = start; p < end; ++p) {
Object o = *p;
@@ -1552,10 +1425,9 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
public:
explicit RecordMigratedSlotVisitor(
- MarkCompactCollector* collector,
- EphemeronRememberedSet* ephemeron_remembered_set)
- : ObjectVisitorWithCageBases(collector->isolate()),
- collector_(collector),
+ Heap* heap, EphemeronRememberedSet* ephemeron_remembered_set)
+ : ObjectVisitorWithCageBases(heap->isolate()),
+ heap_(heap),
ephemeron_remembered_set_(ephemeron_remembered_set) {}
inline void VisitPointer(HeapObject host, ObjectSlot p) final {
@@ -1623,7 +1495,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
// the old-to-new remembered set.
DCHECK(!Heap::InYoungGeneration(target));
DCHECK(!target.InSharedWritableHeap());
- collector_->RecordRelocSlot(host, rinfo, target);
+ heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
}
inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
@@ -1632,12 +1504,11 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
HeapObject object = rinfo->target_object(cage_base());
GenerationalBarrierForCode(host, rinfo, object);
WriteBarrier::Shared(host, rinfo, object);
- collector_->RecordRelocSlot(host, rinfo, object);
+ heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, object);
}
// Entries that are skipped for recording.
inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
- inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
inline void VisitExternalPointer(HeapObject host, ExternalPointerSlot slot,
ExternalPointerTag tag) final {}
@@ -1667,14 +1538,14 @@ class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
}
- } else if (p->InSharedHeap() && !collector_->is_shared_heap()) {
+ } else if (p->InSharedHeap() && !host.InSharedWritableHeap()) {
RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
}
}
}
- MarkCompactCollector* collector_;
+ Heap* const heap_;
EphemeronRememberedSet* ephemeron_remembered_set_;
};
@@ -1778,7 +1649,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (V8_UNLIKELY(v8_flags.minor_mc)) {
base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
}
- } else if (dest == MAP_SPACE) {
+ } else if (dest == MAP_SPACE || dest == SHARED_SPACE) {
DCHECK_OBJECT_SIZE(size);
DCHECK(IsAligned(size, kTaggedSize));
base->heap_->CopyBlock(dst_addr, src_addr, size);
@@ -1832,8 +1703,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
Map map = object.map(cage_base());
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation;
- if (ShouldPromoteIntoSharedHeap(map)) {
- DCHECK_EQ(target_space, OLD_SPACE);
+ if (target_space == OLD_SPACE && ShouldPromoteIntoSharedHeap(map)) {
DCHECK_NOT_NULL(shared_old_allocator_);
allocation = shared_old_allocator_->AllocateRaw(size, alignment,
AllocationOrigin::kGC);
@@ -1890,13 +1760,14 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
Heap* heap, EvacuationAllocator* local_allocator,
ConcurrentAllocator* shared_old_allocator,
RecordMigratedSlotVisitor* record_visitor,
- Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
+ PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
AlwaysPromoteYoung always_promote_young)
: EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
record_visitor),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
promoted_size_(0),
semispace_copied_size_(0),
+ pretenuring_handler_(heap_->pretenuring_handler()),
local_pretenuring_feedback_(local_pretenuring_feedback),
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
always_promote_young_(always_promote_young) {}
@@ -1906,8 +1777,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
HeapObject target_object;
if (always_promote_young_ == AlwaysPromoteYoung::kYes) {
- heap_->UpdateAllocationSite(object.map(), object,
- local_pretenuring_feedback_);
+ pretenuring_handler_->UpdateAllocationSite(object.map(), object,
+ local_pretenuring_feedback_);
if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
heap_->FatalProcessOutOfMemory(
@@ -1918,17 +1789,18 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
return true;
}
+ DCHECK(!v8_flags.minor_mc);
+
if (heap_->new_space()->ShouldBePromoted(object.address()) &&
TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
// Full GCs use AlwaysPromoteYoung::kYes above and MinorMC should never
// move objects.
- DCHECK(!v8_flags.minor_mc);
promoted_size_ += size;
return true;
}
- heap_->UpdateAllocationSite(object.map(), object,
- local_pretenuring_feedback_);
+ pretenuring_handler_->UpdateAllocationSite(object.map(), object,
+ local_pretenuring_feedback_);
HeapObject target;
AllocationSpace space = AllocateTargetObject(object, size, &target);
@@ -1990,7 +1862,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
LocalAllocationBuffer buffer_;
intptr_t promoted_size_;
intptr_t semispace_copied_size_;
- Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
+ PretenturingHandler* const pretenuring_handler_;
+ PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback_;
bool is_incremental_marking_;
AlwaysPromoteYoung always_promote_young_;
};
@@ -2000,10 +1873,11 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
public:
explicit EvacuateNewSpacePageVisitor(
Heap* heap, RecordMigratedSlotVisitor* record_visitor,
- Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
+ PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback)
: heap_(heap),
record_visitor_(record_visitor),
moved_bytes_(0),
+ pretenuring_handler_(heap_->pretenuring_handler()),
local_pretenuring_feedback_(local_pretenuring_feedback) {}
static void Move(Page* page) {
@@ -2022,12 +1896,12 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
inline bool Visit(HeapObject object, int size) override {
if (mode == NEW_TO_NEW) {
DCHECK(!v8_flags.minor_mc);
- heap_->UpdateAllocationSite(object.map(), object,
- local_pretenuring_feedback_);
+ pretenuring_handler_->UpdateAllocationSite(object.map(), object,
+ local_pretenuring_feedback_);
} else if (mode == NEW_TO_OLD) {
if (v8_flags.minor_mc) {
- heap_->UpdateAllocationSite(object.map(), object,
- local_pretenuring_feedback_);
+ pretenuring_handler_->UpdateAllocationSite(object.map(), object,
+ local_pretenuring_feedback_);
}
DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
PtrComprCageBase cage_base = GetPtrComprCageBase(object);
@@ -2046,7 +1920,8 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
Heap* heap_;
RecordMigratedSlotVisitor* record_visitor_;
intptr_t moved_bytes_;
- Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
+ PretenturingHandler* const pretenuring_handler_;
+ PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback_;
};
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
@@ -2091,8 +1966,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
}
inline bool Visit(HeapObject object, int size) override {
- RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
- &heap_->ephemeron_remembered_set_);
+ RecordMigratedSlotVisitor visitor(heap_, &heap_->ephemeron_remembered_set_);
Map map = object.map(cage_base());
// Instead of calling object.IterateFast(cage_base(), &visitor) here
// we can shortcut and use the precomputed size value passed to the visitor.
@@ -2126,7 +2000,7 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
// Custom marking for top optimized frame.
ProcessTopOptimizedFrame(custom_root_body_visitor, isolate());
- if (isolate()->is_shared()) {
+ if (isolate()->is_shared_heap_isolate()) {
isolate()->global_safepoint()->IterateClientIsolates(
[this, custom_root_body_visitor](Isolate* client) {
ProcessTopOptimizedFrame(custom_root_body_visitor, client);
@@ -2142,7 +2016,7 @@ void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
heap_->local_embedder_heap_tracer()->embedder_stack_state() ==
cppgc::EmbedderStackState::kMayContainHeapPointers) {
GlobalHandleMarkingVisitor global_handles_marker(
- *heap_, marking_state_, *local_marking_worklists_);
+ *heap_, *local_marking_worklists_);
stack.IteratePointers(&global_handles_marker);
}
}
@@ -2246,9 +2120,14 @@ Address MarkCompactCollector::FindBasePtrForMarking(Address maybe_inner_ptr) {
if (chunk->IsLargePage()) return chunk->area_start();
// Otherwise, we have a pointer inside a normal page.
const Page* page = static_cast<const Page*>(chunk);
+ // If it is in the young generation "from" semispace, it is not used and we
+ // must ignore it, as its markbits may not be clean.
+ if (page->IsFromPage()) return kNullAddress;
+ // Try to find the address of a previous valid object on this page.
Address base_ptr =
FindPreviousObjectForConservativeMarking(page, maybe_inner_ptr);
- // If the markbit is set, then we have an object that does not need be marked.
+ // If the markbit is set, then we have an object that does not need to be
+ // marked.
if (base_ptr == kNullAddress) return kNullAddress;
// Iterate through the objects in the page forwards, until we find the object
// containing maybe_inner_ptr.
@@ -2271,10 +2150,11 @@ void MarkCompactCollector::MarkRootsFromStack(RootVisitor* root_visitor) {
}
void MarkCompactCollector::MarkObjectsFromClientHeaps() {
- if (!isolate()->is_shared()) return;
+ if (!isolate()->is_shared_heap_isolate()) return;
isolate()->global_safepoint()->IterateClientIsolates(
[collector = this](Isolate* client) {
+ if (client->is_shared_heap_isolate()) return;
collector->MarkObjectsFromClientHeap(client);
});
}
@@ -2290,6 +2170,9 @@ void MarkCompactCollector::MarkObjectsFromClientHeap(Isolate* client) {
PtrComprCageBase cage_base(client);
Heap* heap = client->heap();
+ // Ensure new space is iterable.
+ heap->MakeHeapIterable();
+
if (heap->new_space()) {
std::unique_ptr<ObjectIterator> iterator =
heap->new_space()->GetObjectIterator(heap);
@@ -2312,6 +2195,9 @@ void MarkCompactCollector::MarkObjectsFromClientHeap(Isolate* client) {
// find all incoming pointers into the shared heap.
OldGenerationMemoryChunkIterator chunk_iterator(heap);
+ // Tracking OLD_TO_SHARED requires the write barrier.
+ DCHECK(!v8_flags.disable_write_barriers);
+
for (MemoryChunk* chunk = chunk_iterator.next(); chunk;
chunk = chunk_iterator.next()) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToShared(
@@ -2333,6 +2219,18 @@ void MarkCompactCollector::MarkObjectsFromClientHeap(Isolate* client) {
},
SlotSet::FREE_EMPTY_BUCKETS);
chunk->ReleaseInvalidatedSlots<OLD_TO_SHARED>();
+
+ RememberedSet<OLD_TO_SHARED>::IterateTyped(
+ chunk, [collector = this, heap](SlotType slot_type, Address slot) {
+ HeapObject heap_object =
+ UpdateTypedSlotHelper::GetTargetObject(heap, slot_type, slot);
+ if (heap_object.InSharedWritableHeap()) {
+ collector->MarkRootObject(Root::kClientHeap, heap_object);
+ return KEEP_SLOT;
+ } else {
+ return REMOVE_SLOT;
+ }
+ });
}
#ifdef V8_COMPRESS_POINTERS
@@ -2934,8 +2832,7 @@ class StringForwardingTableCleaner final {
explicit StringForwardingTableCleaner(Heap* heap)
: heap_(heap),
isolate_(heap_->isolate()),
- marking_state_(
- heap_->mark_compact_collector()->non_atomic_marking_state()) {}
+ marking_state_(heap_->non_atomic_marking_state()) {}
void Run() {
StringForwardingTable* forwarding_table =
isolate_->string_forwarding_table();
@@ -3013,9 +2910,9 @@ class StringForwardingTableCleaner final {
ThinString::cast(original_string).RawField(ThinString::kActualOffset);
MarkCompactCollector::RecordSlot(original_string, slot, forward_string);
}
- Heap* heap_;
- Isolate* isolate_;
- NonAtomicMarkingState* marking_state_;
+ Heap* const heap_;
+ Isolate* const isolate_;
+ NonAtomicMarkingState* const marking_state_;
};
} // namespace
@@ -3192,13 +3089,16 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
// Replace bytecode array with an uncompiled data array.
HeapObject compiled_data = shared_info.GetBytecodeArray(isolate());
Address compiled_data_start = compiled_data.address();
- int compiled_data_size = compiled_data.Size();
+ int compiled_data_size = ALIGN_TO_ALLOCATION_ALIGNMENT(compiled_data.Size());
MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
// Clear any recorded slots for the compiled data as being invalid.
RememberedSet<OLD_TO_NEW>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_SHARED>::RemoveRange(
+ chunk, compiled_data_start, compiled_data_start + compiled_data_size,
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(
chunk, compiled_data_start, compiled_data_start + compiled_data_size,
SlotSet::FREE_EMPTY_BUCKETS);
@@ -3211,9 +3111,11 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
// Create a filler object for any left over space in the bytecode array.
if (!heap()->IsLargeObject(compiled_data)) {
+ const int aligned_filler_offset =
+ ALIGN_TO_ALLOCATION_ALIGNMENT(UncompiledDataWithoutPreparseData::kSize);
heap()->CreateFillerObjectAt(
- compiled_data.address() + UncompiledDataWithoutPreparseData::kSize,
- compiled_data_size - UncompiledDataWithoutPreparseData::kSize);
+ compiled_data.address() + aligned_filler_offset,
+ compiled_data_size - aligned_filler_offset);
}
// Initialize the uncompiled data.
@@ -3226,8 +3128,8 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
// Mark the uncompiled data as black, and ensure all fields have already been
// marked.
- DCHECK(marking_state()->IsBlackOrGrey(inferred_name) ||
- (!is_shared_heap() && inferred_name.InSharedWritableHeap()));
+ DCHECK(!ShouldMarkObject(inferred_name) ||
+ marking_state()->IsBlackOrGrey(inferred_name));
marking_state()->WhiteToBlack(uncompiled_data);
// Use the raw function data setter to avoid validity checks, since we're
@@ -3452,9 +3354,25 @@ void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
MemoryChunk* chunk = MemoryChunk::FromHeapObject(array);
RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end,
SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_SHARED>::RemoveRange(chunk, start, end,
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end,
SlotSet::FREE_EMPTY_BUCKETS);
- heap()->CreateFillerObjectAt(start, static_cast<int>(end - start));
+ if (V8_COMPRESS_POINTERS_8GB_BOOL) {
+ Address aligned_start = ALIGN_TO_ALLOCATION_ALIGNMENT(start);
+ Address aligned_end = ALIGN_TO_ALLOCATION_ALIGNMENT(end);
+ if (aligned_start < aligned_end) {
+ heap()->CreateFillerObjectAt(
+ aligned_start, static_cast<int>(aligned_end - aligned_start));
+ }
+ if (Heap::ShouldZapGarbage()) {
+ Address zap_end = std::min(aligned_start, end);
+ MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(kZapValue)),
+ (zap_end - start) >> kTaggedSizeLog2);
+ }
+ } else {
+ heap()->CreateFillerObjectAt(start, static_cast<int>(end - start));
+ }
array.set_number_of_all_descriptors(new_nof_all_descriptors);
}
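The ALIGN_TO_ALLOCATION_ALIGNMENT calls added above (and in FlushBytecodeFromSFI earlier) are plain round-up-to-a-power-of-two arithmetic applied to sizes and addresses before fillers are created. A small standalone sketch under the assumption of an 8-byte allocation alignment (the constant is illustrative, not taken from the patch):

    #include <cassert>
    #include <cstdint>

    // Round value up to the next multiple of alignment; alignment must be a
    // power of two. This is the arithmetic a macro such as
    // ALIGN_TO_ALLOCATION_ALIGNMENT performs.
    constexpr uintptr_t RoundUp(uintptr_t value, uintptr_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      constexpr uintptr_t kAlignment = 8;  // illustrative alignment value
      static_assert(RoundUp(13, kAlignment) == 16, "rounds up");
      static_assert(RoundUp(16, kAlignment) == 16, "already aligned");
      // A filler for trimmed-away space is carved out of aligned bounds only.
      const uintptr_t aligned_start = RoundUp(0x1004, kAlignment);  // 0x1008
      const uintptr_t aligned_end = RoundUp(0x1020, kAlignment);    // 0x1020
      assert(aligned_end - aligned_start == 0x18);
      return 0;
    }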
@@ -3509,14 +3427,14 @@ void MarkCompactCollector::ClearWeakCollections() {
if (value.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(value);
CHECK_IMPLIES(
- (!is_shared_heap_ && key.InSharedHeap()) ||
+ !ShouldMarkObject(key) ||
non_atomic_marking_state()->IsBlackOrGrey(key),
- (!is_shared_heap_ && heap_object.InSharedHeap()) ||
+ !ShouldMarkObject(heap_object) ||
non_atomic_marking_state()->IsBlackOrGrey(heap_object));
}
}
#endif
- if (!is_shared_heap_ && key.InSharedHeap()) continue;
+ if (!ShouldMarkObject(key)) continue;
if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
table.RemoveEntry(i);
}
@@ -3580,7 +3498,7 @@ void MarkCompactCollector::ClearJSWeakRefs() {
};
HeapObject target = HeapObject::cast(weak_cell.target());
if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
- DCHECK(!target.IsUndefined());
+ DCHECK(target.CanBeHeldWeakly());
// The value of the WeakCell is dead.
JSFinalizationRegistry finalization_registry =
JSFinalizationRegistry::cast(weak_cell.finalization_registry());
@@ -3602,6 +3520,7 @@ void MarkCompactCollector::ClearJSWeakRefs() {
HeapObject unregister_token = weak_cell.unregister_token();
if (!non_atomic_marking_state()->IsBlackOrGrey(unregister_token)) {
+ DCHECK(unregister_token.CanBeHeldWeakly());
// The unregister token is dead. Remove any corresponding entries in the
// key map. Multiple WeakCells with the same token will have their
// unregister_token fields set to undefined when processing the first
@@ -3610,7 +3529,7 @@ void MarkCompactCollector::ClearJSWeakRefs() {
JSFinalizationRegistry finalization_registry =
JSFinalizationRegistry::cast(weak_cell.finalization_registry());
finalization_registry.RemoveUnregisterToken(
- JSReceiver::cast(unregister_token), isolate(),
+ unregister_token, isolate(),
JSFinalizationRegistry::kKeepMatchedCellsInRegistry,
gc_notify_updated_slot);
} else {
@@ -3664,11 +3583,9 @@ MarkCompactCollector::ProcessRelocInfo(Code host, RelocInfo* rinfo,
slot_type = SlotType::kCodeEntry;
} else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
slot_type = SlotType::kEmbeddedObjectFull;
- } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
- slot_type = SlotType::kEmbeddedObjectCompressed;
} else {
- DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
- slot_type = SlotType::kEmbeddedObjectData;
+ DCHECK(RelocInfo::IsCompressedEmbeddedObject(rmode));
+ slot_type = SlotType::kEmbeddedObjectCompressed;
}
}
@@ -3959,71 +3876,6 @@ class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
}
};
-#ifdef VERIFY_HEAP
-// Visitor for updating root pointers and to-space pointers.
-// It does not expect to encounter pointers to dead objects.
-class ClientHeapVerifier final : public ObjectVisitorWithCageBases {
- public:
- explicit ClientHeapVerifier(Heap* heap) : ObjectVisitorWithCageBases(heap) {}
-
- void VisitPointer(HeapObject host, ObjectSlot p) override {
- VerifySlot(cage_base(), p);
- }
-
- void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
- VerifySlot(cage_base(), p);
- }
-
- void VisitPointers(HeapObject host, ObjectSlot start,
- ObjectSlot end) override {
- for (ObjectSlot p = start; p < end; ++p) {
- VerifySlot(cage_base(), p);
- }
- }
-
- void VisitPointers(HeapObject host, MaybeObjectSlot start,
- MaybeObjectSlot end) final {
- for (MaybeObjectSlot p = start; p < end; ++p) {
- VerifySlot(cage_base(), p);
- }
- }
-
- void VisitMapPointer(HeapObject host) override {
- VerifySlot(cage_base(), host.map_slot());
- }
-
- void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
- VerifySlot(code_cage_base(), ObjectSlot(slot.address()));
- }
-
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override {}
-
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {}
-
- private:
- void VerifySlot(PtrComprCageBase cage_base, ObjectSlot slot) {
- HeapObject heap_object;
- if (slot.load(cage_base).GetHeapObject(&heap_object)) {
- VerifyHeapObject(heap_object);
- }
- }
-
- void VerifySlot(PtrComprCageBase cage_base, MaybeObjectSlot slot) {
- HeapObject heap_object;
- if (slot.load(cage_base).GetHeapObject(&heap_object)) {
- VerifyHeapObject(heap_object);
- }
- }
-
- void VerifyHeapObject(HeapObject heap_object) {
- if (BasicMemoryChunk::FromHeapObject(heap_object)->InReadOnlySpace())
- return;
- if (!heap_object.InSharedHeap()) return;
- CHECK(!heap_object.map_word(kRelaxedLoad).IsForwardingAddress());
- }
-};
-#endif // VERIFY_HEAP
-
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
HeapObject old_string = HeapObject::cast(*p);
@@ -4097,6 +3949,15 @@ void MarkCompactCollector::EvacuateEpilogue() {
DCHECK_NULL((chunk->slot_set<OLD_TO_NEW, AccessMode::ATOMIC>()));
DCHECK_NULL((chunk->typed_slot_set<OLD_TO_NEW, AccessMode::ATOMIC>()));
+ // Old-to-shared slots may survive GC but there should never be any slots in
+ // new or shared spaces.
+ AllocationSpace id = chunk->owner_identity();
+ if (id == SHARED_SPACE || id == SHARED_LO_SPACE || id == NEW_SPACE ||
+ id == NEW_LO_SPACE || isolate()->is_shared()) {
+ DCHECK_NULL((chunk->slot_set<OLD_TO_SHARED, AccessMode::ATOMIC>()));
+ DCHECK_NULL((chunk->typed_slot_set<OLD_TO_SHARED, AccessMode::ATOMIC>()));
+ }
+
// GCs need to filter invalidated slots.
DCHECK_NULL(chunk->invalidated_slots<OLD_TO_OLD>());
DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
@@ -4107,8 +3968,8 @@ void MarkCompactCollector::EvacuateEpilogue() {
namespace {
ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
- if (v8_flags.shared_string_table && heap->isolate()->shared_isolate()) {
- return new ConcurrentAllocator(nullptr, heap->shared_old_space());
+ if (v8_flags.shared_string_table && heap->isolate()->has_shared_heap()) {
+ return new ConcurrentAllocator(nullptr, heap->shared_allocation_space());
}
return nullptr;
@@ -4158,7 +4019,8 @@ class Evacuator : public Malloced {
EvacuationAllocator* local_allocator,
AlwaysPromoteYoung always_promote_young)
: heap_(heap),
- local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
+ local_pretenuring_feedback_(
+ PretenturingHandler::kInitialFeedbackCapacity),
shared_old_allocator_(CreateSharedOldAllocator(heap_)),
new_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
record_visitor, &local_pretenuring_feedback_,
@@ -4191,10 +4053,8 @@ class Evacuator : public Malloced {
virtual GCTracer::Scope::ScopeId GetTracingScope() = 0;
protected:
- static const int kInitialLocalPretenuringFeedbackCapacity = 256;
-
// |saved_live_bytes| returns the live bytes of the page that was processed.
- virtual void RawEvacuatePage(MemoryChunk* chunk,
+ virtual bool RawEvacuatePage(MemoryChunk* chunk,
intptr_t* saved_live_bytes) = 0;
inline Heap* heap() { return heap_; }
@@ -4206,7 +4066,7 @@ class Evacuator : public Malloced {
Heap* heap_;
- Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
+ PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
// Allocator for the shared heap.
std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
@@ -4232,10 +4092,11 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
DCHECK(chunk->SweepingDone());
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
+ bool success = false;
{
AlwaysAllocateScope always_allocate(heap());
TimedScope timed_scope(&evacuation_time);
- RawEvacuatePage(chunk, &saved_live_bytes);
+ success = RawEvacuatePage(chunk, &saved_live_bytes);
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (v8_flags.trace_evacuation) {
@@ -4249,8 +4110,7 @@ void Evacuator::EvacuatePage(MemoryChunk* chunk) {
chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
heap()->new_space()->IsPromotionCandidate(chunk),
- saved_live_bytes, evacuation_time,
- chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ saved_live_bytes, evacuation_time, success);
}
}
@@ -4260,7 +4120,7 @@ void Evacuator::Finalize() {
heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
new_to_old_page_visitor_.moved_bytes());
- heap()->IncrementSemiSpaceCopiedObjectSize(
+ heap()->IncrementNewSpaceSurvivingObjectSize(
new_space_visitor_.semispace_copied_size() +
new_to_new_page_visitor_.moved_bytes());
heap()->IncrementYoungSurvivorsCounter(
@@ -4268,18 +4128,18 @@ void Evacuator::Finalize() {
new_space_visitor_.semispace_copied_size() +
new_to_old_page_visitor_.moved_bytes() +
new_to_new_page_visitor_.moved_bytes());
- heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
+ heap()->pretenuring_handler()->MergeAllocationSitePretenuringFeedback(
+ local_pretenuring_feedback_);
}
class FullEvacuator : public Evacuator {
public:
- explicit FullEvacuator(MarkCompactCollector* collector)
- : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
+ explicit FullEvacuator(Heap* heap)
+ : Evacuator(heap, &record_visitor_, &local_allocator_,
AlwaysPromoteYoung::kYes),
- record_visitor_(collector, &ephemeron_remembered_set_),
+ record_visitor_(heap_, &ephemeron_remembered_set_),
local_allocator_(heap_,
- CompactionSpaceKind::kCompactionSpaceForMarkCompact),
- collector_(collector) {}
+ CompactionSpaceKind::kCompactionSpaceForMarkCompact) {}
GCTracer::Scope::ScopeId GetBackgroundTracingScope() override {
return GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY;
@@ -4307,18 +4167,16 @@ class FullEvacuator : public Evacuator {
}
protected:
- void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
+ bool RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
EphemeronRememberedSet ephemeron_remembered_set_;
RecordMigratedSlotVisitor record_visitor_;
EvacuationAllocator local_allocator_;
-
- MarkCompactCollector* collector_;
};
-void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
+bool FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
- NonAtomicMarkingState* marking_state = collector_->non_atomic_marking_state();
+ NonAtomicMarkingState* marking_state = heap_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(chunk);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"FullEvacuator::RawEvacuatePage", "evacuation_mode",
@@ -4363,13 +4221,17 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
} else {
// Aborted compaction page. Actual processing happens on the main
// thread for simplicity reasons.
- collector_->ReportAbortedEvacuationCandidateDueToOOM(
- failed_object.address(), static_cast<Page*>(chunk));
+ heap_->mark_compact_collector()
+ ->ReportAbortedEvacuationCandidateDueToOOM(
+ failed_object.address(), static_cast<Page*>(chunk));
+ return false;
}
}
break;
}
}
+
+ return true;
}
class PageEvacuationJob : public v8::JobTask {
@@ -4433,20 +4295,19 @@ class PageEvacuationJob : public v8::JobTask {
};
namespace {
-template <class Evacuator, class Collector>
+template <class Evacuator>
size_t CreateAndExecuteEvacuationTasks(
- Collector* collector,
+ Heap* heap,
std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
MigrationObserver* migration_observer) {
base::Optional<ProfilingMigrationObserver> profiling_observer;
- if (collector->isolate()->log_object_relocation()) {
- profiling_observer.emplace(collector->heap());
+ if (heap->isolate()->log_object_relocation()) {
+ profiling_observer.emplace(heap);
}
std::vector<std::unique_ptr<v8::internal::Evacuator>> evacuators;
- const int wanted_num_tasks =
- NumberOfParallelCompactionTasks(collector->heap());
+ const int wanted_num_tasks = NumberOfParallelCompactionTasks(heap);
for (int i = 0; i < wanted_num_tasks; i++) {
- auto evacuator = std::make_unique<Evacuator>(collector);
+ auto evacuator = std::make_unique<Evacuator>(heap);
if (profiling_observer) {
evacuator->AddObserver(&profiling_observer.value());
}
@@ -4458,7 +4319,7 @@ size_t CreateAndExecuteEvacuationTasks(
V8::GetCurrentPlatform()
->CreateJob(
v8::TaskPriority::kUserBlocking,
- std::make_unique<PageEvacuationJob>(collector->isolate(), &evacuators,
+ std::make_unique<PageEvacuationJob>(heap->isolate(), &evacuators,
std::move(evacuation_items)))
->Join();
for (auto& evacuator : evacuators) {
@@ -4469,13 +4330,16 @@ size_t CreateAndExecuteEvacuationTasks(
bool ShouldMovePage(Page* p, intptr_t live_bytes, intptr_t wasted_bytes,
MemoryReductionMode memory_reduction_mode,
- AlwaysPromoteYoung always_promote_young) {
+ AlwaysPromoteYoung always_promote_young,
+ PromoteUnusablePages promote_unusable_pages) {
Heap* heap = p->heap();
return v8_flags.page_promotion &&
(memory_reduction_mode == MemoryReductionMode::kNone) &&
!p->NeverEvacuate() &&
- (live_bytes + wasted_bytes >
- Evacuator::NewSpacePageEvacuationThreshold()) &&
+ ((live_bytes + wasted_bytes >
+ Evacuator::NewSpacePageEvacuationThreshold()) ||
+ (promote_unusable_pages == PromoteUnusablePages::kYes &&
+ !p->WasUsedForAllocation())) &&
(always_promote_young == AlwaysPromoteYoung::kYes ||
heap->new_space()->IsPromotionCandidate(p)) &&
heap->CanExpandOldGeneration(live_bytes);
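With the PromoteUnusablePages parameter added above, ShouldMovePage promotes a new-space page either when its live plus wasted bytes exceed the evacuation threshold or when the caller opts in to promoting pages that were never allocated on. A condensed standalone sketch of just that decision, with made-up numbers and illustrative names:

    #include <cstdio>

    enum class PromoteUnusablePages { kNo, kYes };

    struct ToyPage {
      long live_bytes;
      long wasted_bytes;
      bool was_used_for_allocation;
    };

    // Mirrors the OR-condition added above: exceed the threshold, or promote
    // never-used pages when explicitly requested.
    bool ShouldPromote(const ToyPage& page, long threshold,
                       PromoteUnusablePages promote_unusable) {
      const bool above_threshold =
          page.live_bytes + page.wasted_bytes > threshold;
      const bool unusable = promote_unusable == PromoteUnusablePages::kYes &&
                            !page.was_used_for_allocation;
      return above_threshold || unusable;
    }

    int main() {
      const long threshold = 64 * 1024;  // illustrative threshold
      ToyPage busy{/*live_bytes=*/70000, /*wasted_bytes=*/5000,
                   /*was_used_for_allocation=*/true};
      ToyPage idle{/*live_bytes=*/1000, /*wasted_bytes=*/0,
                   /*was_used_for_allocation=*/false};
      std::printf("busy promoted: %d\n",
                  ShouldPromote(busy, threshold, PromoteUnusablePages::kNo));
      std::printf("idle promoted: %d\n",
                  ShouldPromote(idle, threshold, PromoteUnusablePages::kYes));
      return 0;
    }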
@@ -4516,7 +4380,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
heap()->ShouldReduceMemory() ? MemoryReductionMode::kShouldReduceMemory
: MemoryReductionMode::kNone;
if (ShouldMovePage(page, live_bytes_on_page, 0, memory_reduction_mode,
- AlwaysPromoteYoung::kYes) ||
+ AlwaysPromoteYoung::kYes, PromoteUnusablePages::kNo) ||
force_page_promotion) {
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
DCHECK_EQ(heap()->old_space(), page->owner());
@@ -4563,8 +4427,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
// Promote young generation large objects.
if (auto* new_lo_space = heap()->new_lo_space()) {
- auto* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
+ auto* marking_state = heap()->non_atomic_marking_state();
for (auto it = new_lo_space->begin(); it != new_lo_space->end();) {
LargePage* current = *(it++);
HeapObject object = current->GetObject();
@@ -4587,7 +4450,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
evacuation_items.size());
wanted_num_tasks = CreateAndExecuteEvacuationTasks<FullEvacuator>(
- this, std::move(evacuation_items), nullptr);
+ heap(), std::move(evacuation_items), nullptr);
}
const size_t aborted_pages = PostProcessAbortedEvacuationCandidates();
@@ -4667,7 +4530,7 @@ void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
int new_live_size = 0;
for (auto object_and_size :
LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
- new_live_size += object_and_size.second;
+ new_live_size += ALIGN_TO_ALLOCATION_ALIGNMENT(object_and_size.second);
}
marking_state->SetLiveBytes(chunk, new_live_size);
}
@@ -4683,19 +4546,12 @@ void MarkCompactCollector::Evacuate() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
- EvacuationScope evacuation_scope(this);
+ EvacuationScope evacuation_scope(heap());
EvacuatePagesInParallel();
}
UpdatePointersAfterEvacuation();
- if (heap()->new_space()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
- if (!heap()->new_space()->EnsureCurrentCapacity()) {
- heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
- }
- }
-
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
@@ -4709,11 +4565,31 @@ void MarkCompactCollector::Evacuate() {
} else if (v8_flags.minor_mc) {
// Sweep non-promoted pages to add them back to the free list.
DCHECK_EQ(NEW_SPACE, p->owner_identity());
- sweeper()->AddPage(NEW_SPACE, p, Sweeper::REGULAR);
+ DCHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
+ DCHECK(p->SweepingDone());
+ PagedNewSpace* space = heap()->paged_new_space();
+ if (is_new_space_shrinking_ && space->ShouldReleasePage()) {
+ space->ReleasePage(p);
+ } else {
+ sweeper()->AddNewSpacePage(p);
+ }
}
}
new_space_evacuation_pages_.clear();
+ if (is_new_space_shrinking_) {
+ DCHECK(v8_flags.minor_mc);
+ heap()->paged_new_space()->FinishShrinking();
+ is_new_space_shrinking_ = false;
+ }
+
+ if (heap()->new_space()) {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
+ if (!heap()->new_space()->EnsureCurrentCapacity()) {
+ heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
+ }
+ }
+
for (LargePage* p : promoted_large_pages_) {
DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
@@ -4874,7 +4750,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
: heap_(heap),
marking_state_(marking_state),
chunk_(chunk),
- updating_mode_(updating_mode) {}
+ updating_mode_(updating_mode),
+ record_old_to_shared_slots_(heap->isolate()->has_shared_heap() &&
+ !chunk->InSharedHeap()) {}
~RememberedSetUpdatingItem() override = default;
void Process() override {
@@ -4988,7 +4866,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
void UpdateUntypedPointers() {
- const bool has_shared_isolate = this->heap_->isolate()->shared_isolate();
const PtrComprCageBase cage_base = heap_->isolate();
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
// Marking bits are already cleared when the page has been swept. This
@@ -5003,12 +4880,12 @@ class RememberedSetUpdatingItem : public UpdatingItem {
InvalidatedSlotsFilter::OldToNew(chunk_, liveness_check);
int slots = RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
- [this, &filter, has_shared_isolate, cage_base](MaybeObjectSlot slot) {
+ [this, &filter, cage_base](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
SlotCallbackResult result = CheckAndUpdateOldToNewSlot(slot);
// A new space string might have been promoted into the shared heap
// during GC.
- if (has_shared_isolate) {
+ if (record_old_to_shared_slots_) {
CheckSlotForOldToSharedUntyped(cage_base, chunk_, slot);
}
return result;
@@ -5034,12 +4911,12 @@ class RememberedSetUpdatingItem : public UpdatingItem {
chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
- [this, has_shared_isolate, &filter, cage_base](MaybeObjectSlot slot) {
+ [this, &filter, cage_base](MaybeObjectSlot slot) {
if (filter.IsValid(slot.address())) {
UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
// A string might have been promoted into the shared heap during
// GC.
- if (has_shared_isolate) {
+ if (record_old_to_shared_slots_) {
CheckSlotForOldToSharedUntyped(cage_base, chunk_, slot);
}
}
@@ -5104,7 +4981,6 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
void UpdateTypedPointers() {
- const bool has_shared_isolate = heap_->isolate()->shared_isolate();
if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
nullptr) {
CHECK_NE(chunk_->owner(), heap_->map_space());
@@ -5113,14 +4989,13 @@ class RememberedSetUpdatingItem : public UpdatingItem {
return CheckAndUpdateOldToNewSlot(slot);
};
RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_,
- [this, has_shared_isolate, &check_and_update_old_to_new_slot_fn](
- SlotType slot_type, Address slot) {
+ chunk_, [this, &check_and_update_old_to_new_slot_fn](
+ SlotType slot_type, Address slot) {
SlotCallbackResult result = UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
// A new space string might have been promoted into the shared heap
// during GC.
- if (has_shared_isolate) {
+ if (record_old_to_shared_slots_) {
CheckSlotForOldToSharedTyped(chunk_, slot_type, slot);
}
return result;
@@ -5131,7 +5006,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
nullptr)) {
CHECK_NE(chunk_->owner(), heap_->map_space());
RememberedSet<OLD_TO_OLD>::IterateTyped(
- chunk_, [this, has_shared_isolate](SlotType slot_type, Address slot) {
+ chunk_, [this](SlotType slot_type, Address slot) {
// Using UpdateStrongSlot is OK here, because there are no weak
// typed slots.
PtrComprCageBase cage_base = heap_->isolate();
@@ -5143,7 +5018,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
return KEEP_SLOT;
});
// A string might have been promoted into the shared heap during GC.
- if (has_shared_isolate) {
+ if (record_old_to_shared_slots_) {
CheckSlotForOldToSharedTyped(chunk_, slot_type, slot);
}
return result;
@@ -5156,6 +5031,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
MarkingState* marking_state_;
MemoryChunk* chunk_;
RememberedSetUpdatingMode updating_mode_;
+ const bool record_old_to_shared_slots_;
};
std::unique_ptr<UpdatingItem>
@@ -5294,11 +5170,21 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
CollectRememberedSetUpdatingItems(this, &updating_items,
heap()->code_space(),
RememberedSetUpdatingMode::ALL);
+ if (heap()->shared_space()) {
+ CollectRememberedSetUpdatingItems(this, &updating_items,
+ heap()->shared_space(),
+ RememberedSetUpdatingMode::ALL);
+ }
CollectRememberedSetUpdatingItems(this, &updating_items, heap()->lo_space(),
RememberedSetUpdatingMode::ALL);
CollectRememberedSetUpdatingItems(this, &updating_items,
heap()->code_lo_space(),
RememberedSetUpdatingMode::ALL);
+ if (heap()->shared_lo_space()) {
+ CollectRememberedSetUpdatingItems(this, &updating_items,
+ heap()->shared_lo_space(),
+ RememberedSetUpdatingMode::ALL);
+ }
if (heap()->map_space()) {
CollectRememberedSetUpdatingItems(this, &updating_items,
heap()->map_space(),
@@ -5336,10 +5222,12 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
void MarkCompactCollector::UpdatePointersInClientHeaps() {
- if (!isolate()->is_shared()) return;
+ if (!isolate()->is_shared_heap_isolate()) return;
- isolate()->global_safepoint()->IterateClientIsolates(
- [this](Isolate* client) { UpdatePointersInClientHeap(client); });
+ isolate()->global_safepoint()->IterateClientIsolates([this](Isolate* client) {
+ if (client->is_shared_heap_isolate()) return;
+ UpdatePointersInClientHeap(client);
+ });
}
void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
@@ -5372,25 +5260,10 @@ void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
});
if (chunk->InYoungGeneration()) chunk->ReleaseTypedSlotSet<OLD_TO_SHARED>();
}
-
-#ifdef VERIFY_HEAP
- if (v8_flags.verify_heap) {
- ClientHeapVerifier verifier_visitor(client->heap());
-
- HeapObjectIterator iterator(client->heap(),
- HeapObjectIterator::kNoFiltering);
- for (HeapObject obj = iterator.Next(); !obj.is_null();
- obj = iterator.Next()) {
- obj.IterateFast(cage_base, &verifier_visitor);
- }
- }
-#endif // VERIFY_HEAP
}
void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToOOM(
Address failed_start, Page* page) {
- DCHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
- page->SetFlag(Page::COMPACTION_WAS_ABORTED);
base::MutexGuard guard(&mutex_);
aborted_evacuation_candidates_due_to_oom_.push_back(
std::make_pair(failed_start, page));
@@ -5407,11 +5280,10 @@ void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToFlags(
namespace {
-void ReRecordPage(Heap* heap,
- v8::internal::NonAtomicMarkingState* marking_state,
- Address failed_start, Page* page) {
+void ReRecordPage(Heap* heap, Address failed_start, Page* page) {
DCHECK(page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ NonAtomicMarkingState* marking_state = heap->non_atomic_marking_state();
// Aborted compaction page. We have to record slots here, since we
// might not have recorded them in the first place.
@@ -5457,12 +5329,15 @@ size_t MarkCompactCollector::PostProcessAbortedEvacuationCandidates() {
CHECK_IMPLIES(v8_flags.crash_on_aborted_evacuation,
aborted_evacuation_candidates_due_to_oom_.empty());
for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
- ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
- start_and_page.second);
+ Page* page = start_and_page.second;
+ DCHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ }
+ for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
+ ReRecordPage(heap(), start_and_page.first, start_and_page.second);
}
for (auto start_and_page : aborted_evacuation_candidates_due_to_flags_) {
- ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
- start_and_page.second);
+ ReRecordPage(heap(), start_and_page.first, start_and_page.second);
}
const size_t aborted_pages =
aborted_evacuation_candidates_due_to_oom_.size() +
@@ -5492,147 +5367,56 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
non_atomic_marking_state()->SetLiveBytes(p, 0);
CHECK(p->SweepingDone());
- space->memory_chunk_list().Remove(p);
space->ReleasePage(p);
}
old_space_evacuation_pages_.clear();
compacting_ = false;
}
-void MarkCompactCollector::SweepLargeSpace(LargeObjectSpace* space) {
- auto* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
- PtrComprCageBase cage_base(heap()->isolate());
- size_t surviving_object_size = 0;
- for (auto it = space->begin(); it != space->end();) {
- LargePage* current = *(it++);
- HeapObject object = current->GetObject();
- DCHECK(!marking_state->IsGrey(object));
- if (!marking_state->IsBlack(object)) {
- // Object is dead and page can be released.
- space->RemovePage(current);
- heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
- current);
-
- continue;
- }
- Marking::MarkWhite(non_atomic_marking_state()->MarkBitFrom(object));
- current->ProgressBar().ResetIfEnabled();
- non_atomic_marking_state()->SetLiveBytes(current, 0);
- surviving_object_size += static_cast<size_t>(object.Size(cage_base));
- }
- space->set_objects_size(surviving_object_size);
-}
-
-void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
- space->ClearAllocatorState();
-
- int will_be_swept = 0;
- bool unused_page_present = false;
-
- // Loop needs to support deletion if live bytes == 0 for a page.
- for (auto it = space->begin(); it != space->end();) {
- Page* p = *(it++);
- DCHECK(p->SweepingDone());
-
- if (p->IsEvacuationCandidate()) {
- // Will be processed in Evacuate.
- DCHECK(!evacuation_candidates_.empty());
- continue;
- }
-
- // One unused page is kept, all further are released before sweeping them.
- if (non_atomic_marking_state()->live_bytes(p) == 0) {
- if (unused_page_present) {
- if (v8_flags.gc_verbose) {
- PrintIsolate(isolate(), "sweeping: released page: %p",
- static_cast<void*>(p));
- }
- space->memory_chunk_list().Remove(p);
- space->ReleasePage(p);
- continue;
- }
- unused_page_present = true;
- }
-
- sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
- will_be_swept++;
- }
-
- if (v8_flags.gc_verbose) {
- PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
- space->name(), will_be_swept);
- }
-}
-
-void MarkCompactCollector::StartSweepNewSpace() {
- PagedSpaceBase* paged_space = heap()->paged_new_space()->paged_space();
- paged_space->ClearAllocatorState();
-
- int will_be_swept = 0;
-
- for (auto it = paged_space->begin(); it != paged_space->end();) {
- Page* p = *(it++);
- DCHECK(p->SweepingDone());
-
- if (non_atomic_marking_state()->live_bytes(p) > 0) {
- // Non-empty pages will be evacuated/promoted.
- continue;
- }
-
- // New space preallocates all its pages. Don't free empty pages since they
- // will just be reallocated.
- DCHECK_EQ(NEW_SPACE, paged_space->identity());
- sweeper_->AddPage(NEW_SPACE, p, Sweeper::REGULAR);
- will_be_swept++;
- }
-
- if (v8_flags.gc_verbose) {
- PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
- paged_space->name(), will_be_swept);
- }
-}
-
void MarkCompactCollector::Sweep() {
+ DCHECK(!sweeper()->sweeping_in_progress());
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
{
- {
- GCTracer::Scope sweep_scope(
- heap()->tracer(), GCTracer::Scope::MC_SWEEP_LO, ThreadKind::kMain);
- SweepLargeSpace(heap()->lo_space());
- }
- {
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_CODE_LO,
- ThreadKind::kMain);
- SweepLargeSpace(heap()->code_lo_space());
- }
- {
- GCTracer::Scope sweep_scope(
- heap()->tracer(), GCTracer::Scope::MC_SWEEP_OLD, ThreadKind::kMain);
- StartSweepSpace(heap()->old_space());
- }
- {
- GCTracer::Scope sweep_scope(
- heap()->tracer(), GCTracer::Scope::MC_SWEEP_CODE, ThreadKind::kMain);
- StartSweepSpace(heap()->code_space());
- }
- if (heap()->map_space()) {
- GCTracer::Scope sweep_scope(
- heap()->tracer(), GCTracer::Scope::MC_SWEEP_MAP, ThreadKind::kMain);
- StartSweepSpace(heap()->map_space());
- }
- if (v8_flags.minor_mc && heap()->new_space()) {
- GCTracer::Scope sweep_scope(
- heap()->tracer(), GCTracer::Scope::MC_SWEEP_NEW, ThreadKind::kMain);
- StartSweepNewSpace();
- }
- sweeper()->StartSweeping();
+ GCTracer::Scope sweep_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP_LO,
+ ThreadKind::kMain);
+ SweepLargeSpace(heap()->lo_space());
+ }
+ {
+ GCTracer::Scope sweep_scope(
+ heap()->tracer(), GCTracer::Scope::MC_SWEEP_CODE_LO, ThreadKind::kMain);
+ SweepLargeSpace(heap()->code_lo_space());
+ }
+ {
+ GCTracer::Scope sweep_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP_OLD,
+ ThreadKind::kMain);
+ StartSweepSpace(heap()->old_space());
}
+ {
+ GCTracer::Scope sweep_scope(
+ heap()->tracer(), GCTracer::Scope::MC_SWEEP_CODE, ThreadKind::kMain);
+ StartSweepSpace(heap()->code_space());
+ }
+ if (heap()->map_space()) {
+ GCTracer::Scope sweep_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP_MAP,
+ ThreadKind::kMain);
+ StartSweepSpace(heap()->map_space());
+ }
+ if (heap()->shared_space()) {
+ GCTracer::Scope sweep_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP_MAP,
+ ThreadKind::kMain);
+ StartSweepSpace(heap()->shared_space());
+ }
+ if (v8_flags.minor_mc && heap()->new_space()) {
+ GCTracer::Scope sweep_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP_NEW,
+ ThreadKind::kMain);
+ StartSweepNewSpace();
+ }
+
+ sweeper()->StartSweeping(garbage_collector_);
}
namespace {
@@ -5643,8 +5427,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
public:
explicit YoungGenerationMarkingVerifier(Heap* heap)
: MarkingVerifier(heap),
- marking_state_(
- heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
+ marking_state_(heap->non_atomic_marking_state()) {}
ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const MemoryChunk* chunk) override {
@@ -5708,75 +5491,15 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
}
}
- NonAtomicMarkingState* marking_state_;
-};
-
-class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
- public:
- explicit YoungGenerationEvacuationVerifier(Heap* heap)
- : EvacuationVerifier(heap) {}
-
- void Run() override {
- DCHECK(!heap_->mark_compact_collector()->sweeping_in_progress());
- DCHECK(!heap_->minor_mark_compact_collector()->sweeping_in_progress());
- VerifyRoots();
- VerifyEvacuation(heap_->new_space());
- VerifyEvacuation(heap_->old_space());
- VerifyEvacuation(heap_->code_space());
- if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
- }
-
- protected:
- V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
- CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
- Heap::InToPage(heap_object));
- }
-
- template <typename TSlot>
- void VerifyPointersImpl(TSlot start, TSlot end) {
- for (TSlot current = start; current < end; ++current) {
- typename TSlot::TObject object = current.load(cage_base());
- HeapObject heap_object;
- if (object.GetHeapObject(&heap_object)) {
- VerifyHeapObjectImpl(heap_object);
- }
- }
- }
- void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
- void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
- VerifyPointersImpl(start, end);
- }
- void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
- VerifyPointersImpl(start, end);
- }
- void VerifyCodePointer(CodeObjectSlot slot) override {
- CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
- Object maybe_code = slot.load(code_cage_base());
- HeapObject code;
- // The slot might contain smi during CodeDataContainer creation, so skip it.
- if (maybe_code.GetHeapObject(&code)) {
- VerifyHeapObjectImpl(code);
- }
- }
- void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
- Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- VerifyHeapObjectImpl(target);
- }
- void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
- }
- void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
- VerifyPointersImpl(start, end);
- }
+ NonAtomicMarkingState* const marking_state_;
};
#endif // VERIFY_HEAP
bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
DCHECK_IMPLIES(Heap::InYoungGeneration(*p), Heap::InToPage(*p));
- return Heap::InYoungGeneration(*p) && !heap->minor_mark_compact_collector()
- ->non_atomic_marking_state()
- ->IsBlack(HeapObject::cast(*p));
+ return Heap::InYoungGeneration(*p) &&
+ !heap->non_atomic_marking_state()->IsBlack(HeapObject::cast(*p));
}
} // namespace
@@ -5821,7 +5544,7 @@ constexpr size_t MinorMarkCompactCollector::kMaxParallelTasks;
MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
: CollectorBase(heap, GarbageCollector::MINOR_MARK_COMPACTOR),
page_parallel_job_semaphore_(0),
- sweeper_(std::make_unique<Sweeper>(heap_, non_atomic_marking_state())) {}
+ sweeper_(heap_->sweeper()) {}
std::pair<size_t, size_t> MinorMarkCompactCollector::ProcessMarkingWorklist(
size_t bytes_to_process) {
@@ -5872,8 +5595,7 @@ class YoungGenerationMigrationObserver final : public MigrationObserver {
// Migrate color to old generation marking in case the object survived
// young generation garbage collection.
if (heap_->incremental_marking()->IsMarking()) {
- DCHECK(
- heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
+ DCHECK(heap_->atomic_marking_state()->IsWhite(dst));
heap_->incremental_marking()->TransferColor(src, dst);
}
}
@@ -5886,9 +5608,8 @@ class YoungGenerationMigrationObserver final : public MigrationObserver {
class YoungGenerationRecordMigratedSlotVisitor final
: public RecordMigratedSlotVisitor {
public:
- explicit YoungGenerationRecordMigratedSlotVisitor(
- MarkCompactCollector* collector)
- : RecordMigratedSlotVisitor(collector, nullptr) {}
+ explicit YoungGenerationRecordMigratedSlotVisitor(Heap* heap)
+ : RecordMigratedSlotVisitor(heap, nullptr) {}
void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
@@ -5904,7 +5625,7 @@ class YoungGenerationRecordMigratedSlotVisitor final
// Only record slots for host objects that are considered as live by the
// full collector.
inline bool IsLive(HeapObject object) {
- return collector_->non_atomic_marking_state()->IsBlack(object);
+ return heap_->non_atomic_marking_state()->IsBlack(object);
}
inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
@@ -5928,6 +5649,10 @@ class YoungGenerationRecordMigratedSlotVisitor final
RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
MemoryChunk::FromHeapObject(host), slot);
}
+ } else if (p->InSharedHeap()) {
+ DCHECK(!host.InSharedWritableHeap());
+ RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
+ MemoryChunk::FromHeapObject(host), slot);
}
}
}
@@ -5939,25 +5664,28 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
std::vector<std::unique_ptr<UpdatingItem>> updating_items;
- // Create batches of global handles.
- CollectRememberedSetUpdatingItems(this, &updating_items, heap()->old_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- CollectRememberedSetUpdatingItems(this, &updating_items, heap()->code_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- if (heap()->map_space()) {
- CollectRememberedSetUpdatingItems(
- this, &updating_items, heap()->map_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- }
- CollectRememberedSetUpdatingItems(this, &updating_items, heap()->lo_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- CollectRememberedSetUpdatingItems(this, &updating_items,
- heap()->code_lo_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
-
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
+ // Create batches of global handles.
+ CollectRememberedSetUpdatingItems(
+ this, &updating_items, heap()->old_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ CollectRememberedSetUpdatingItems(
+ this, &updating_items, heap()->code_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ if (heap()->map_space()) {
+ CollectRememberedSetUpdatingItems(
+ this, &updating_items, heap()->map_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ }
+ CollectRememberedSetUpdatingItems(
+ this, &updating_items, heap()->lo_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ CollectRememberedSetUpdatingItems(
+ this, &updating_items, heap()->code_lo_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+
V8::GetCurrentPlatform()
->CreateJob(
v8::TaskPriority::kUserBlocking,
@@ -6027,17 +5755,26 @@ void MinorMarkCompactCollector::StartMarking() {
}
void MinorMarkCompactCollector::Finish() {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_FINISH);
-
{
- // Keep new space sweeping atomic.
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_FINISH_SWEEP_NEW,
- ThreadKind::kMain);
- sweeper_->EnsureCompleted(Sweeper::SweepingMode::kEagerDuringGC);
- heap()->paged_new_space()->paged_space()->RefillFreeList(sweeper());
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEP);
+ {
+ DCHECK_NOT_NULL(heap()->new_lo_space());
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEP_NEW_LO);
+ SweepLargeSpace(heap()->new_lo_space());
+ }
+
+ {
+ // Keep new space sweeping atomic.
+ GCTracer::Scope sweep_scope(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_SWEEP_FINISH_NEW,
+ ThreadKind::kMain);
+ sweeper_->EnsureCompleted(Sweeper::SweepingMode::kEagerDuringGC);
+ heap()->paged_new_space()->paged_space()->RefillFreeList();
+ }
}
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_FINISH);
+
local_marking_worklists_.reset();
main_marking_visitor_.reset();
}
@@ -6070,25 +5807,6 @@ void MinorMarkCompactCollector::CollectGarbage() {
}
#endif // VERIFY_HEAP
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
- heap()->incremental_marking()->UpdateMarkingWorklistAfterYoungGenGC();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
- // Since we promote all surviving large objects immediately, all remaining
- // large objects must be dead.
- NonAtomicMarkingState* marking_state = non_atomic_marking_state();
- heap()->new_lo_space()->FreeDeadObjects([marking_state](HeapObject obj) {
- // New large object space is not swept and markbits for non-promoted
- // objects are still in tact.
- USE(marking_state);
- DCHECK(marking_state->IsWhite(obj));
- return true;
- });
- }
-
CleanupPromotedPages();
SweepArrayBufferExtensions();
@@ -6101,7 +5819,6 @@ void MinorMarkCompactCollector::MakeIterable(
CHECK(!p->IsLargePage());
// We have to clear the full collector's markbits for the areas that we
// remove here.
- MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();
for (auto object_and_size :
@@ -6112,7 +5829,7 @@ void MinorMarkCompactCollector::MakeIterable(
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
- full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
+ heap()->non_atomic_marking_state()->bitmap(p)->ClearRange(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(free_end));
if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
@@ -6129,7 +5846,7 @@ void MinorMarkCompactCollector::MakeIterable(
if (free_start != p->area_end()) {
CHECK_GT(p->area_end(), free_start);
size_t size = static_cast<size_t>(p->area_end() - free_start);
- full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
+ heap()->non_atomic_marking_state()->bitmap(p)->ClearRange(
p->AddressToMarkbitIndex(free_start),
p->AddressToMarkbitIndex(p->area_end()));
if (free_space_mode == FreeSpaceTreatmentMode::kZapFreeSpace) {
@@ -6144,10 +5861,8 @@ namespace {
// Helper class for pruning the string table.
class YoungGenerationExternalStringTableCleaner : public RootVisitor {
public:
- explicit YoungGenerationExternalStringTableCleaner(
- MinorMarkCompactCollector* collector)
- : heap_(collector->heap()),
- marking_state_(collector->non_atomic_marking_state()) {}
+ explicit YoungGenerationExternalStringTableCleaner(Heap* heap)
+ : heap_(heap), marking_state_(heap_->non_atomic_marking_state()) {}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
@@ -6173,8 +5888,8 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
}
private:
- Heap* heap_;
- NonAtomicMarkingState* marking_state_;
+ Heap* const heap_;
+ NonAtomicMarkingState* const marking_state_;
};
} // namespace
@@ -6186,7 +5901,7 @@ void MinorMarkCompactCollector::ClearNonLiveReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
// Internalized strings are always stored in old space, so there is no
// need to clean them here.
- YoungGenerationExternalStringTableCleaner external_visitor(this);
+ YoungGenerationExternalStringTableCleaner external_visitor(heap());
heap()->external_string_table_.IterateYoung(&external_visitor);
heap()->external_string_table_.CleanUpYoung();
}
@@ -6220,16 +5935,14 @@ MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
class PageMarkingItem;
class RootMarkingItem;
-class YoungGenerationMarkingTask;
class YoungGenerationMarkingTask {
public:
- YoungGenerationMarkingTask(Isolate* isolate,
- MinorMarkCompactCollector* collector,
+ YoungGenerationMarkingTask(Isolate* isolate, Heap* heap,
MarkingWorklists* global_worklists)
: marking_worklists_local_(
std::make_unique<MarkingWorklists::Local>(global_worklists)),
- marking_state_(collector->marking_state()),
+ marking_state_(heap->marking_state()),
visitor_(isolate, marking_state_, marking_worklists_local()) {}
void MarkObject(Object object) {
@@ -6250,6 +5963,8 @@ class YoungGenerationMarkingTask {
}
}
+ void PublishMarkingWorklist() { marking_worklists_local_->Publish(); }
+
MarkingWorklists::Local* marking_worklists_local() {
return marking_worklists_local_.get();
}
@@ -6260,155 +5975,138 @@ class YoungGenerationMarkingTask {
YoungGenerationMainMarkingVisitor visitor_;
};
-class PageMarkingItem : public ParallelWorkItem {
- public:
- explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
- ~PageMarkingItem() = default;
-
- void Process(YoungGenerationMarkingTask* task) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "PageMarkingItem::Process");
- base::MutexGuard guard(chunk_->mutex());
- MarkUntypedPointers(task);
- MarkTypedPointers(task);
- }
-
- private:
- inline Heap* heap() { return chunk_->heap(); }
-
- void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
- InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
- chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk_,
- [this, task, &filter](MaybeObjectSlot slot) {
- if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
- return CheckAndMarkObject(task, slot);
- },
- SlotSet::FREE_EMPTY_BUCKETS);
- }
+void PageMarkingItem::Process(YoungGenerationMarkingTask* task) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "PageMarkingItem::Process");
+ base::MutexGuard guard(chunk_->mutex());
+ MarkUntypedPointers(task);
+ MarkTypedPointers(task);
+}
+
+void PageMarkingItem::MarkUntypedPointers(YoungGenerationMarkingTask* task) {
+ InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
+ chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this, task, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return CheckAndMarkObject(task, slot);
+ },
+ SlotSet::FREE_EMPTY_BUCKETS);
+}
- void MarkTypedPointers(YoungGenerationMarkingTask* task) {
- RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_, [=](SlotType slot_type, Address slot) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
- return CheckAndMarkObject(task, slot);
- });
- });
- }
+void PageMarkingItem::MarkTypedPointers(YoungGenerationMarkingTask* task) {
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ chunk_, [=](SlotType slot_type, Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
+ return CheckAndMarkObject(task, slot);
+ });
+ });
+}
- template <typename TSlot>
- V8_INLINE SlotCallbackResult
- CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot) {
- static_assert(
- std::is_same<TSlot, FullMaybeObjectSlot>::value ||
- std::is_same<TSlot, MaybeObjectSlot>::value,
- "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
- MaybeObject object = *slot;
- if (Heap::InYoungGeneration(object)) {
- // Marking happens before flipping the young generation, so the object
- // has to be in a to page.
- DCHECK(Heap::InToPage(object));
- HeapObject heap_object;
- bool success = object.GetHeapObject(&heap_object);
- USE(success);
- DCHECK(success);
- task->MarkObject(heap_object);
- return KEEP_SLOT;
- }
- return REMOVE_SLOT;
+template <typename TSlot>
+V8_INLINE SlotCallbackResult PageMarkingItem::CheckAndMarkObject(
+ YoungGenerationMarkingTask* task, TSlot slot) {
+ static_assert(
+ std::is_same<TSlot, FullMaybeObjectSlot>::value ||
+ std::is_same<TSlot, MaybeObjectSlot>::value,
+ "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
+ MaybeObject object = *slot;
+ if (Heap::InYoungGeneration(object)) {
+ // Marking happens before flipping the young generation, so the object
+ // has to be in a to page.
+ DCHECK(Heap::InToPage(object));
+ HeapObject heap_object;
+ bool success = object.GetHeapObject(&heap_object);
+ USE(success);
+ DCHECK(success);
+ task->MarkObject(heap_object);
+ return KEEP_SLOT;
}
+ return REMOVE_SLOT;
+}
- MemoryChunk* chunk_;
-};
-
-class YoungGenerationMarkingJob : public v8::JobTask {
- public:
- YoungGenerationMarkingJob(Isolate* isolate,
- MinorMarkCompactCollector* collector,
- MarkingWorklists* global_worklists,
- std::vector<PageMarkingItem> marking_items)
- : isolate_(isolate),
- collector_(collector),
- global_worklists_(global_worklists),
- marking_items_(std::move(marking_items)),
- remaining_marking_items_(marking_items_.size()),
- generator_(marking_items_.size()) {}
-
- void Run(JobDelegate* delegate) override {
- if (delegate->IsJoiningThread()) {
- TRACE_GC(collector_->heap()->tracer(),
- GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
- ProcessItems(delegate);
- } else {
- TRACE_GC_EPOCH(collector_->heap()->tracer(),
- GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
- ThreadKind::kBackground);
- ProcessItems(delegate);
- }
+void YoungGenerationMarkingJob::Run(JobDelegate* delegate) {
+ if (delegate->IsJoiningThread()) {
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
+ ProcessItems(delegate);
+ } else {
+ TRACE_GC_EPOCH(heap_->tracer(),
+ GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
+ ThreadKind::kBackground);
+ ProcessItems(delegate);
}
+}
- size_t GetMaxConcurrency(size_t worker_count) const override {
- // Pages are not private to markers but we can still use them to estimate
- // the amount of marking that is required.
- const int kPagesPerTask = 2;
- size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
- size_t num_tasks = std::max(
+size_t YoungGenerationMarkingJob::GetMaxConcurrency(size_t worker_count) const {
+ // Pages are not private to markers but we can still use them to estimate
+ // the amount of marking that is required.
+ const int kPagesPerTask = 2;
+ size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
+ size_t num_tasks;
+ if (!incremental()) {
+ num_tasks = std::max(
(items + 1) / kPagesPerTask,
global_worklists_->shared()->Size() +
global_worklists_->on_hold()
->Size()); // TODO(v8:13012): If this is used with concurrent
// marking, we need to remove on_hold() here.
- if (!v8_flags.parallel_marking) {
- num_tasks = std::min<size_t>(1, num_tasks);
- }
- return std::min<size_t>(num_tasks,
- MinorMarkCompactCollector::kMaxParallelTasks);
+ } else {
+ num_tasks = (items + 1) / kPagesPerTask;
}
- private:
- void ProcessItems(JobDelegate* delegate) {
- double marking_time = 0.0;
- {
- TimedScope scope(&marking_time);
- YoungGenerationMarkingTask task(isolate_, collector_, global_worklists_);
- ProcessMarkingItems(&task);
- task.EmptyMarkingWorklist();
- }
- if (v8_flags.trace_minor_mc_parallel_marking) {
- PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
- static_cast<void*>(this), marking_time);
- }
+ if (!v8_flags.parallel_marking) {
+ num_tasks = std::min<size_t>(1, num_tasks);
}
+ return std::min<size_t>(num_tasks,
+ MinorMarkCompactCollector::kMaxParallelTasks);
+}
- void ProcessMarkingItems(YoungGenerationMarkingTask* task) {
- while (remaining_marking_items_.load(std::memory_order_relaxed) > 0) {
- base::Optional<size_t> index = generator_.GetNext();
- if (!index) return;
- for (size_t i = *index; i < marking_items_.size(); ++i) {
- auto& work_item = marking_items_[i];
- if (!work_item.TryAcquire()) break;
- work_item.Process(task);
+void YoungGenerationMarkingJob::ProcessItems(JobDelegate* delegate) {
+ double marking_time = 0.0;
+ {
+ TimedScope scope(&marking_time);
+ YoungGenerationMarkingTask task(isolate_, heap_, global_worklists_);
+ ProcessMarkingItems(&task);
+ if (!incremental()) {
+ task.EmptyMarkingWorklist();
+ } else {
+ task.PublishMarkingWorklist();
+ }
+ }
+ if (v8_flags.trace_minor_mc_parallel_marking) {
+ PrintIsolate(isolate_, "marking[%p]: time=%f\n", static_cast<void*>(this),
+ marking_time);
+ }
+}
+
+void YoungGenerationMarkingJob::ProcessMarkingItems(
+ YoungGenerationMarkingTask* task) {
+ // TODO(v8:13012): YoungGenerationMarkingJob is generally used to compute the
+ // transitive closure. In the context of concurrent MinorMC, it currently only
+ // seeds the worklists from the old-to-new remembered set, but does not empty
+ // them (this is done concurrently). The class should be refactored to make
+ // this clearer.
+ while (remaining_marking_items_.load(std::memory_order_relaxed) > 0) {
+ base::Optional<size_t> index = generator_.GetNext();
+ if (!index) return;
+ for (size_t i = *index; i < marking_items_.size(); ++i) {
+ auto& work_item = marking_items_[i];
+ if (!work_item.TryAcquire()) break;
+ work_item.Process(task);
+ if (!incremental()) {
task->EmptyMarkingWorklist();
- if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
- 1) {
- return;
- }
+ }
+ if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
+ 1) {
+ return;
}
}
}
-
- Isolate* isolate_;
- MinorMarkCompactCollector* collector_;
- MarkingWorklists* global_worklists_;
- std::vector<PageMarkingItem> marking_items_;
- std::atomic_size_t remaining_marking_items_{0};
- IndexGenerator generator_;
-};
+}
void MinorMarkCompactCollector::MarkRootSetInParallel(
- RootMarkingVisitor* root_visitor) {
+ RootMarkingVisitor* root_visitor, bool was_marked_incrementally) {
{
std::vector<PageMarkingItem> marking_items;
@@ -6426,11 +6124,14 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
SkipRoot::kOldGeneration});
isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
root_visitor);
- // Create items for each page.
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap(), [&marking_items](MemoryChunk* chunk) {
- marking_items.emplace_back(chunk);
- });
+
+ if (!was_marked_incrementally) {
+ // Create items for each page.
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [&marking_items](MemoryChunk* chunk) {
+ marking_items.emplace_back(chunk);
+ });
+ }
}
// Add tasks and run in parallel.
@@ -6439,12 +6140,14 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
// 0. Flush to ensure these items are visible globally and picked up
// by the job.
local_marking_worklists_->Publish();
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_CLOSURE_PARALLEL);
V8::GetCurrentPlatform()
- ->CreateJob(v8::TaskPriority::kUserBlocking,
- std::make_unique<YoungGenerationMarkingJob>(
- isolate(), this, marking_worklists(),
- std::move(marking_items)))
+ ->CreateJob(
+ v8::TaskPriority::kUserBlocking,
+ std::make_unique<YoungGenerationMarkingJob>(
+ isolate(), heap(), marking_worklists(),
+ std::move(marking_items), YoungMarkingJobType::kAtomic))
->Join();
DCHECK(local_marking_worklists_->IsEmpty());
@@ -6462,7 +6165,8 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
bool was_marked_incrementally = false;
{
- // TODO(v8:13012): TRACE_GC with MINOR_MC_MARK_FINISH_INCREMENTAL.
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_FINISH_INCREMENTAL);
if (heap_->incremental_marking()->Stop()) {
MarkingBarrier::PublishAll(heap());
// TODO(v8:13012): TRACE_GC with MINOR_MC_MARK_FULL_CLOSURE_PARALLEL_JOIN.
@@ -6475,11 +6179,11 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
RootMarkingVisitor root_visitor(this);
- MarkRootSetInParallel(&root_visitor);
+ MarkRootSetInParallel(&root_visitor, was_marked_incrementally);
// Mark rest on the main thread.
{
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_CLOSURE);
DrainMarkingWorklist();
}
@@ -6577,19 +6281,13 @@ void MinorMarkCompactCollector::Evacuate() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
+ EvacuationScope evacuation_scope(heap());
EvacuatePagesInParallel();
}
UpdatePointersAfterEvacuation();
{
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
- if (!heap()->new_space()->EnsureCurrentCapacity()) {
- heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
- }
- }
-
- {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
for (Page* p : new_space_evacuation_pages_) {
DCHECK(!p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
@@ -6598,12 +6296,24 @@ void MinorMarkCompactCollector::Evacuate() {
} else {
// Page was not promoted. Sweep it instead.
DCHECK_EQ(NEW_SPACE, p->owner_identity());
- sweeper()->AddPage(NEW_SPACE, p, Sweeper::REGULAR);
+ sweeper()->AddNewSpacePage(p);
}
}
new_space_evacuation_pages_.clear();
}
+ if (is_new_space_shrinking_) {
+ heap()->paged_new_space()->FinishShrinking();
+ is_new_space_shrinking_ = false;
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
+ if (!heap()->new_space()->EnsureCurrentCapacity()) {
+ heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
+ }
+ }
+
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
EvacuateEpilogue();
@@ -6614,13 +6324,13 @@ namespace {
class YoungGenerationEvacuator : public Evacuator {
public:
- explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
- : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
+ explicit YoungGenerationEvacuator(Heap* heap)
+ : Evacuator(heap, &record_visitor_, &local_allocator_,
AlwaysPromoteYoung::kNo),
- record_visitor_(collector->heap()->mark_compact_collector()),
+ record_visitor_(heap_),
local_allocator_(
heap_, CompactionSpaceKind::kCompactionSpaceForMinorMarkCompact),
- collector_(collector) {}
+ collector_(heap_->minor_mark_compact_collector()) {}
GCTracer::Scope::ScopeId GetBackgroundTracingScope() override {
return GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
@@ -6631,18 +6341,18 @@ class YoungGenerationEvacuator : public Evacuator {
}
protected:
- void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
+ bool RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
YoungGenerationRecordMigratedSlotVisitor record_visitor_;
EvacuationAllocator local_allocator_;
MinorMarkCompactCollector* collector_;
};
-void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
+bool YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
intptr_t* live_bytes) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"YoungGenerationEvacuator::RawEvacuatePage");
- NonAtomicMarkingState* marking_state = collector_->non_atomic_marking_state();
+ NonAtomicMarkingState* marking_state = heap_->non_atomic_marking_state();
*live_bytes = marking_state->live_bytes(chunk);
DCHECK_EQ(kPageNewToOld, ComputeEvacuationMode(chunk));
LiveObjectVisitor::VisitBlackObjectsNoFail(chunk, marking_state,
@@ -6662,6 +6372,8 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
FreeSpaceTreatmentMode::kIgnoreFreeSpace);
}
}
+
+ return true;
}
} // namespace
@@ -6675,7 +6387,10 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
DCHECK_LT(0, live_bytes_on_page);
live_bytes += live_bytes_on_page;
if (ShouldMovePage(page, live_bytes_on_page, page->wasted_memory(),
- MemoryReductionMode::kNone, AlwaysPromoteYoung::kNo)) {
+ MemoryReductionMode::kNone, AlwaysPromoteYoung::kNo,
+ heap()->tracer()->IsCurrentGCDueToAllocationFailure()
+ ? PromoteUnusablePages::kYes
+ : PromoteUnusablePages::kNo)) {
EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
evacuation_items.emplace_back(ParallelWorkItem{}, page);
}
@@ -6687,7 +6402,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
LargePage* current = *it;
it++;
HeapObject object = current->GetObject();
- if (non_atomic_marking_state_.IsBlack(object)) {
+ if (non_atomic_marking_state()->IsBlack(object)) {
heap_->lo_space()->PromoteNewLargeObject(current);
current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
promoted_large_pages_.push_back(current);
@@ -6702,43 +6417,15 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
const auto pages_count = evacuation_items.size();
const auto wanted_num_tasks =
CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
- this, std::move(evacuation_items), &observer);
+ heap(), std::move(evacuation_items), &observer);
if (v8_flags.trace_evacuation) {
TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes, 0);
}
}
-void MinorMarkCompactCollector::StartSweepNewSpace() {
- PagedSpaceBase* paged_space = heap()->paged_new_space()->paged_space();
- paged_space->ClearAllocatorState();
-
- int will_be_swept = 0;
-
- // Loop needs to support deletion if live bytes == 0 for a page.
- for (auto it = paged_space->begin(); it != paged_space->end();) {
- Page* p = *(it++);
- DCHECK(p->SweepingDone());
-
- if (non_atomic_marking_state()->live_bytes(p) > 0) {
- // Non-empty pages will be evacuated/promoted.
- continue;
- }
-
- // New space preallocates all its pages. Don't free empty pages since they
- // will just be reallocated.
- DCHECK_EQ(NEW_SPACE, paged_space->identity());
- sweeper_->AddPage(NEW_SPACE, p, Sweeper::REGULAR);
- will_be_swept++;
- }
-
- if (v8_flags.gc_verbose) {
- PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
- paged_space->name(), will_be_swept);
- }
-}
-
void MinorMarkCompactCollector::Sweep() {
+ DCHECK(!sweeper()->sweeping_in_progress());
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEP);
{
GCTracer::Scope sweep_scope(heap()->tracer(),
@@ -6746,7 +6433,7 @@ void MinorMarkCompactCollector::Sweep() {
ThreadKind::kMain);
StartSweepNewSpace();
}
- sweeper_->StartSweeping();
+ sweeper_->StartSweeping(garbage_collector_);
}
} // namespace internal
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 9d960cd360..40b6019de5 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -11,6 +11,7 @@
#include "include/v8-internal.h"
#include "src/heap/base/worklist.h"
#include "src/heap/concurrent-marking.h"
+#include "src/heap/marking-state.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
@@ -173,79 +174,11 @@ class LiveObjectVisitor : AllStatic {
};
enum class AlwaysPromoteYoung { kYes, kNo };
+enum class PromoteUnusablePages { kYes, kNo };
enum class MemoryReductionMode { kNone, kShouldReduceMemory };
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
-// This is used by marking visitors.
-class MarkingState final
- : public MarkingStateBase<MarkingState, AccessMode::ATOMIC> {
- public:
- explicit MarkingState(PtrComprCageBase cage_base)
- : MarkingStateBase(cage_base) {}
-
- ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
- const BasicMemoryChunk* chunk) const {
- return chunk->marking_bitmap<AccessMode::ATOMIC>();
- }
-
- // Concurrent marking uses local live bytes so we may do these accesses
- // non-atomically.
- void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
- }
-
- intptr_t live_bytes(const MemoryChunk* chunk) const {
- return chunk->live_byte_count_.load(std::memory_order_relaxed);
- }
-
- void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
- chunk->live_byte_count_.store(value, std::memory_order_relaxed);
- }
-};
-
-// This is used by Scavenger and Evacuator in TransferColor.
-// Live byte increments have to be atomic.
-class AtomicMarkingState final
- : public MarkingStateBase<AtomicMarkingState, AccessMode::ATOMIC> {
- public:
- explicit AtomicMarkingState(PtrComprCageBase cage_base)
- : MarkingStateBase(cage_base) {}
-
- ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
- const BasicMemoryChunk* chunk) const {
- return chunk->marking_bitmap<AccessMode::ATOMIC>();
- }
-
- void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- chunk->live_byte_count_.fetch_add(by);
- }
-};
-
-class NonAtomicMarkingState final
- : public MarkingStateBase<NonAtomicMarkingState, AccessMode::NON_ATOMIC> {
- public:
- explicit NonAtomicMarkingState(PtrComprCageBase cage_base)
- : MarkingStateBase(cage_base) {}
-
- ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
- const BasicMemoryChunk* chunk) const {
- return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
- }
-
- void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
- chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
- }
-
- intptr_t live_bytes(const MemoryChunk* chunk) const {
- return chunk->live_byte_count_.load(std::memory_order_relaxed);
- }
-
- void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
- chunk->live_byte_count_.store(value, std::memory_order_relaxed);
- }
-};
-
// This visitor is used for marking on the main thread. It is cheaper than
// the concurrent marking visitor because it does not snapshot JSObjects.
template <typename MarkingState>
@@ -328,15 +261,6 @@ class CollectorBase {
virtual void Prepare() = 0;
virtual void StartMarking() = 0;
- MarkingState* marking_state() { return &marking_state_; }
-
- NonAtomicMarkingState* non_atomic_marking_state() {
- return &non_atomic_marking_state_;
- }
-
- inline Heap* heap() const { return heap_; }
- inline Isolate* isolate();
-
MarkingWorklists* marking_worklists() { return &marking_worklists_; }
MarkingWorklists::Local* local_marking_worklists() {
@@ -352,8 +276,6 @@ class CollectorBase {
  // Used by incremental marking for objects that change their layout.
virtual void VisitObject(HeapObject obj) = 0;
- virtual bool sweeping_in_progress() const = 0;
-
virtual void Finish() = 0;
bool IsMajorMC();
@@ -363,14 +285,29 @@ class CollectorBase {
std::vector<LargePage*> promoted_large_pages_;
protected:
+ inline Heap* heap() const { return heap_; }
+ inline Isolate* isolate();
+
+ MarkingState* marking_state() { return marking_state_; }
+
+ NonAtomicMarkingState* non_atomic_marking_state() {
+ return non_atomic_marking_state_;
+ }
+
+ void StartSweepSpace(PagedSpace* space);
+ void StartSweepNewSpace();
+ void SweepLargeSpace(LargeObjectSpace* space);
+
Heap* heap_;
GarbageCollector garbage_collector_;
MarkingWorklists marking_worklists_;
std::unique_ptr<MarkingWorklists::Local> local_marking_worklists_;
- MarkingState marking_state_;
- NonAtomicMarkingState non_atomic_marking_state_;
+ MarkingState* const marking_state_;
+ NonAtomicMarkingState* const non_atomic_marking_state_;
+
+ bool is_new_space_shrinking_ = false;
explicit CollectorBase(Heap* heap, GarbageCollector collector);
virtual ~CollectorBase() = default;
@@ -459,35 +396,9 @@ class MarkCompactCollector final : public CollectorBase {
void RecordLiveSlotsOnPage(Page* page);
bool is_compacting() const { return compacting_; }
- bool is_shared_heap() const { return is_shared_heap_; }
-
- void FinishSweepingIfOutOfWork();
-
- enum class SweepingForcedFinalizationMode { kUnifiedHeap, kV8Only };
-
- // Ensures that sweeping is finished.
- //
- // Note: Can only be called safely from main thread.
- V8_EXPORT_PRIVATE void EnsureSweepingCompleted(
- SweepingForcedFinalizationMode mode);
-
- void EnsurePageIsSwept(Page* page);
-
- void DrainSweepingWorklistForSpace(AllocationSpace space);
-
- // Checks if sweeping is in progress right now on any space.
- bool sweeping_in_progress() const final {
- return sweeper_->sweeping_in_progress();
- }
-
- void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
-
- bool evacuation() const { return evacuation_; }
inline void AddTransitionArray(TransitionArray array);
- Sweeper* sweeper() { return sweeper_; }
-
#ifdef DEBUG
// Checks whether performing mark-compact collection.
bool in_use() { return state_ > PREPARE_GC; }
@@ -544,10 +455,12 @@ class MarkCompactCollector final : public CollectorBase {
// `kNullAddress` if the parameter does not point to (the interior of) a valid
// heap object, or if it points to (the interior of) some object that is
// already marked as live (black or grey).
- Address FindBasePtrForMarking(Address maybe_inner_ptr);
+ V8_EXPORT_PRIVATE Address FindBasePtrForMarking(Address maybe_inner_ptr);
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_MB
private:
+ Sweeper* sweeper() { return sweeper_; }
+
void ComputeEvacuationHeuristics(size_t area_size,
int* target_fragmentation_percent,
size_t* max_evacuated_bytes);
@@ -560,9 +473,6 @@ class MarkCompactCollector final : public CollectorBase {
// Free unmarked ArrayBufferExtensions.
void SweepArrayBufferExtensions();
- // Free unmarked entries in the ExternalPointerTable.
- void SweepExternalPointerTable();
-
void MarkLiveObjects();
// Marks the object grey and adds it to the marking work list.
@@ -677,9 +587,6 @@ class MarkCompactCollector final : public CollectorBase {
// Starts sweeping of spaces by contributing on the main thread and setting
// up other pages for sweeping. Does not start sweeper tasks.
void Sweep();
- void StartSweepSpace(PagedSpace* space);
- void StartSweepNewSpace();
- void SweepLargeSpace(LargeObjectSpace* space);
void EvacuatePrologue();
void EvacuateEpilogue();
@@ -701,6 +608,8 @@ class MarkCompactCollector final : public CollectorBase {
void RightTrimDescriptorArray(DescriptorArray array, int descriptors_to_trim);
+ V8_INLINE bool ShouldMarkObject(HeapObject) const;
+
base::Mutex mutex_;
base::Semaphore page_parallel_job_semaphore_{0};
@@ -717,9 +626,9 @@ class MarkCompactCollector final : public CollectorBase {
CollectorState state_;
#endif
- const bool is_shared_heap_;
+ const bool uses_shared_heap_;
+ const bool is_shared_heap_isolate_;
- bool evacuation_ = false;
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_ = false;
@@ -746,7 +655,7 @@ class MarkCompactCollector final : public CollectorBase {
aborted_evacuation_candidates_due_to_flags_;
std::vector<LargePage*> promoted_large_pages_;
- Sweeper* sweeper_;
+ Sweeper* const sweeper_;
// Counts the number of major mark-compact collections. The counter is
// incremented right after marking. This is used for:
@@ -765,19 +674,6 @@ class MarkCompactCollector final : public CollectorBase {
friend class RecordMigratedSlotVisitor;
};
-class V8_NODISCARD EvacuationScope {
- public:
- explicit EvacuationScope(MarkCompactCollector* collector)
- : collector_(collector) {
- collector_->set_evacuation(true);
- }
-
- ~EvacuationScope() { collector_->set_evacuation(false); }
-
- private:
- MarkCompactCollector* collector_;
-};
-
// Collector for young-generation only.
class MinorMarkCompactCollector final : public CollectorBase {
public:
@@ -807,9 +703,6 @@ class MinorMarkCompactCollector final : public CollectorBase {
void Finish() final;
- Sweeper* sweeper() { return sweeper_.get(); }
- bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }
-
void VisitObject(HeapObject obj) final;
private:
@@ -818,15 +711,17 @@ class MinorMarkCompactCollector final : public CollectorBase {
static const int kNumMarkers = 8;
static const int kMainMarker = 0;
+ Sweeper* sweeper() { return sweeper_; }
+
void MarkLiveObjects();
- void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
+ void MarkRootSetInParallel(RootMarkingVisitor* root_visitor,
+ bool was_marked_incrementally);
V8_INLINE void MarkRootObject(HeapObject obj);
void DrainMarkingWorklist();
void TraceFragmentation();
void ClearNonLiveReferences();
void Sweep();
- void StartSweepNewSpace();
void EvacuatePrologue();
void EvacuateEpilogue();
@@ -844,7 +739,7 @@ class MinorMarkCompactCollector final : public CollectorBase {
std::vector<Page*> promoted_pages_;
std::vector<LargePage*> promoted_large_pages_;
- std::unique_ptr<Sweeper> sweeper_;
+ Sweeper* const sweeper_;
friend class YoungGenerationMarkingTask;
friend class YoungGenerationMarkingJob;
diff --git a/deps/v8/src/heap/marking-barrier-inl.h b/deps/v8/src/heap/marking-barrier-inl.h
index 5f50081c4e..4d83a533e0 100644
--- a/deps/v8/src/heap/marking-barrier-inl.h
+++ b/deps/v8/src/heap/marking-barrier-inl.h
@@ -28,8 +28,7 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
// visits the host object.
return false;
}
- BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(value);
- if (is_shared_heap_ != target_page->InSharedHeap()) return false;
+ if (!ShouldMarkObject(value)) return false;
if (is_minor()) {
// We do not need to insert into RememberedSet<OLD_TO_NEW> here because the
@@ -48,6 +47,16 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
}
}
+bool MarkingBarrier::ShouldMarkObject(HeapObject object) const {
+ if (V8_LIKELY(!uses_shared_heap_)) return true;
+ if (v8_flags.shared_space) {
+ if (is_shared_heap_isolate_) return true;
+ return !object.InSharedHeap();
+ } else {
+ return is_shared_heap_isolate_ == object.InSharedHeap();
+ }
+}
+
template <typename TSlot>
inline void MarkingBarrier::MarkRange(HeapObject host, TSlot start, TSlot end) {
auto* isolate = heap_->isolate();
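// Illustrative sketch (not from the patch): a minimal, self-contained model of
// the ShouldMarkObject() predicate added above. All names below are
// hypothetical stand-ins; the real barrier reads Isolate/Heap state and
// v8_flags.shared_space rather than plain bools.
#include <cassert>

namespace sketch {

struct ObjectInfo {
  bool in_shared_heap;  // models HeapObject::InSharedHeap()
};

struct BarrierConfig {
  bool uses_shared_heap;        // isolate participates in a shared heap
  bool is_shared_heap_isolate;  // this isolate owns the shared heap
  bool shared_space_enabled;    // models the shared-space flag
};

// Client isolates skip objects living in the shared heap; the shared-heap
// isolate marks everything (shared-space mode) or only shared objects
// (legacy shared-isolate mode).
inline bool ShouldMarkObject(const BarrierConfig& cfg, const ObjectInfo& obj) {
  if (!cfg.uses_shared_heap) return true;
  if (cfg.shared_space_enabled) {
    if (cfg.is_shared_heap_isolate) return true;
    return !obj.in_shared_heap;
  }
  return cfg.is_shared_heap_isolate == obj.in_shared_heap;
}

}  // namespace sketch

int main() {
  // A client isolate attached to a shared heap must not mark shared objects.
  assert(!sketch::ShouldMarkObject({true, false, true}, {true}));
  // An isolate without any shared heap marks everything locally.
  assert(sketch::ShouldMarkObject({false, false, false}, {false}));
  return 0;
}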
diff --git a/deps/v8/src/heap/marking-barrier.cc b/deps/v8/src/heap/marking-barrier.cc
index e028a67295..c66bf9d4b2 100644
--- a/deps/v8/src/heap/marking-barrier.cc
+++ b/deps/v8/src/heap/marking-barrier.cc
@@ -31,7 +31,8 @@ MarkingBarrier::MarkingBarrier(LocalHeap* local_heap)
minor_worklist_(*minor_collector_->marking_worklists()->shared()),
marking_state_(heap_->isolate()),
is_main_thread_barrier_(local_heap->is_main_thread()),
- is_shared_heap_(heap_->IsShared()) {}
+ uses_shared_heap_(heap_->isolate()->has_shared_heap()),
+ is_shared_heap_isolate_(heap_->isolate()->is_shared_heap_isolate()) {}
MarkingBarrier::~MarkingBarrier() { DCHECK(typed_slots_map_.empty()); }
@@ -212,6 +213,9 @@ void MarkingBarrier::Deactivate() {
if (heap_->map_space()) DeactivateSpace(heap_->map_space());
DeactivateSpace(heap_->code_space());
DeactivateSpace(heap_->new_space());
+ if (heap_->shared_space()) {
+ DeactivateSpace(heap_->shared_space());
+ }
for (LargePage* p : *heap_->new_lo_space()) {
p->SetYoungGenerationPageFlags(false);
DCHECK(p->IsLargePage());
@@ -222,6 +226,11 @@ void MarkingBarrier::Deactivate() {
for (LargePage* p : *heap_->code_lo_space()) {
p->SetOldGenerationPageFlags(false);
}
+ if (heap_->shared_lo_space()) {
+ for (LargePage* p : *heap_->shared_lo_space()) {
+ p->SetOldGenerationPageFlags(false);
+ }
+ }
}
DCHECK(typed_slots_map_.empty());
DCHECK(current_worklist_->IsLocalEmpty());
@@ -259,6 +268,9 @@ void MarkingBarrier::Activate(bool is_compacting,
ActivateSpace(heap_->code_space());
}
ActivateSpace(heap_->new_space());
+ if (heap_->shared_space()) {
+ ActivateSpace(heap_->shared_space());
+ }
for (LargePage* p : *heap_->new_lo_space()) {
p->SetYoungGenerationPageFlags(true);
@@ -276,6 +288,12 @@ void MarkingBarrier::Activate(bool is_compacting,
p->SetOldGenerationPageFlags(true);
}
}
+
+ if (heap_->shared_lo_space()) {
+ for (LargePage* p : *heap_->shared_lo_space()) {
+ p->SetOldGenerationPageFlags(true);
+ }
+ }
}
}
diff --git a/deps/v8/src/heap/marking-barrier.h b/deps/v8/src/heap/marking-barrier.h
index 8d04a25d10..96d0b32926 100644
--- a/deps/v8/src/heap/marking-barrier.h
+++ b/deps/v8/src/heap/marking-barrier.h
@@ -50,6 +50,7 @@ class MarkingBarrier {
}
private:
+ inline bool ShouldMarkObject(HeapObject value) const;
inline bool WhiteToGreyAndPush(HeapObject value);
void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
@@ -83,7 +84,8 @@ class MarkingBarrier {
bool is_compacting_ = false;
bool is_activated_ = false;
bool is_main_thread_barrier_;
- bool is_shared_heap_;
+ const bool uses_shared_heap_;
+ const bool is_shared_heap_isolate_;
MarkingBarrierType marking_barrier_type_;
};
diff --git a/deps/v8/src/heap/marking-state-inl.h b/deps/v8/src/heap/marking-state-inl.h
new file mode 100644
index 0000000000..0ab19a91a1
--- /dev/null
+++ b/deps/v8/src/heap/marking-state-inl.h
@@ -0,0 +1,155 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MARKING_STATE_INL_H_
+#define V8_HEAP_MARKING_STATE_INL_H_
+
+#include "src/heap/marking-state.h"
+#include "src/heap/memory-chunk.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename ConcreteState, AccessMode access_mode>
+MarkBit MarkingStateBase<ConcreteState, access_mode>::MarkBitFrom(
+ const HeapObject obj) const {
+ return MarkBitFrom(BasicMemoryChunk::FromHeapObject(obj), obj.ptr());
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+MarkBit MarkingStateBase<ConcreteState, access_mode>::MarkBitFrom(
+ const BasicMemoryChunk* p, Address addr) const {
+ return static_cast<const ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
+ p->AddressToMarkbitIndex(addr));
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+Marking::ObjectColor MarkingStateBase<ConcreteState, access_mode>::Color(
+ const HeapObject obj) const {
+ return Marking::Color(MarkBitFrom(obj));
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::IsImpossible(
+ const HeapObject obj) const {
+ return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::IsBlack(
+ const HeapObject obj) const {
+ return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::IsWhite(
+ const HeapObject obj) const {
+ return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::IsGrey(
+ const HeapObject obj) const {
+ return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::IsBlackOrGrey(
+ const HeapObject obj) const {
+ return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::WhiteToGrey(HeapObject obj) {
+ return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::WhiteToBlack(
+ HeapObject obj) {
+ return WhiteToGrey(obj) && GreyToBlack(obj);
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlack(HeapObject obj) {
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
+ MarkBit markbit = MarkBitFrom(chunk, obj.address());
+ if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
+ static_cast<ConcreteState*>(this)->IncrementLiveBytes(
+ MemoryChunk::cast(chunk),
+ ALIGN_TO_ALLOCATION_ALIGNMENT(obj.Size(cage_base())));
+ return true;
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlackUnaccounted(
+ HeapObject obj) {
+ return Marking::GreyToBlack<access_mode>(MarkBitFrom(obj));
+}
+
+template <typename ConcreteState, AccessMode access_mode>
+void MarkingStateBase<ConcreteState, access_mode>::ClearLiveness(
+ MemoryChunk* chunk) {
+ static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
+ static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
+}
+
+ConcurrentBitmap<AccessMode::ATOMIC>* MarkingState::bitmap(
+ const BasicMemoryChunk* chunk) const {
+ return chunk->marking_bitmap<AccessMode::ATOMIC>();
+}
+
+void MarkingState::IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
+ DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
+ IsAligned(by, kObjectAlignment8GbHeap));
+ chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
+}
+
+intptr_t MarkingState::live_bytes(const MemoryChunk* chunk) const {
+ return chunk->live_byte_count_.load(std::memory_order_relaxed);
+}
+
+void MarkingState::SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
+ DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
+ IsAligned(value, kObjectAlignment8GbHeap));
+ chunk->live_byte_count_.store(value, std::memory_order_relaxed);
+}
+
+ConcurrentBitmap<AccessMode::NON_ATOMIC>* NonAtomicMarkingState::bitmap(
+ const BasicMemoryChunk* chunk) const {
+ return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
+}
+
+void NonAtomicMarkingState::IncrementLiveBytes(MemoryChunk* chunk,
+ intptr_t by) {
+ DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
+ IsAligned(by, kObjectAlignment8GbHeap));
+ chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
+}
+
+intptr_t NonAtomicMarkingState::live_bytes(const MemoryChunk* chunk) const {
+ return chunk->live_byte_count_.load(std::memory_order_relaxed);
+}
+
+void NonAtomicMarkingState::SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
+ DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
+ IsAligned(value, kObjectAlignment8GbHeap));
+ chunk->live_byte_count_.store(value, std::memory_order_relaxed);
+}
+
+ConcurrentBitmap<AccessMode::ATOMIC>* AtomicMarkingState::bitmap(
+ const BasicMemoryChunk* chunk) const {
+ return chunk->marking_bitmap<AccessMode::ATOMIC>();
+}
+
+void AtomicMarkingState::IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
+ DCHECK_IMPLIES(V8_COMPRESS_POINTERS_8GB_BOOL,
+ IsAligned(by, kObjectAlignment8GbHeap));
+ chunk->live_byte_count_.fetch_add(by);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_MARKING_STATE_INL_H_
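// Illustrative sketch (not from the patch): a self-contained model of the
// grey-to-black transition with live-byte accounting defined above. The atomic
// counter stands in for MemoryChunk::live_byte_count_, and object sizes are
// assumed to already be allocation-aligned.
#include <atomic>
#include <cstdint>

namespace sketch {

enum class Color { kWhite, kGrey, kBlack };

struct Chunk {
  std::atomic<intptr_t> live_bytes{0};
};

// Only the thread that wins the grey->black transition accounts the object's
// size, so each live object contributes to the chunk's live bytes exactly once
// per marking cycle.
inline bool GreyToBlack(std::atomic<Color>& color, Chunk& chunk,
                        intptr_t object_size) {
  Color expected = Color::kGrey;
  if (!color.compare_exchange_strong(expected, Color::kBlack)) return false;
  chunk.live_bytes.fetch_add(object_size, std::memory_order_relaxed);
  return true;
}

}  // namespace sketch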
diff --git a/deps/v8/src/heap/marking-state.h b/deps/v8/src/heap/marking-state.h
new file mode 100644
index 0000000000..c197c10243
--- /dev/null
+++ b/deps/v8/src/heap/marking-state.h
@@ -0,0 +1,137 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MARKING_STATE_H_
+#define V8_HEAP_MARKING_STATE_H_
+
+#include "src/common/globals.h"
+#include "src/heap/marking.h"
+#include "src/objects/heap-object.h"
+
+namespace v8 {
+namespace internal {
+
+class BasicMemoryChunk;
+class MemoryChunk;
+
+template <typename ConcreteState, AccessMode access_mode>
+class MarkingStateBase {
+ public:
+ // Declares that this marking state is not collecting retainers, so the
+ // marking visitor may update the heap state to store information about
+ // progress, and may avoid fully visiting an object if it is safe to do so.
+ static constexpr bool kCollectRetainers = false;
+
+ explicit MarkingStateBase(PtrComprCageBase cage_base)
+#if V8_COMPRESS_POINTERS
+ : cage_base_(cage_base)
+#endif
+ {
+ }
+
+ // The pointer compression cage base value used for decompression of all
+ // tagged values except references to Code objects.
+ V8_INLINE PtrComprCageBase cage_base() const {
+#if V8_COMPRESS_POINTERS
+ return cage_base_;
+#else
+ return PtrComprCageBase{};
+#endif // V8_COMPRESS_POINTERS
+ }
+
+ V8_INLINE MarkBit MarkBitFrom(const HeapObject obj) const;
+
+ // {addr} may be tagged or aligned.
+ V8_INLINE MarkBit MarkBitFrom(const BasicMemoryChunk* p, Address addr) const;
+
+ V8_INLINE Marking::ObjectColor Color(const HeapObject obj) const;
+
+ V8_INLINE bool IsImpossible(const HeapObject obj) const;
+
+ V8_INLINE bool IsBlack(const HeapObject obj) const;
+
+ V8_INLINE bool IsWhite(const HeapObject obj) const;
+
+ V8_INLINE bool IsGrey(const HeapObject obj) const;
+
+ V8_INLINE bool IsBlackOrGrey(const HeapObject obj) const;
+
+ V8_INLINE bool WhiteToGrey(HeapObject obj);
+
+ V8_INLINE bool WhiteToBlack(HeapObject obj);
+
+ V8_INLINE bool GreyToBlack(HeapObject obj);
+
+ V8_INLINE bool GreyToBlackUnaccounted(HeapObject obj);
+
+ V8_INLINE void ClearLiveness(MemoryChunk* chunk);
+
+ void AddStrongReferenceForReferenceSummarizer(HeapObject host,
+ HeapObject obj) {
+ // This is not a reference summarizer, so there is nothing to do here.
+ }
+
+ void AddWeakReferenceForReferenceSummarizer(HeapObject host, HeapObject obj) {
+ // This is not a reference summarizer, so there is nothing to do here.
+ }
+
+ private:
+#if V8_COMPRESS_POINTERS
+ const PtrComprCageBase cage_base_;
+#endif // V8_COMPRESS_POINTERS
+};
+
+// This is used by marking visitors.
+class MarkingState final
+ : public MarkingStateBase<MarkingState, AccessMode::ATOMIC> {
+ public:
+ explicit MarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
+ V8_INLINE ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
+ const BasicMemoryChunk* chunk) const;
+
+ // Concurrent marking uses local live bytes so we may do these accesses
+ // non-atomically.
+ V8_INLINE void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by);
+
+ V8_INLINE intptr_t live_bytes(const MemoryChunk* chunk) const;
+
+ V8_INLINE void SetLiveBytes(MemoryChunk* chunk, intptr_t value);
+};
+
+class NonAtomicMarkingState final
+ : public MarkingStateBase<NonAtomicMarkingState, AccessMode::NON_ATOMIC> {
+ public:
+ explicit NonAtomicMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
+ V8_INLINE ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
+ const BasicMemoryChunk* chunk) const;
+
+ V8_INLINE void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by);
+
+ V8_INLINE intptr_t live_bytes(const MemoryChunk* chunk) const;
+
+ V8_INLINE void SetLiveBytes(MemoryChunk* chunk, intptr_t value);
+};
+
+// This is used by Scavenger and Evacuator in TransferColor.
+// Live byte increments have to be atomic.
+class AtomicMarkingState final
+ : public MarkingStateBase<AtomicMarkingState, AccessMode::ATOMIC> {
+ public:
+ explicit AtomicMarkingState(PtrComprCageBase cage_base)
+ : MarkingStateBase(cage_base) {}
+
+ V8_INLINE ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
+ const BasicMemoryChunk* chunk) const;
+
+ V8_INLINE void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_MARKING_STATE_H_
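// Illustrative sketch (not from the patch): the header above uses CRTP so that
// MarkingStateBase can call into the concrete state's bitmap() and
// IncrementLiveBytes() without virtual dispatch. A minimal model of that
// pattern, with hypothetical names:
#include <cstdint>

namespace sketch {

template <typename Concrete>
class CounterBase {
 public:
  // Statically dispatches to the derived class; resolved at compile time, so
  // there is no vtable and the call can be inlined.
  void Add(int64_t by) { static_cast<Concrete*>(this)->AddImpl(by); }
};

class RelaxedCounter final : public CounterBase<RelaxedCounter> {
 public:
  void AddImpl(int64_t by) { value_ += by; }
  int64_t value() const { return value_; }

 private:
  int64_t value_ = 0;
};

}  // namespace sketch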
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index dfaa739317..64053c6042 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_HEAP_MARKING_VISITOR_INL_H_
#define V8_HEAP_MARKING_VISITOR_INL_H_
+#include "src/heap/marking-state-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/objects-visiting-inl.h"
@@ -44,7 +45,7 @@ template <typename THeapObjectSlot>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessStrongHeapObject(
HeapObject host, THeapObjectSlot slot, HeapObject heap_object) {
SynchronizePageAccess(heap_object);
- if (!is_shared_heap_ && heap_object.InSharedHeap()) return;
+ if (!ShouldMarkObject(heap_object)) return;
MarkObject(host, heap_object);
concrete_visitor()->RecordSlot(host, slot, heap_object);
}
@@ -56,7 +57,7 @@ template <typename THeapObjectSlot>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessWeakHeapObject(
HeapObject host, THeapObjectSlot slot, HeapObject heap_object) {
SynchronizePageAccess(heap_object);
- if (!is_shared_heap_ && heap_object.InSharedHeap()) return;
+ if (!ShouldMarkObject(heap_object)) return;
if (concrete_visitor()->marking_state()->IsBlackOrGrey(heap_object)) {
// Weak references with live values are directly processed here to
// reduce the processing time of weak cells during the main GC
@@ -116,7 +117,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEmbeddedPointer(
DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object =
rinfo->target_object(ObjectVisitorWithCageBases::cage_base());
- if (!is_shared_heap_ && object.InSharedHeap()) return;
+ if (!ShouldMarkObject(object)) return;
if (!concrete_visitor()->marking_state()->IsBlackOrGrey(object)) {
if (host.IsWeakObject(object)) {
@@ -136,7 +137,7 @@ void MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitCodeTarget(
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (!is_shared_heap_ && target.InSharedHeap()) return;
+ if (!ShouldMarkObject(target)) return;
MarkObject(host, target);
concrete_visitor()->RecordRelocSlot(host, rinfo, target);
}
@@ -371,7 +372,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
ObjectSlot value_slot =
table.RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
- if ((!is_shared_heap_ && key.InSharedHeap()) ||
+ if (!ShouldMarkObject(key) ||
concrete_visitor()->marking_state()->IsBlackOrGrey(key)) {
VisitPointer(table, value_slot);
} else {
@@ -383,7 +384,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitEphemeronHashTable(
concrete_visitor()->RecordSlot(table, value_slot, value);
AddWeakReferenceForReferenceSummarizer(table, value);
- if (!is_shared_heap_ && value.InSharedHeap()) continue;
+ if (!ShouldMarkObject(value)) continue;
// Revisit ephemerons with both key and value unreachable at end
// of concurrent marking cycle.
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index 81c81d24b8..3e6937bb41 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -6,9 +6,8 @@
#define V8_HEAP_MARKING_VISITOR_H_
#include "src/common/globals.h"
+#include "src/heap/marking-state.h"
#include "src/heap/marking-worklist.h"
-#include "src/heap/marking.h"
-#include "src/heap/memory-chunk.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/spaces.h"
#include "src/heap/weak-object-worklists.h"
@@ -22,106 +21,6 @@ struct EphemeronMarking {
size_t newly_discovered_limit;
};
-template <typename ConcreteState, AccessMode access_mode>
-class MarkingStateBase {
- public:
- // Declares that this marking state is not collecting retainers, so the
- // marking visitor may update the heap state to store information about
- // progress, and may avoid fully visiting an object if it is safe to do so.
- static constexpr bool kCollectRetainers = false;
-
- explicit MarkingStateBase(PtrComprCageBase cage_base)
-#if V8_COMPRESS_POINTERS
- : cage_base_(cage_base)
-#endif
- {
- }
-
- // The pointer compression cage base value used for decompression of all
- // tagged values except references to Code objects.
- V8_INLINE PtrComprCageBase cage_base() const {
-#if V8_COMPRESS_POINTERS
- return cage_base_;
-#else
- return PtrComprCageBase{};
-#endif // V8_COMPRESS_POINTERS
- }
-
- V8_INLINE MarkBit MarkBitFrom(const HeapObject obj) const {
- return MarkBitFrom(BasicMemoryChunk::FromHeapObject(obj), obj.ptr());
- }
-
- // {addr} may be tagged or aligned.
- V8_INLINE MarkBit MarkBitFrom(const BasicMemoryChunk* p, Address addr) const {
- return static_cast<const ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
- p->AddressToMarkbitIndex(addr));
- }
-
- Marking::ObjectColor Color(const HeapObject obj) const {
- return Marking::Color(MarkBitFrom(obj));
- }
-
- V8_INLINE bool IsImpossible(const HeapObject obj) const {
- return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
- }
-
- V8_INLINE bool IsBlack(const HeapObject obj) const {
- return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
- }
-
- V8_INLINE bool IsWhite(const HeapObject obj) const {
- return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
- }
-
- V8_INLINE bool IsGrey(const HeapObject obj) const {
- return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
- }
-
- V8_INLINE bool IsBlackOrGrey(const HeapObject obj) const {
- return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
- }
-
- V8_INLINE bool WhiteToGrey(HeapObject obj) {
- return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj));
- }
-
- V8_INLINE bool WhiteToBlack(HeapObject obj) {
- return WhiteToGrey(obj) && GreyToBlack(obj);
- }
-
- V8_INLINE bool GreyToBlack(HeapObject obj) {
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(obj);
- MarkBit markbit = MarkBitFrom(chunk, obj.address());
- if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
- static_cast<ConcreteState*>(this)->IncrementLiveBytes(
- MemoryChunk::cast(chunk), obj.Size(cage_base()));
- return true;
- }
-
- V8_INLINE bool GreyToBlackUnaccounted(HeapObject obj) {
- return Marking::GreyToBlack<access_mode>(MarkBitFrom(obj));
- }
-
- void ClearLiveness(MemoryChunk* chunk) {
- static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
- static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
- }
-
- void AddStrongReferenceForReferenceSummarizer(HeapObject host,
- HeapObject obj) {
- // This is not a reference summarizer, so there is nothing to do here.
- }
-
- void AddWeakReferenceForReferenceSummarizer(HeapObject host, HeapObject obj) {
- // This is not a reference summarizer, so there is nothing to do here.
- }
-
- private:
-#if V8_COMPRESS_POINTERS
- const PtrComprCageBase cage_base_;
-#endif // V8_COMPRESS_POINTERS
-};
-
// The base class for all marking visitors. It implements marking logic with
// support of bytecode flushing, embedder tracing, and weak references.
//
@@ -153,7 +52,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
code_flush_mode_(code_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
should_keep_ages_unchanged_(should_keep_ages_unchanged),
- is_shared_heap_(heap->IsShared())
+ should_mark_shared_heap_(heap->ShouldMarkSharedHeap())
#ifdef V8_ENABLE_SANDBOX
,
external_pointer_table_(&heap->isolate()->external_pointer_table()),
@@ -220,7 +119,10 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
#endif
}
- bool is_shared_heap() { return is_shared_heap_; }
+ bool ShouldMarkObject(HeapObject object) const {
+ if (should_mark_shared_heap_) return true;
+ return !object.InSharedHeap();
+ }
// Marks the object grey and pushes it on the marking work list.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
@@ -289,7 +191,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
const base::EnumSet<CodeFlushMode> code_flush_mode_;
const bool is_embedder_tracing_enabled_;
const bool should_keep_ages_unchanged_;
- const bool is_shared_heap_;
+ const bool should_mark_shared_heap_;
#ifdef V8_ENABLE_SANDBOX
ExternalPointerTable* const external_pointer_table_;
ExternalPointerTable* const shared_external_pointer_table_;
diff --git a/deps/v8/src/heap/memory-allocator.cc b/deps/v8/src/heap/memory-allocator.cc
index 381eba7bf6..dd9afbdfa6 100644
--- a/deps/v8/src/heap/memory-allocator.cc
+++ b/deps/v8/src/heap/memory-allocator.cc
@@ -777,7 +777,6 @@ const MemoryChunk* MemoryAllocator::LookupChunkContainingAddress(
it != normal_pages_.end()) {
// The chunk is a normal page.
DCHECK_LE(chunk->address(), addr);
- DCHECK_GT(chunk->area_end(), addr);
if (chunk->Contains(addr)) return *it;
} else if (auto it = large_pages_.upper_bound(static_cast<LargePage*>(chunk));
it != large_pages_.begin()) {
diff --git a/deps/v8/src/heap/memory-allocator.h b/deps/v8/src/heap/memory-allocator.h
index 0485a7fe8c..ed6e4c82fa 100644
--- a/deps/v8/src/heap/memory-allocator.h
+++ b/deps/v8/src/heap/memory-allocator.h
@@ -265,7 +265,8 @@ class MemoryAllocator {
// Return the normal or large page that contains this address, if it is owned
// by this heap, otherwise a nullptr.
- const MemoryChunk* LookupChunkContainingAddress(Address addr) const;
+ V8_EXPORT_PRIVATE const MemoryChunk* LookupChunkContainingAddress(
+ Address addr) const;
// Insert and remove normal and large pages that are owned by this heap.
void RecordNormalPageCreated(const Page& page);
diff --git a/deps/v8/src/heap/memory-chunk-layout.cc b/deps/v8/src/heap/memory-chunk-layout.cc
index ff2dbd915f..e81aaec8f3 100644
--- a/deps/v8/src/heap/memory-chunk-layout.cc
+++ b/deps/v8/src/heap/memory-chunk-layout.cc
@@ -42,7 +42,8 @@ size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
}
intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
- return RoundUp(MemoryChunk::kHeaderSize + Bitmap::kSize, kDoubleSize);
+ return RoundUp(MemoryChunk::kHeaderSize + Bitmap::kSize,
+ ALIGN_TO_ALLOCATION_ALIGNMENT(kDoubleSize));
}
size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
diff --git a/deps/v8/src/heap/memory-chunk-layout.h b/deps/v8/src/heap/memory-chunk-layout.h
index 053f35f309..2e1d0e52bb 100644
--- a/deps/v8/src/heap/memory-chunk-layout.h
+++ b/deps/v8/src/heap/memory-chunk-layout.h
@@ -72,6 +72,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
FIELD(ObjectStartBitmap, ObjectStartBitmap),
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
+ FIELD(size_t, WasUsedForAllocation),
kMarkingBitmapOffset,
kMemoryChunkHeaderSize = kMarkingBitmapOffset,
kMemoryChunkHeaderStart = kSlotSetOffset,
@@ -89,6 +90,8 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
static int MaxRegularCodeObjectSize();
+
+ static_assert(kMemoryChunkHeaderSize % alignof(size_t) == 0);
};
} // namespace internal
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index 0a5ffa17b9..43749fed27 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -9,6 +9,7 @@
#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk-layout.h"
@@ -162,8 +163,7 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
categories_ = nullptr;
- heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(this,
- 0);
+ heap->non_atomic_marking_state()->SetLiveBytes(this, 0);
if (executable == EXECUTABLE) {
SetFlag(IS_EXECUTABLE);
if (heap->write_protect_code_memory()) {
@@ -195,7 +195,10 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
}
// All pages of a shared heap need to be marked with this flag.
- if (heap->IsShared()) SetFlag(MemoryChunk::IN_SHARED_HEAP);
+ if (heap->IsShared() || owner()->identity() == SHARED_SPACE ||
+ owner()->identity() == SHARED_LO_SPACE) {
+ SetFlag(MemoryChunk::IN_SHARED_HEAP);
+ }
#ifdef DEBUG
ValidateOffsets(this);
@@ -233,6 +236,7 @@ void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
// MemoryChunk implementation
void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
+ DCHECK(SweepingDone());
if (mutex_ != nullptr) {
delete mutex_;
mutex_ = nullptr;
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index 042072450d..906ff6a23e 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -220,6 +220,10 @@ class MemoryChunk : public BasicMemoryChunk {
}
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
+ void MarkWasUsedForAllocation() { was_used_for_allocation_ = true; }
+ void ClearWasUsedForAllocation() { was_used_for_allocation_ = false; }
+ bool WasUsedForAllocation() const { return was_used_for_allocation_; }
+
protected:
// Release all memory allocated by the chunk. Should be called when memory
// chunk is about to be freed.
@@ -287,6 +291,10 @@ class MemoryChunk : public BasicMemoryChunk {
ObjectStartBitmap object_start_bitmap_;
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
+ // Marks a chunk that was used for allocation since it was last swept. Used
+ // only for new space pages.
+ size_t was_used_for_allocation_ = false;
+
private:
friend class ConcurrentMarkingState;
friend class MarkingState;
diff --git a/deps/v8/src/heap/new-spaces-inl.h b/deps/v8/src/heap/new-spaces-inl.h
index e4648502d6..14c6753804 100644
--- a/deps/v8/src/heap/new-spaces-inl.h
+++ b/deps/v8/src/heap/new-spaces-inl.h
@@ -61,6 +61,7 @@ V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
V8_INLINE bool SemiSpaceNewSpace::EnsureAllocation(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin,
int* out_max_aligned_size) {
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
#if DEBUG
VerifyTop();
@@ -116,22 +117,21 @@ V8_INLINE bool PagedSpaceForNewSpace::EnsureAllocation(
// -----------------------------------------------------------------------------
// SemiSpaceObjectIterator
+SemiSpaceObjectIterator::SemiSpaceObjectIterator(const SemiSpaceNewSpace* space)
+ : current_(space->first_allocatable_address()) {}
+
HeapObject SemiSpaceObjectIterator::Next() {
- while (current_ != limit_) {
+ while (true) {
if (Page::IsAlignedToPageSize(current_)) {
Page* page = Page::FromAllocationAreaAddress(current_);
page = page->next_page();
- DCHECK(page);
+ if (page == nullptr) return HeapObject();
current_ = page->area_start();
- if (current_ == limit_) return HeapObject();
}
HeapObject object = HeapObject::FromAddress(current_);
- current_ += object.Size();
- if (!object.IsFreeSpaceOrFiller()) {
- return object;
- }
+ current_ += ALIGN_TO_ALLOCATION_ALIGNMENT(object.Size());
+ if (!object.IsFreeSpaceOrFiller()) return object;
}
- return HeapObject();
}
} // namespace internal
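// Illustrative sketch (not from the patch): the rewritten
// SemiSpaceObjectIterator::Next() above walks a linked list of pages and skips
// filler objects, terminating when it runs out of pages. A simplified,
// self-contained analogue over plain containers (all names hypothetical):
#include <cstddef>
#include <vector>

namespace sketch {

struct Object {
  bool is_filler;
  size_t size;  // assumed to be allocation-aligned already
};

struct Page {
  std::vector<Object> objects;
  const Page* next = nullptr;
};

class ObjectIterator {
 public:
  explicit ObjectIterator(const Page* first) : page_(first) {}

  // Returns the next non-filler object, or nullptr once all pages are
  // exhausted (mirrors returning an empty HeapObject).
  const Object* Next() {
    while (page_ != nullptr) {
      while (index_ < page_->objects.size()) {
        const Object& o = page_->objects[index_++];
        if (!o.is_filler) return &o;
      }
      page_ = page_->next;  // advance to the next page; may be null
      index_ = 0;
    }
    return nullptr;
  }

 private:
  const Page* page_;
  size_t index_ = 0;
};

}  // namespace sketch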
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index 4ba1f78891..ad69308c73 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -7,9 +7,12 @@
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/array-buffer-sweeper.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/marking-state-inl.h"
+#include "src/heap/marking-state.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/safepoint.h"
@@ -26,10 +29,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->list_node().Initialize();
if (v8_flags.minor_mc) {
- heap()
- ->minor_mark_compact_collector()
- ->non_atomic_marking_state()
- ->ClearLiveness(page);
+ heap()->non_atomic_marking_state()->ClearLiveness(page);
}
page->InitializationMemoryFence();
return page;
@@ -76,8 +76,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
}
// Add more pages if we have less than expected_pages.
- NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
+ NonAtomicMarkingState* marking_state = heap()->non_atomic_marking_state();
while (actual_pages < expected_pages) {
actual_pages++;
current_page = heap()->memory_allocator()->AllocatePage(
@@ -133,6 +132,8 @@ bool SemiSpace::Commit() {
}
memory_chunk_list_.PushBack(new_page);
IncrementCommittedPhysicalMemory(new_page->CommittedPhysicalMemory());
+ heap()->CreateFillerObjectAt(new_page->area_start(),
+ static_cast<int>(new_page->area_size()));
}
Reset();
AccountCommitted(target_capacity_);
@@ -181,8 +182,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
DCHECK(IsAligned(delta, AllocatePageSize()));
const int delta_pages = static_cast<int>(delta / Page::kPageSize);
DCHECK(last_page());
- NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
+ NonAtomicMarkingState* marking_state = heap()->non_atomic_marking_state();
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Page* new_page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
@@ -195,6 +195,8 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
IncrementCommittedPhysicalMemory(new_page->CommittedPhysicalMemory());
// Duplicate the flags that was set on the old page.
new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
+ heap()->CreateFillerObjectAt(new_page->area_start(),
+ static_cast<int>(new_page->area_size()));
}
AccountCommitted(delta);
target_capacity_ = new_capacity;
@@ -427,20 +429,6 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
#endif
// -----------------------------------------------------------------------------
-// SemiSpaceObjectIterator implementation.
-
-SemiSpaceObjectIterator::SemiSpaceObjectIterator(
- const SemiSpaceNewSpace* space) {
- Initialize(space->first_allocatable_address(), space->top());
-}
-
-void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
- SemiSpace::AssertValidRange(start, end);
- current_ = start;
- limit_ = end;
-}
-
-// -----------------------------------------------------------------------------
// NewSpace implementation
NewSpace::NewSpace(Heap* heap, LinearAllocationArea& allocation_info)
@@ -481,8 +469,7 @@ void NewSpace::VerifyTop() const {
// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page,
- Address current_address,
- Address stop_iteration_at_address) const {
+ Address current_address) const {
DCHECK(current_page->ContainsLimit(current_address));
size_t external_space_bytes[kNumTypes];
@@ -496,13 +483,8 @@ void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page,
PtrComprCageBase cage_base(isolate);
VerifyPointersVisitor visitor(heap());
const Page* page = current_page;
- while (current_address != stop_iteration_at_address) {
+ while (true) {
if (!Page::IsAlignedToPageSize(current_address)) {
- // The allocation pointer should not be in the middle of an object.
- CHECK_IMPLIES(!v8_flags.minor_mc,
- !Page::FromAddress(current_address)->ContainsLimit(top()) ||
- current_address < top());
-
HeapObject object = HeapObject::FromAddress(current_address);
// The first word should be a map, and we expect all map pointers to
@@ -530,7 +512,7 @@ void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page,
string_size;
}
- current_address += size;
+ current_address += ALIGN_TO_ALLOCATION_ALIGNMENT(size);
} else {
// At end of page, switch to next page.
page = page->next_page();
@@ -554,7 +536,7 @@ void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page,
}
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
- page->object_start_bitmap()->Verify();
+ current_page->object_start_bitmap()->Verify();
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
}
#endif // VERIFY_HEAP
@@ -562,6 +544,7 @@ void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page,
void NewSpace::PromotePageToOldSpace(Page* page) {
DCHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
DCHECK(page->InYoungGeneration());
+ page->ClearWasUsedForAllocation();
RemovePage(page);
Page* new_page = Page::ConvertNewToOld(page);
DCHECK(!new_page->InYoungGeneration());
@@ -660,6 +643,10 @@ void SemiSpaceNewSpace::UpdateLinearAllocationArea(Address known_top) {
linear_area_original_data_.set_original_top_release(top());
}
+ // The linear allocation area should reach the end of the page, so no filler
+ // object is needed there to make the page iterable.
+ DCHECK_EQ(limit(), to_space_.page_high());
+
to_space_.AddRangeToActiveSystemPages(top(), limit());
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -670,8 +657,7 @@ void SemiSpaceNewSpace::ResetLinearAllocationArea() {
to_space_.Reset();
UpdateLinearAllocationArea();
// Clear all mark-bits in the to-space.
- NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
+ NonAtomicMarkingState* marking_state = heap()->non_atomic_marking_state();
for (Page* p : to_space_) {
marking_state->ClearLiveness(p);
// Concurrent marking may have local live bytes for this page.
@@ -680,12 +666,18 @@ void SemiSpaceNewSpace::ResetLinearAllocationArea() {
}
void SemiSpaceNewSpace::UpdateInlineAllocationLimit(size_t min_size) {
- Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
+ Address new_limit = ComputeLimit(top(), to_space_.page_high(),
+ ALIGN_TO_ALLOCATION_ALIGNMENT(min_size));
DCHECK_LE(top(), new_limit);
DCHECK_LE(new_limit, to_space_.page_high());
allocation_info_.SetLimit(new_limit);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+ // Add a filler object after the linear allocation area (if there is space
+ // left), to ensure that the page will be iterable.
+ heap()->CreateFillerObjectAt(
+ limit(), static_cast<int>(to_space_.page_high() - limit()));
+
#if DEBUG
VerifyTop();
#endif
@@ -770,7 +762,7 @@ void SemiSpaceNewSpace::Verify(Isolate* isolate) const {
Address current = to_space_.first_page()->area_start();
CHECK_EQ(current, to_space_.space_start());
- VerifyImpl(isolate, Page::FromAllocationAreaAddress(current), current, top());
+ VerifyImpl(isolate, Page::FromAllocationAreaAddress(current), current);
// Check semi-spaces.
CHECK_EQ(from_space_.id(), kFromSpace);
@@ -780,6 +772,37 @@ void SemiSpaceNewSpace::Verify(Isolate* isolate) const {
}
#endif // VERIFY_HEAP
+void SemiSpaceNewSpace::MakeIterable() {
+ MakeAllPagesInFromSpaceIterable();
+ MakeUnusedPagesInToSpaceIterable();
+}
+
+void SemiSpaceNewSpace::MakeAllPagesInFromSpaceIterable() {
+ if (!IsFromSpaceCommitted()) return;
+
+ // Fix all pages in the "from" semispace.
+ for (Page* page : from_space()) {
+ heap()->CreateFillerObjectAt(page->area_start(),
+ static_cast<int>(page->area_size()));
+ }
+}
+
+void SemiSpaceNewSpace::MakeUnusedPagesInToSpaceIterable() {
+ PageIterator it(to_space().current_page());
+
+ // Fix the current page, above the LAB.
+ DCHECK_NOT_NULL(*it);
+ DCHECK((*it)->Contains(limit()));
+ heap()->CreateFillerObjectAt(limit(),
+ static_cast<int>((*it)->area_end() - limit()));
+
+ // Fix the remaining unused pages in the "to" semispace.
+ for (Page* page = *(++it); page != nullptr; page = *(++it)) {
+ heap()->CreateFillerObjectAt(page->area_start(),
+ static_cast<int>(page->area_size()));
+ }
+}
+
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
void SemiSpaceNewSpace::ClearUnusedObjectStartBitmaps() {
if (!IsFromSpaceCommitted()) return;
@@ -918,10 +941,7 @@ Page* PagedSpaceForNewSpace::InitializePage(MemoryChunk* chunk) {
page->ResetAllocationStatistics();
page->SetFlags(Page::TO_PAGE);
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
- heap()
- ->minor_mark_compact_collector()
- ->non_atomic_marking_state()
- ->ClearLiveness(page);
+ heap()->non_atomic_marking_state()->ClearLiveness(page);
page->AllocateFreeListCategories();
page->InitializeFreeListCategories();
page->list_node().Initialize();
@@ -941,23 +961,29 @@ void PagedSpaceForNewSpace::Grow() {
CHECK(EnsureCurrentCapacity());
}
-void PagedSpaceForNewSpace::Shrink() {
- target_capacity_ =
+bool PagedSpaceForNewSpace::StartShrinking() {
+ DCHECK_EQ(current_capacity_, target_capacity_);
+ DCHECK(heap()->tracer()->IsInAtomicPause());
+ size_t new_target_capacity =
RoundUp(std::max(initial_capacity_, 2 * Size()), Page::kPageSize);
- if (target_capacity_ < current_capacity_) {
- // Try to shrink by freeing empty pages.
- for (Page* page = first_page();
- page != last_page() && (current_capacity_ > target_capacity_);) {
- Page* current_page = page;
- page = page->next_page();
- if (current_page->allocated_bytes() == 0) {
- memory_chunk_list().Remove(current_page);
- ReleasePage(current_page);
- }
+ if (new_target_capacity > target_capacity_) return false;
+ target_capacity_ = new_target_capacity;
+ return true;
+}
+
+void PagedSpaceForNewSpace::FinishShrinking() {
+ DCHECK(heap()->tracer()->IsInAtomicPause());
+ if (current_capacity_ > target_capacity_) {
+#if DEBUG
+ // If `current_capacity_` is higher than `target_capacity_`, i.e. the
+ // space could not be shrunk all the way down to `target_capacity_`, it
+ // must mean that all pages contain live objects.
+ for (Page* page : *this) {
+ DCHECK_NE(0, heap()->non_atomic_marking_state()->live_bytes(page));
}
+#endif // DEBUG
+ target_capacity_ = current_capacity_;
}
- // Shrinking to target capacity may not have been possible.
- target_capacity_ = current_capacity_;
}
void PagedSpaceForNewSpace::UpdateInlineAllocationLimit(size_t size_in_bytes) {
@@ -982,15 +1008,7 @@ void PagedSpaceForNewSpace::ReleasePage(Page* page) {
PagedSpaceBase::ReleasePage(page);
}
-bool PagedSpaceForNewSpace::AddFreshPage() {
- DCHECK_LE(TotalCapacity(), MaximumCapacity());
- if (current_capacity_ >= target_capacity_) return false;
- return EnsureCurrentCapacity();
-}
-
bool PagedSpaceForNewSpace::PreallocatePages() {
- // Verify that the free space map is already initialized. Otherwise, new free
- // list entries will be invalid.
while (current_capacity_ < target_capacity_) {
if (!TryExpandImpl()) return false;
}
@@ -1001,7 +1019,8 @@ bool PagedSpaceForNewSpace::PreallocatePages() {
bool PagedSpaceForNewSpace::EnsureCurrentCapacity() {
// Verify that the free space map is already initialized. Otherwise, new free
// list entries will be invalid.
- DCHECK_NE(0, heap()->isolate()->root(RootIndex::kFreeSpaceMap).ptr());
+ DCHECK_NE(kNullAddress,
+ heap()->isolate()->root(RootIndex::kFreeSpaceMap).ptr());
return PreallocatePages();
}
@@ -1022,6 +1041,10 @@ void PagedSpaceForNewSpace::Verify(Isolate* isolate,
}
#endif // VERIFY_HEAP
+bool PagedSpaceForNewSpace::ShouldReleasePage() const {
+ return current_capacity_ > target_capacity_;
+}
+
// -----------------------------------------------------------------------------
// PagedNewSpace implementation
@@ -1044,10 +1067,7 @@ PagedNewSpace::~PagedNewSpace() {
void PagedNewSpace::Verify(Isolate* isolate) const {
const Page* first_page = paged_space_.first_page();
- if (first_page) {
- // No bailout needed since all pages are iterable.
- VerifyImpl(isolate, first_page, first_page->area_start(), kNullAddress);
- }
+ if (first_page) VerifyImpl(isolate, first_page, first_page->area_start());
// Check paged-spaces.
VerifyPointersVisitor visitor(heap());
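// Illustrative sketch (not from the patch): StartShrinking()/FinishShrinking()
// above split capacity shrinking into a request made at the start of the
// atomic pause and a reconciliation after pages have (or have not) been
// released. A self-contained model with hypothetical names; page-size rounding
// is omitted for brevity.
#include <algorithm>
#include <cstddef>

namespace sketch {

class ShrinkableSpace {
 public:
  ShrinkableSpace(size_t initial, size_t current)
      : initial_capacity_(initial),
        current_capacity_(current),
        target_capacity_(current) {}

  // Phase 1: compute a smaller target from current usage; refuse to "shrink"
  // upwards.
  bool StartShrinking(size_t used_bytes) {
    const size_t new_target = std::max(initial_capacity_, 2 * used_bytes);
    if (new_target > target_capacity_) return false;
    target_capacity_ = new_target;
    return true;
  }

  // Phase 2: if not enough pages could be released, accept the current
  // capacity as the new target.
  void FinishShrinking() {
    target_capacity_ = std::max(target_capacity_, current_capacity_);
  }

  void ReleasePage(size_t page_size) { current_capacity_ -= page_size; }
  bool ShouldReleasePage() const {
    return current_capacity_ > target_capacity_;
  }

 private:
  size_t initial_capacity_;
  size_t current_capacity_;
  size_t target_capacity_;
};

}  // namespace sketch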
diff --git a/deps/v8/src/heap/new-spaces.h b/deps/v8/src/heap/new-spaces.h
index ebfca7edc0..b0e61af300 100644
--- a/deps/v8/src/heap/new-spaces.h
+++ b/deps/v8/src/heap/new-spaces.h
@@ -8,6 +8,7 @@
#include <atomic>
#include <memory>
+#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
@@ -217,24 +218,17 @@ class SemiSpace final : public Space {
};
// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
-// semispace of the heap's new space. It iterates over the objects in the
-// semispace from a given start address (defaulting to the bottom of the
-// semispace) to the top of the semispace. New objects allocated after the
-// iterator is created are not iterated.
+// semispace of the heap's new space.
class SemiSpaceObjectIterator : public ObjectIterator {
public:
- // Create an iterator over the allocated objects in the given to-space.
- explicit SemiSpaceObjectIterator(const SemiSpaceNewSpace* space);
+ // Create an iterator over the objects in the given to-space.
+ inline explicit SemiSpaceObjectIterator(const SemiSpaceNewSpace* space);
inline HeapObject Next() final;
private:
- void Initialize(Address start, Address end);
-
// The current iteration point.
Address current_;
- // The end of iteration.
- Address limit_;
};
class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
@@ -294,14 +288,15 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
#ifdef VERIFY_HEAP
virtual void Verify(Isolate* isolate) const = 0;
- // VerifyImpl verifies objects on the space starting from |page| and
- // |address|. |address| should be a valid limit on |page| (see
- // BasicMemoryChunk::ContainsLimit).
+ // VerifyImpl verifies objects on the space starting from |current_page| and
+ // |current_address|. |current_address| should be a valid limit on
+ // |current_page| (see BasicMemoryChunk::ContainsLimit).
void VerifyImpl(Isolate* isolate, const Page* current_page,
- Address current_address,
- Address stop_iteration_at_address) const;
+ Address current_address) const;
#endif
+ virtual void MakeIterable() = 0;
+
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
virtual void ClearUnusedObjectStartBitmaps() = 0;
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
@@ -483,6 +478,11 @@ class V8_EXPORT_PRIVATE SemiSpaceNewSpace final : public NewSpace {
void Print() override { to_space_.Print(); }
#endif
+ void MakeIterable() override;
+
+ void MakeAllPagesInFromSpaceIterable();
+ void MakeUnusedPagesInToSpaceIterable();
+
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
void ClearUnusedObjectStartBitmaps() override;
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
@@ -565,7 +565,9 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
void Grow();
// Shrink the capacity of the space.
- void Shrink();
+ void Shrink() { UNREACHABLE(); }
+ bool StartShrinking();
+ void FinishShrinking();
size_t AllocatedSinceLastGC() const {
    // allocated since last gc is computed as allocated linear areas minus
@@ -599,7 +601,7 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
// Returns false if this isn't possible or reasonable (i.e., there
// are no pages, or the current page is already empty), or true
// if successful.
- bool AddFreshPage();
+ bool AddFreshPage() { return false; }
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
@@ -625,10 +627,14 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
void Verify(Isolate* isolate, ObjectVisitor* visitor) const final;
#endif
+ void MakeIterable() { free_list()->RepairLists(heap()); }
+
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
void ClearUnusedObjectStartBitmaps() {}
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
+ bool ShouldReleasePage() const;
+
private:
bool PreallocatePages();
@@ -664,11 +670,13 @@ class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
// Shrink the capacity of the space.
void Shrink() final { paged_space_.Shrink(); }
+ bool StartShrinking() { return paged_space_.StartShrinking(); }
+ void FinishShrinking() { paged_space_.FinishShrinking(); }
// Return the allocated bytes in the active space.
size_t Size() const final { return paged_space_.Size(); }
- size_t SizeOfObjects() const final { return Size(); }
+ size_t SizeOfObjects() const final { return paged_space_.SizeOfObjects(); }
// Return the allocatable capacity of the space.
size_t Capacity() const final { return paged_space_.Capacity(); }
@@ -781,7 +789,9 @@ class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
paged_space_.MakeLinearAllocationAreaIterable();
}
- PagedSpaceBase* paged_space() { return &paged_space_; }
+ PagedSpaceForNewSpace* paged_space() { return &paged_space_; }
+
+ void MakeIterable() override { paged_space_.MakeIterable(); }
#ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
void ClearUnusedObjectStartBitmaps() override {
@@ -789,6 +799,12 @@ class V8_EXPORT_PRIVATE PagedNewSpace final : public NewSpace {
}
#endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB
+ // All operations on `memory_chunk_list_` should go through `paged_space_`.
+ heap::List<MemoryChunk>& memory_chunk_list() final { UNREACHABLE(); }
+
+ bool ShouldReleasePage() const { return paged_space_.ShouldReleasePage(); }
+ void ReleasePage(Page* page) { paged_space_.ReleasePage(page); }
+
private:
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index d28d03dc49..e15b5f332e 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -15,6 +15,7 @@
#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/marking-state-inl.h"
#include "src/logging/counters.h"
#include "src/objects/compilation-cache-table-inl.h"
#include "src/objects/heap-object.h"
@@ -448,9 +449,9 @@ class ObjectStatsCollectorImpl {
return field_stats_collector_.cage_base();
}
- Heap* heap_;
- ObjectStats* stats_;
- NonAtomicMarkingState* marking_state_;
+ Heap* const heap_;
+ ObjectStats* const stats_;
+ NonAtomicMarkingState* const marking_state_;
std::unordered_set<HeapObject, Object::Hasher, Object::KeyEqualSafe>
virtual_objects_;
std::unordered_set<Address> external_resources_;
@@ -461,8 +462,7 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
ObjectStats* stats)
: heap_(heap),
stats_(stats),
- marking_state_(
- heap->mark_compact_collector()->non_atomic_marking_state()),
+ marking_state_(heap->non_atomic_marking_state()),
field_stats_collector_(
heap_, &stats->tagged_fields_count_, &stats->embedder_fields_count_,
&stats->inobject_smi_fields_count_,
@@ -1089,8 +1089,7 @@ class ObjectStatsVisitor {
ObjectStatsCollectorImpl::Phase phase)
: live_collector_(live_collector),
dead_collector_(dead_collector),
- marking_state_(
- heap->mark_compact_collector()->non_atomic_marking_state()),
+ marking_state_(heap->non_atomic_marking_state()),
phase_(phase) {}
void Visit(HeapObject obj) {
@@ -1105,9 +1104,9 @@ class ObjectStatsVisitor {
}
private:
- ObjectStatsCollectorImpl* live_collector_;
- ObjectStatsCollectorImpl* dead_collector_;
- NonAtomicMarkingState* marking_state_;
+ ObjectStatsCollectorImpl* const live_collector_;
+ ObjectStatsCollectorImpl* const dead_collector_;
+ NonAtomicMarkingState* const marking_state_;
ObjectStatsCollectorImpl::Phase phase_;
};
diff --git a/deps/v8/src/heap/paged-spaces-inl.h b/deps/v8/src/heap/paged-spaces-inl.h
index 341cc40569..6283e00540 100644
--- a/deps/v8/src/heap/paged-spaces-inl.h
+++ b/deps/v8/src/heap/paged-spaces-inl.h
@@ -29,7 +29,7 @@ HeapObject PagedSpaceObjectIterator::Next() {
HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
HeapObject obj = HeapObject::FromAddress(cur_addr_);
- const int obj_size = obj.Size(cage_base());
+ const int obj_size = ALIGN_TO_ALLOCATION_ALIGNMENT(obj.Size(cage_base()));
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller(cage_base())) {
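Several hunks in this change wrap object sizes in ALIGN_TO_ALLOCATION_ALIGNMENT so that iterators advance by aligned sizes. As a rough standalone illustration (the helper and constant below are assumptions, not the V8 macro), rounding up to a power-of-two alignment is plain bit masking:

#include <cassert>
#include <cstddef>

// Hypothetical helper: rounds `size` up to the next multiple of `alignment`,
// which must be a power of two.
constexpr std::size_t RoundUpToAlignment(std::size_t size, std::size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  constexpr std::size_t kAllocationAlignment = 8;  // assumed value for the sketch
  assert(RoundUpToAlignment(1, kAllocationAlignment) == 8);
  assert(RoundUpToAlignment(8, kAllocationAlignment) == 8);
  assert(RoundUpToAlignment(13, kAllocationAlignment) == 16);
  // An object iterator advances its cursor by the aligned size:
  std::size_t cursor = 0;
  std::size_t object_size = 13;
  cursor += RoundUpToAlignment(object_size, kAllocationAlignment);
  return cursor == 16 ? 0 : 1;
}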
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index c2c6265824..7159a1e877 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -14,8 +14,11 @@
#include "src/execution/vm-state-inl.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/array-buffer-sweeper.h"
+#include "src/heap/gc-tracer-inl.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk-layout.h"
@@ -139,12 +142,14 @@ void PagedSpaceBase::TearDown() {
accounting_stats_.Clear();
}
-void PagedSpaceBase::RefillFreeList(Sweeper* sweeper) {
+void PagedSpaceBase::RefillFreeList() {
// Any PagedSpace might invoke RefillFreeList. We filter all but our old
// generation spaces out.
DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
- identity() == MAP_SPACE || identity() == NEW_SPACE);
+ identity() == MAP_SPACE || identity() == NEW_SPACE ||
+ identity() == SHARED_SPACE);
+ Sweeper* sweeper = heap()->sweeper();
size_t added = 0;
{
@@ -283,8 +288,7 @@ bool PagedSpaceBase::ContainsSlow(Address addr) const {
void PagedSpaceBase::RefineAllocatedBytesAfterSweeping(Page* page) {
CHECK(page->SweepingDone());
- auto marking_state =
- heap()->mark_compact_collector()->non_atomic_marking_state();
+ auto marking_state = heap()->non_atomic_marking_state();
// The live_byte on the page was accounted in the space allocated
// bytes counter. After sweeping allocated_bytes() contains the
// accurate live byte count on the page.
@@ -426,9 +430,14 @@ int PagedSpaceBase::CountTotalPages() const {
void PagedSpaceBase::SetLinearAllocationArea(Address top, Address limit) {
SetTopAndLimit(top, limit);
- if (top != kNullAddress && top != limit && identity() != NEW_SPACE &&
- heap()->incremental_marking()->black_allocation()) {
- Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
+ if (top != kNullAddress && top != limit) {
+ Page* page = Page::FromAllocationAreaAddress(top);
+ if (identity() == NEW_SPACE) {
+ page->MarkWasUsedForAllocation();
+ } else if (heap()->incremental_marking()->black_allocation()) {
+ DCHECK_NE(NEW_SPACE, identity());
+ page->CreateBlackArea(top, limit);
+ }
}
}
@@ -534,21 +543,22 @@ void PagedSpaceBase::FreeLinearAllocationArea() {
GetUnprotectMemoryOrigin(is_compaction_space()));
}
- DCHECK_IMPLIES(current_limit - current_top >= 2 * kTaggedSize,
- heap()->incremental_marking()->marking_state()->IsWhite(
- HeapObject::FromAddress(current_top)));
+ DCHECK_IMPLIES(
+ current_limit - current_top >= 2 * kTaggedSize,
+ heap()->marking_state()->IsWhite(HeapObject::FromAddress(current_top)));
Free(current_top, current_limit - current_top,
SpaceAccountingMode::kSpaceAccounted);
}
void PagedSpaceBase::ReleasePage(Page* page) {
- DCHECK_EQ(
- 0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
- page));
+ DCHECK(page->SweepingDone());
+ DCHECK_EQ(0, heap()->non_atomic_marking_state()->live_bytes(page));
DCHECK_EQ(page->owner(), this);
DCHECK_IMPLIES(identity() == NEW_SPACE, page->IsFlagSet(Page::TO_PAGE));
+ memory_chunk_list().Remove(page);
+
free_list_->EvictFreeListItems(page);
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
@@ -649,83 +659,13 @@ bool PagedSpaceBase::TryAllocationFromFreeListMain(size_t size_in_bytes,
}
base::Optional<std::pair<Address, size_t>>
-PagedSpaceBase::RawAllocateBackground(LocalHeap* local_heap,
- size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationOrigin origin) {
- DCHECK(!is_compaction_space());
- DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
- identity() == MAP_SPACE);
- DCHECK(origin == AllocationOrigin::kRuntime ||
- origin == AllocationOrigin::kGC);
- DCHECK_IMPLIES(!local_heap, origin == AllocationOrigin::kGC);
-
- base::Optional<std::pair<Address, size_t>> result =
- TryAllocationFromFreeListBackground(min_size_in_bytes, max_size_in_bytes,
- origin);
- if (result) return result;
-
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- // Sweeping is still in progress.
- if (collector->sweeping_in_progress()) {
- // First try to refill the free-list, concurrent sweeper threads
- // may have freed some objects in the meantime.
- RefillFreeList(collector->sweeper());
-
- // Retry the free list allocation.
- result = TryAllocationFromFreeListBackground(min_size_in_bytes,
- max_size_in_bytes, origin);
- if (result) return result;
-
- if (IsSweepingAllowedOnThread(local_heap)) {
- // Now contribute to sweeping from background thread and then try to
- // reallocate.
- const int kMaxPagesToSweep = 1;
- int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), Sweeper::SweepingMode::kLazyOrConcurrent,
- static_cast<int>(min_size_in_bytes), kMaxPagesToSweep);
-
- // Keep new space sweeping atomic.
- RefillFreeList(collector->sweeper());
-
- if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
- result = TryAllocationFromFreeListBackground(min_size_in_bytes,
- max_size_in_bytes, origin);
- if (result) return result;
- }
- }
- }
-
- if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
- heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
- result = TryExpandBackground(max_size_in_bytes);
- if (result) return result;
- }
-
- if (collector->sweeping_in_progress()) {
- // Complete sweeping for this space.
- if (IsSweepingAllowedOnThread(local_heap)) {
- collector->DrainSweepingWorklistForSpace(identity());
- }
-
- RefillFreeList(collector->sweeper());
-
- // Last try to acquire memory from free list.
- return TryAllocationFromFreeListBackground(min_size_in_bytes,
- max_size_in_bytes, origin);
- }
-
- return {};
-}
-
-base::Optional<std::pair<Address, size_t>>
PagedSpaceBase::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationOrigin origin) {
base::MutexGuard lock(&space_mutex_);
DCHECK_LE(min_size_in_bytes, max_size_in_bytes);
DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
- identity() == MAP_SPACE);
+ identity() == MAP_SPACE || identity() == SHARED_SPACE);
size_t new_node_size = 0;
FreeSpace new_node =
@@ -743,8 +683,6 @@ PagedSpaceBase::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
- heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
-
size_t used_size_in_bytes = std::min(new_node_size, max_size_in_bytes);
Address start = new_node.address();
@@ -764,12 +702,6 @@ PagedSpaceBase::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
return std::make_pair(start, used_size_in_bytes);
}
-bool PagedSpaceBase::IsSweepingAllowedOnThread(LocalHeap* local_heap) const {
- // Code space sweeping is only allowed on main thread.
- return (local_heap && local_heap->is_main_thread()) ||
- identity() != CODE_SPACE;
-}
-
#ifdef DEBUG
void PagedSpaceBase::Print() {}
#endif
@@ -788,6 +720,7 @@ void PagedSpaceBase::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
PtrComprCageBase cage_base(isolate);
for (const Page* page : *this) {
CHECK_EQ(page->owner(), this);
+ CHECK_IMPLIES(identity() != NEW_SPACE, !page->WasUsedForAllocation());
for (int i = 0; i < kNumTypes; i++) {
external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
@@ -866,7 +799,7 @@ void PagedSpaceBase::Verify(Isolate* isolate, ObjectVisitor* visitor) const {
}
void PagedSpaceBase::VerifyLiveBytes() const {
- MarkingState* marking_state = heap()->incremental_marking()->marking_state();
+ MarkingState* marking_state = heap()->marking_state();
PtrComprCageBase cage_base(heap()->isolate());
for (const Page* page : *this) {
CHECK(page->SweepingDone());
@@ -895,7 +828,7 @@ void PagedSpaceBase::VerifyCountersAfterSweeping(Heap* heap) const {
size_t real_allocated = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
if (!object.IsFreeSpaceOrFiller()) {
- real_allocated += object.Size(cage_base);
+ real_allocated += ALIGN_TO_ALLOCATION_ALIGNMENT(object.Size(cage_base));
}
}
total_allocated += page->allocated_bytes();
@@ -911,8 +844,7 @@ void PagedSpaceBase::VerifyCountersAfterSweeping(Heap* heap) const {
void PagedSpaceBase::VerifyCountersBeforeConcurrentSweeping() const {
size_t total_capacity = 0;
size_t total_allocated = 0;
- auto marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
+ auto marking_state = heap()->non_atomic_marking_state();
for (const Page* page : *this) {
size_t page_allocated =
page->SweepingDone()
@@ -940,11 +872,6 @@ void PagedSpaceBase::UpdateInlineAllocationLimit(size_t min_size) {
// -----------------------------------------------------------------------------
// OldSpace implementation
-void PagedSpaceBase::PrepareForMarkCompact() {
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_->Reset();
-}
-
bool PagedSpaceBase::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
VMState<GC> state(heap()->isolate());
RCS_SCOPE(heap()->isolate(),
@@ -989,21 +916,33 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
return false;
}
- MarkCompactCollector* collector = heap()->mark_compact_collector();
+ const bool is_main_thread =
+ heap()->IsMainThread() || heap()->IsSharedMainThread();
+ const auto sweeping_scope_id = is_main_thread
+ ? GCTracer::Scope::MC_SWEEP
+ : GCTracer::Scope::MC_BACKGROUND_SWEEPING;
+ const auto sweeping_scope_kind =
+ is_main_thread ? ThreadKind::kMain : ThreadKind::kBackground;
// Sweeping is still in progress.
- if (collector->sweeping_in_progress()) {
+ if (heap()->sweeping_in_progress()) {
// First try to refill the free-list, concurrent sweeper threads
// may have freed some objects in the meantime.
- RefillFreeList(collector->sweeper());
+ {
+ TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id, sweeping_scope_kind);
+ RefillFreeList();
+ }
// Retry the free list allocation.
if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
origin))
return true;
- if (ContributeToSweepingMain(size_in_bytes, kMaxPagesToSweep, size_in_bytes,
- origin))
- return true;
+ {
+ TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id, sweeping_scope_kind);
+ if (ContributeToSweepingMain(size_in_bytes, kMaxPagesToSweep,
+ size_in_bytes, origin))
+ return true;
+ }
}
if (is_compaction_space()) {
@@ -1029,8 +968,9 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
}
// Try sweeping all pages.
- if (ContributeToSweepingMain(0, 0, size_in_bytes, origin)) {
- return true;
+ {
+ TRACE_GC_EPOCH(heap()->tracer(), sweeping_scope_id, sweeping_scope_kind);
+ if (ContributeToSweepingMain(0, 0, size_in_bytes, origin)) return true;
}
if (heap()->gc_state() != Heap::NOT_IN_GC && !heap()->force_oom()) {
@@ -1054,11 +994,10 @@ bool PagedSpaceBase::ContributeToSweepingMain(int required_freed_bytes,
is_compaction_space() ? Sweeper::SweepingMode::kEagerDuringGC
: Sweeper::SweepingMode::kLazyOrConcurrent;
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
- collector->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
- required_freed_bytes, max_pages);
- RefillFreeList(collector->sweeper());
+ if (heap()->sweeping_in_progress()) {
+ heap()->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
+ required_freed_bytes, max_pages);
+ RefillFreeList();
return TryAllocationFromFreeListMain(size_in_bytes, origin);
}
return false;
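The RawRefillLabMain changes keep the overall slow-path order — refill the free list, retry, help sweeping, retry, then expand — while adding tracing scopes around the sweeping steps. A condensed, hypothetical sketch of that control flow, with callbacks standing in for the real sweeper, free-list, and expansion machinery:

#include <functional>

// Hypothetical slow-path skeleton: each step is a callback so that only the
// ordering is demonstrated here.
struct SlowPathHooks {
  std::function<bool()> try_allocate_from_free_list;
  std::function<bool()> sweeping_in_progress;
  std::function<void()> refill_free_list;
  std::function<bool()> contribute_to_sweeping;  // sweep a few pages, then retry
  std::function<bool()> try_expand;              // add a fresh page
};

bool RefillSlowPath(const SlowPathHooks& hooks) {
  if (hooks.sweeping_in_progress()) {
    // Concurrent sweepers may have freed memory in the meantime.
    hooks.refill_free_list();
    if (hooks.try_allocate_from_free_list()) return true;
    // Help sweeping and retry the free-list allocation.
    if (hooks.contribute_to_sweeping()) return true;
  }
  // Fall back to growing the space by a page.
  return hooks.try_expand();
}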
diff --git a/deps/v8/src/heap/paged-spaces.h b/deps/v8/src/heap/paged-spaces.h
index 7241a29b0e..70da63e53d 100644
--- a/deps/v8/src/heap/paged-spaces.h
+++ b/deps/v8/src/heap/paged-spaces.h
@@ -110,9 +110,6 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
// Does the space need executable memory?
Executability executable() const { return executable_; }
- // Prepares for a mark-compact GC.
- void PrepareForMarkCompact();
-
// Current capacity without growing (Size() + Available()).
size_t Capacity() const { return accounting_stats_.Capacity(); }
@@ -280,7 +277,7 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
// Refills the free list from the corresponding free list filled by the
// sweeper.
- void RefillFreeList(Sweeper* sweeper);
+ void RefillFreeList();
base::Mutex* mutex() { return &space_mutex_; }
@@ -321,6 +318,19 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
void ReduceActiveSystemPages(Page* page,
ActiveSystemPages active_system_pages);
+ // Allocates memory with the given size constraints from the space's free
+ // list.
+ V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
+ TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationOrigin origin);
+
+ // Expands the space by a single page from a background thread and allocates
+ // a memory area of the given size in it. If successful the method returns
+ // the address and size of the area.
+ base::Optional<std::pair<Address, size_t>> TryExpandBackground(
+ size_t size_in_bytes);
+
private:
class ConcurrentAllocationMutex {
public:
@@ -353,10 +363,6 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
bool HasPages() const { return first_page() != nullptr; }
- // Returns whether sweeping of this space is safe on this thread. Code space
- // sweeping is only allowed on the main thread.
- bool IsSweepingAllowedOnThread(LocalHeap* local_heap) const;
-
// Cleans up the space, frees all pages in this space except those belonging
// to the initial chunk, uncommits addresses in the initial chunk.
void TearDown();
@@ -366,12 +372,6 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
// size limit has been hit.
virtual Page* TryExpandImpl();
- // Expands the space by a single page from a background thread and allocates
- // a memory area of the given size in it. If successful the method returns
- // the address and size of the area.
- base::Optional<std::pair<Address, size_t>> TryExpandBackground(
- size_t size_in_bytes);
-
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
AllocationOrigin origin,
int* out_max_aligned_size) override;
@@ -395,11 +395,6 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
V8_WARN_UNUSED_RESULT bool RawRefillLabMain(int size_in_bytes,
AllocationOrigin origin);
- V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
- TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
- size_t max_size_in_bytes,
- AllocationOrigin origin);
-
V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
AllocationOrigin origin);
@@ -483,7 +478,9 @@ class CompactionSpaceCollection : public Malloced {
map_space_(heap, MAP_SPACE, Executability::NOT_EXECUTABLE,
compaction_space_kind),
code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
- compaction_space_kind) {}
+ compaction_space_kind),
+ shared_space_(heap, SHARED_SPACE, Executability::NOT_EXECUTABLE,
+ compaction_space_kind) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
@@ -493,6 +490,8 @@ class CompactionSpaceCollection : public Malloced {
return &map_space_;
case CODE_SPACE:
return &code_space_;
+ case SHARED_SPACE:
+ return &shared_space_;
default:
UNREACHABLE();
}
@@ -503,6 +502,7 @@ class CompactionSpaceCollection : public Malloced {
CompactionSpace old_space_;
CompactionSpace map_space_;
CompactionSpace code_space_;
+ CompactionSpace shared_space_;
};
// -----------------------------------------------------------------------------
@@ -554,7 +554,9 @@ class MapSpace final : public PagedSpace {
paged_allocation_info_) {}
int RoundSizeDownToObjectAlignment(int size) const override {
- if (base::bits::IsPowerOfTwo(Map::kSize)) {
+ if (V8_COMPRESS_POINTERS_8GB_BOOL) {
+ return RoundDown(size, kObjectAlignment8GbHeap);
+ } else if (base::bits::IsPowerOfTwo(Map::kSize)) {
return RoundDown(size, Map::kSize);
} else {
return (size / Map::kSize) * Map::kSize;
@@ -571,6 +573,32 @@ class MapSpace final : public PagedSpace {
LinearAllocationArea paged_allocation_info_;
};
+// -----------------------------------------------------------------------------
+// Shared space regular object space.
+
+class SharedSpace final : public PagedSpace {
+ public:
+ // Creates a shared space object. The constructor does not allocate pages
+ // from the OS.
+ explicit SharedSpace(Heap* heap)
+ : PagedSpace(heap, SHARED_SPACE, NOT_EXECUTABLE,
+ FreeList::CreateFreeList(), allocation_info) {}
+
+ static bool IsAtPageStart(Address addr) {
+ return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
+ MemoryChunkLayout::ObjectStartOffsetInDataPage();
+ }
+
+ size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
+ if (type == ExternalBackingStoreType::kArrayBuffer) return 0;
+ DCHECK_EQ(type, ExternalBackingStoreType::kExternalString);
+ return external_backing_store_bytes_[type];
+ }
+
+ private:
+ LinearAllocationArea allocation_info;
+};
+
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
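RoundSizeDownToObjectAlignment above gains a branch for the 8GB pointer-compression cage, where sizes snap down to a fixed object alignment instead of Map-sized multiples. A standalone sketch of the two rounding modes (the constants are illustrative assumptions, not the real build-dependent values):

#include <cstddef>

// Hypothetical constants for the sketch.
constexpr std::size_t kObjectAlignment8GbHeapSketch = 8;
constexpr std::size_t kMapSizeSketch = 40;  // not a power of two here

constexpr std::size_t RoundDownToAlignment(std::size_t size, std::size_t alignment) {
  return size & ~(alignment - 1);  // alignment must be a power of two
}

std::size_t RoundSizeDownToObjectAlignmentSketch(std::size_t size,
                                                 bool eight_gb_cage) {
  if (eight_gb_cage) {
    // With the 8GB cage, everything is aligned to a fixed object alignment.
    return RoundDownToAlignment(size, kObjectAlignment8GbHeapSketch);
  }
  // Otherwise round down to a multiple of the (possibly non-power-of-two)
  // map size.
  return (size / kMapSizeSketch) * kMapSizeSketch;
}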
diff --git a/deps/v8/src/heap/pretenuring-handler-inl.h b/deps/v8/src/heap/pretenuring-handler-inl.h
new file mode 100644
index 0000000000..7447b08b8b
--- /dev/null
+++ b/deps/v8/src/heap/pretenuring-handler-inl.h
@@ -0,0 +1,112 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PRETENURING_HANDLER_INL_H_
+#define V8_HEAP_PRETENURING_HANDLER_INL_H_
+
+#include "src/base/sanitizer/msan.h"
+#include "src/heap/memory-chunk.h"
+#include "src/heap/new-spaces.h"
+#include "src/heap/pretenuring-handler.h"
+#include "src/heap/spaces.h"
+#include "src/objects/allocation-site-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void PretenturingHandler::UpdateAllocationSite(
+ Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback) {
+ DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
+#ifdef DEBUG
+ BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
+ DCHECK_IMPLIES(chunk->IsToPage(),
+ v8_flags.minor_mc ||
+ chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION));
+ DCHECK_IMPLIES(!chunk->InYoungGeneration(),
+ chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
+#endif
+ if (!v8_flags.allocation_site_pretenuring ||
+ !AllocationSite::CanTrack(map.instance_type())) {
+ return;
+ }
+ AllocationMemento memento_candidate =
+ FindAllocationMemento<kForGC>(map, object);
+ if (memento_candidate.is_null()) return;
+
+ // The feedback is only cached here because, in the parallel case, we are not
+ // allowed to dereference the allocation site and have to postpone all checks
+ // until the data is actually merged.
+ Address key = memento_candidate.GetAllocationSiteUnchecked();
+ (*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
+}
+
+template <PretenturingHandler::FindMementoMode mode>
+AllocationMemento PretenturingHandler::FindAllocationMemento(
+ Map map, HeapObject object) {
+ Address object_address = object.address();
+ Address memento_address =
+ object_address + ALIGN_TO_ALLOCATION_ALIGNMENT(object.SizeFromMap(map));
+ Address last_memento_word_address = memento_address + kTaggedSize;
+ // If the memento would be on another page, bail out immediately.
+ if (!Page::OnSamePage(object_address, last_memento_word_address)) {
+ return AllocationMemento();
+ }
+ HeapObject candidate = HeapObject::FromAddress(memento_address);
+ ObjectSlot candidate_map_slot = candidate.map_slot();
+ // This fast check may peek at an uninitialized word. However, the slow check
+ // below (memento_address == top) ensures that this is safe. Mark the word as
+ // initialized to silence MemorySanitizer warnings.
+ MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
+ if (!candidate_map_slot.contains_map_value(
+ ReadOnlyRoots(heap_).allocation_memento_map().ptr())) {
+ return AllocationMemento();
+ }
+
+ // Bail out if the memento is below the age mark, which can happen when
+ // mementos survived because a page got moved within new space.
+ Page* object_page = Page::FromAddress(object_address);
+ if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
+ Address age_mark =
+ reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
+ if (!object_page->Contains(age_mark)) {
+ return AllocationMemento();
+ }
+ // Do an exact check in the case where the age mark is on the same page.
+ if (object_address < age_mark) {
+ return AllocationMemento();
+ }
+ }
+
+ AllocationMemento memento_candidate = AllocationMemento::cast(candidate);
+
+ // Depending on what the memento is used for, we might need to perform
+ // additional checks.
+ Address top;
+ switch (mode) {
+ case kForGC:
+ return memento_candidate;
+ case kForRuntime:
+ if (memento_candidate.is_null()) return AllocationMemento();
+ // Either the object is the last object in the new space, or there is
+ // another object of at least word size (the header map word) following
+ // it, so it suffices to compare ptr and top here.
+ top = heap_->NewSpaceTop();
+ DCHECK(memento_address >= heap_->new_space()->limit() ||
+ memento_address +
+ ALIGN_TO_ALLOCATION_ALIGNMENT(AllocationMemento::kSize) <=
+ top);
+ if ((memento_address != top) && memento_candidate.IsValid()) {
+ return memento_candidate;
+ }
+ return AllocationMemento();
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PRETENURING_HANDLER_INL_H_
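FindAllocationMemento locates the candidate memento immediately behind the object: the object's aligned size is added to its address, and the lookup bails out if the candidate would cross a page boundary. A simplified, self-contained sketch of just that address arithmetic (page size, alignment, and word size are assumed values):

#include <cstdint>
#include <optional>

constexpr std::uintptr_t kPageSizeSketch = 1u << 18;      // assumed page size
constexpr std::uintptr_t kAllocationAlignmentSketch = 8;  // assumed alignment
constexpr std::uintptr_t kTaggedSizeSketch = 8;           // assumed word size

constexpr std::uintptr_t RoundUp(std::uintptr_t v, std::uintptr_t a) {
  return (v + a - 1) & ~(a - 1);
}

constexpr bool OnSamePage(std::uintptr_t a, std::uintptr_t b) {
  return (a & ~(kPageSizeSketch - 1)) == (b & ~(kPageSizeSketch - 1));
}

// Returns the address where a trailing memento would live, or nothing if the
// candidate would spill onto another page.
std::optional<std::uintptr_t> MementoCandidateAddress(
    std::uintptr_t object_address, std::uintptr_t object_size) {
  std::uintptr_t memento_address =
      object_address + RoundUp(object_size, kAllocationAlignmentSketch);
  std::uintptr_t last_memento_word = memento_address + kTaggedSizeSketch;
  if (!OnSamePage(object_address, last_memento_word)) return std::nullopt;
  // A real implementation would now compare the candidate's map word against
  // the allocation-memento map before trusting the result.
  return memento_address;
}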
diff --git a/deps/v8/src/heap/pretenuring-handler.cc b/deps/v8/src/heap/pretenuring-handler.cc
new file mode 100644
index 0000000000..3276bdec01
--- /dev/null
+++ b/deps/v8/src/heap/pretenuring-handler.cc
@@ -0,0 +1,244 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/pretenuring-handler.h"
+
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles-inl.h"
+#include "src/heap/new-spaces.h"
+#include "src/objects/allocation-site-inl.h"
+
+namespace v8 {
+namespace internal {
+
+PretenturingHandler::PretenturingHandler(Heap* heap)
+ : heap_(heap), global_pretenuring_feedback_(kInitialFeedbackCapacity) {}
+
+PretenturingHandler::~PretenturingHandler() = default;
+
+void PretenturingHandler::MergeAllocationSitePretenuringFeedback(
+ const PretenuringFeedbackMap& local_pretenuring_feedback) {
+ PtrComprCageBase cage_base(heap_->isolate());
+ AllocationSite site;
+ for (auto& site_and_count : local_pretenuring_feedback) {
+ site = site_and_count.first;
+ MapWord map_word = site.map_word(cage_base, kRelaxedLoad);
+ if (map_word.IsForwardingAddress()) {
+ site = AllocationSite::cast(map_word.ToForwardingAddress());
+ }
+
+ // We have not validated the allocation site yet, since we have not
+ // dereferenced the site while collecting the information.
+ // This is an inlined check of AllocationMemento::IsValid.
+ if (!site.IsAllocationSite() || site.IsZombie()) continue;
+
+ const int value = static_cast<int>(site_and_count.second);
+ DCHECK_LT(0, value);
+ if (site.IncrementMementoFoundCount(value)) {
+ // For sites in the global map the count is accessed through the site.
+ global_pretenuring_feedback_.insert(std::make_pair(site, 0));
+ }
+ }
+}
+
+bool PretenturingHandler::DeoptMaybeTenuredAllocationSites() const {
+ NewSpace* new_space = heap_->new_space();
+ return new_space && new_space->IsAtMaximumCapacity() &&
+ !heap_->MaximumSizeMinorGC();
+}
+
+namespace {
+
+inline bool MakePretenureDecision(
+ AllocationSite site, AllocationSite::PretenureDecision current_decision,
+ double ratio, bool maximum_size_scavenge) {
+ // Here we just allow state transitions from undecided or maybe tenure
+ // to don't tenure, maybe tenure, or tenure.
+ if ((current_decision == AllocationSite::kUndecided ||
+ current_decision == AllocationSite::kMaybeTenure)) {
+ if (ratio >= AllocationSite::kPretenureRatio) {
+ // We just transition into tenure state when the semi-space was at
+ // maximum capacity.
+ if (maximum_size_scavenge) {
+ site.set_deopt_dependent_code(true);
+ site.set_pretenure_decision(AllocationSite::kTenure);
+ // Currently we just need to deopt when we make a state transition to
+ // tenure.
+ return true;
+ }
+ site.set_pretenure_decision(AllocationSite::kMaybeTenure);
+ } else {
+ site.set_pretenure_decision(AllocationSite::kDontTenure);
+ }
+ }
+ return false;
+}
+
+// Clear feedback calculation fields until the next gc.
+inline void ResetPretenuringFeedback(AllocationSite site) {
+ site.set_memento_found_count(0);
+ site.set_memento_create_count(0);
+}
+
+inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
+ bool maximum_size_scavenge) {
+ bool deopt = false;
+ int create_count = site.memento_create_count();
+ int found_count = site.memento_found_count();
+ bool minimum_mementos_created =
+ create_count >= AllocationSite::kPretenureMinimumCreated;
+ double ratio =
+ minimum_mementos_created || v8_flags.trace_pretenuring_statistics
+ ? static_cast<double>(found_count) / create_count
+ : 0.0;
+ AllocationSite::PretenureDecision current_decision =
+ site.pretenure_decision();
+
+ if (minimum_mementos_created) {
+ deopt = MakePretenureDecision(site, current_decision, ratio,
+ maximum_size_scavenge);
+ }
+
+ if (v8_flags.trace_pretenuring_statistics) {
+ PrintIsolate(isolate,
+ "pretenuring: AllocationSite(%p): (created, found, ratio) "
+ "(%d, %d, %f) %s => %s\n",
+ reinterpret_cast<void*>(site.ptr()), create_count, found_count,
+ ratio, site.PretenureDecisionName(current_decision),
+ site.PretenureDecisionName(site.pretenure_decision()));
+ }
+
+ ResetPretenuringFeedback(site);
+ return deopt;
+}
+
+bool PretenureAllocationSiteManually(Isolate* isolate, AllocationSite site) {
+ AllocationSite::PretenureDecision current_decision =
+ site.pretenure_decision();
+ bool deopt = true;
+ if (current_decision == AllocationSite::kUndecided ||
+ current_decision == AllocationSite::kMaybeTenure) {
+ site.set_deopt_dependent_code(true);
+ site.set_pretenure_decision(AllocationSite::kTenure);
+ } else {
+ deopt = false;
+ }
+ if (v8_flags.trace_pretenuring_statistics) {
+ PrintIsolate(isolate,
+ "pretenuring manually requested: AllocationSite(%p): "
+ "%s => %s\n",
+ reinterpret_cast<void*>(site.ptr()),
+ site.PretenureDecisionName(current_decision),
+ site.PretenureDecisionName(site.pretenure_decision()));
+ }
+
+ ResetPretenuringFeedback(site);
+ return deopt;
+}
+
+} // namespace
+
+void PretenturingHandler::RemoveAllocationSitePretenuringFeedback(
+ AllocationSite site) {
+ global_pretenuring_feedback_.erase(site);
+}
+
+void PretenturingHandler::ProcessPretenuringFeedback() {
+ bool trigger_deoptimization = false;
+ if (v8_flags.allocation_site_pretenuring) {
+ int tenure_decisions = 0;
+ int dont_tenure_decisions = 0;
+ int allocation_mementos_found = 0;
+ int allocation_sites = 0;
+ int active_allocation_sites = 0;
+
+ AllocationSite site;
+
+ // Step 1: Digest feedback for recorded allocation sites.
+ bool maximum_size_scavenge = heap_->MaximumSizeMinorGC();
+ for (auto& site_and_count : global_pretenuring_feedback_) {
+ allocation_sites++;
+ site = site_and_count.first;
+ // Count is always accessed through the site.
+ DCHECK_EQ(0, site_and_count.second);
+ int found_count = site.memento_found_count();
+ // An entry in the storage does not imply that the count is > 0 because
+ // allocation sites might have been reset due to too many objects dying
+ // in old space.
+ if (found_count > 0) {
+ DCHECK(site.IsAllocationSite());
+ active_allocation_sites++;
+ allocation_mementos_found += found_count;
+ if (DigestPretenuringFeedback(heap_->isolate(), site,
+ maximum_size_scavenge)) {
+ trigger_deoptimization = true;
+ }
+ if (site.GetAllocationType() == AllocationType::kOld) {
+ tenure_decisions++;
+ } else {
+ dont_tenure_decisions++;
+ }
+ }
+ }
+
+ // Step 2: Pretenure allocation sites for manual requests.
+ if (allocation_sites_to_pretenure_) {
+ while (!allocation_sites_to_pretenure_->empty()) {
+ auto pretenure_site = allocation_sites_to_pretenure_->Pop();
+ if (PretenureAllocationSiteManually(heap_->isolate(), pretenure_site)) {
+ trigger_deoptimization = true;
+ }
+ }
+ allocation_sites_to_pretenure_.reset();
+ }
+
+ // Step 3: Deopt maybe tenured allocation sites if necessary.
+ bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
+ if (deopt_maybe_tenured) {
+ heap_->ForeachAllocationSite(
+ heap_->allocation_sites_list(),
+ [&allocation_sites, &trigger_deoptimization](AllocationSite site) {
+ DCHECK(site.IsAllocationSite());
+ allocation_sites++;
+ if (site.IsMaybeTenure()) {
+ site.set_deopt_dependent_code(true);
+ trigger_deoptimization = true;
+ }
+ });
+ }
+
+ if (trigger_deoptimization) {
+ heap_->isolate()->stack_guard()->RequestDeoptMarkedAllocationSites();
+ }
+
+ if (v8_flags.trace_pretenuring_statistics &&
+ (allocation_mementos_found > 0 || tenure_decisions > 0 ||
+ dont_tenure_decisions > 0)) {
+ PrintIsolate(heap_->isolate(),
+ "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
+ "active_sites=%d "
+ "mementos=%d tenured=%d not_tenured=%d\n",
+ deopt_maybe_tenured ? 1 : 0, allocation_sites,
+ active_allocation_sites, allocation_mementos_found,
+ tenure_decisions, dont_tenure_decisions);
+ }
+
+ global_pretenuring_feedback_.clear();
+ global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
+ }
+}
+
+void PretenturingHandler::PretenureAllocationSiteOnNextCollection(
+ AllocationSite site) {
+ if (!allocation_sites_to_pretenure_) {
+ allocation_sites_to_pretenure_.reset(
+ new GlobalHandleVector<AllocationSite>(heap_));
+ }
+ allocation_sites_to_pretenure_->Push(site);
+}
+
+void PretenturingHandler::reset() { allocation_sites_to_pretenure_.reset(); }
+
+} // namespace internal
+} // namespace v8
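DigestPretenuringFeedback boils down to a found/created ratio compared against a threshold, with the final jump to "tenure" gated on the scavenge having run at maximum new-space capacity. A standalone sketch of that decision, assuming placeholder thresholds (the real constants live on AllocationSite):

#include <cstdio>

enum class Decision { kUndecided, kDontTenure, kMaybeTenure, kTenure };

// Placeholder thresholds for the sketch.
constexpr int kMinimumCreatedSketch = 100;
constexpr double kPretenureRatioSketch = 0.85;

// Returns the new decision; `maximum_size_scavenge` mirrors the "semi-space
// was at maximum capacity" condition that allows the final tenure transition.
Decision DigestFeedbackSketch(Decision current, int created, int found,
                              bool maximum_size_scavenge) {
  if (created < kMinimumCreatedSketch) return current;  // not enough signal yet
  if (current != Decision::kUndecided && current != Decision::kMaybeTenure)
    return current;  // only these states may still change
  const double ratio = static_cast<double>(found) / created;
  if (ratio < kPretenureRatioSketch) return Decision::kDontTenure;
  return maximum_size_scavenge ? Decision::kTenure : Decision::kMaybeTenure;
}

int main() {
  Decision d = Decision::kUndecided;
  d = DigestFeedbackSketch(d, /*created=*/200, /*found=*/190, /*max=*/false);
  std::printf("decision=%d\n", static_cast<int>(d));  // kMaybeTenure
  d = DigestFeedbackSketch(d, 200, 190, /*max=*/true);
  std::printf("decision=%d\n", static_cast<int>(d));  // kTenure
  return 0;
}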
diff --git a/deps/v8/src/heap/pretenuring-handler.h b/deps/v8/src/heap/pretenuring-handler.h
new file mode 100644
index 0000000000..4c31141fb8
--- /dev/null
+++ b/deps/v8/src/heap/pretenuring-handler.h
@@ -0,0 +1,90 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PRETENURING_HANDLER_H_
+#define V8_HEAP_PRETENURING_HANDLER_H_
+
+#include <memory>
+
+#include "src/objects/allocation-site.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/map.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class GlobalHandleVector;
+class Heap;
+
+class PretenturingHandler final {
+ public:
+ static const int kInitialFeedbackCapacity = 256;
+ using PretenuringFeedbackMap =
+ std::unordered_map<AllocationSite, size_t, Object::Hasher>;
+ enum FindMementoMode { kForRuntime, kForGC };
+
+ explicit PretenturingHandler(Heap* heap);
+ ~PretenturingHandler();
+
+ void reset();
+
+ // If an object has an AllocationMemento trailing it, return it, otherwise
+ // return a null AllocationMemento.
+ template <FindMementoMode mode>
+ inline AllocationMemento FindAllocationMemento(Map map, HeapObject object);
+
+ // ===========================================================================
+ // Allocation site tracking. =================================================
+ // ===========================================================================
+
+ // Updates the AllocationSite of a given {object}. The entry (including the
+ // count) is cached on the local pretenuring feedback.
+ inline void UpdateAllocationSite(
+ Map map, HeapObject object, PretenuringFeedbackMap* pretenuring_feedback);
+
+ // Merges local pretenuring feedback into the global one. Note that this
+ // method needs to be called after evacuation, as allocation sites may be
+ // evacuated and this method resolves forward pointers accordingly.
+ void MergeAllocationSitePretenuringFeedback(
+ const PretenuringFeedbackMap& local_pretenuring_feedback);
+
+ // Adds an allocation site to the list of sites to be pretenured during the
+ // next collection. Added allocation sites are pretenured independent of
+ // their feedback.
+ V8_EXPORT_PRIVATE void PretenureAllocationSiteOnNextCollection(
+ AllocationSite site);
+
+ // ===========================================================================
+ // Pretenuring. ==============================================================
+ // ===========================================================================
+
+ // Pretenuring decisions are made based on feedback collected during new space
+ // evacuation. Note that between feedback collection and calling this method,
+ // objects in old space must not move.
+ void ProcessPretenuringFeedback();
+
+ // Removes an entry from the global pretenuring storage.
+ void RemoveAllocationSitePretenuringFeedback(AllocationSite site);
+
+ private:
+ bool DeoptMaybeTenuredAllocationSites() const;
+
+ Heap* const heap_;
+
+ // The feedback storage is used to store allocation sites (keys) and how often
+ // they have been visited (values) by finding a memento behind an object. The
+ // storage is only alive temporarily during a GC. The invariant is that all
+ // pointers in this map are already fixed, i.e., they do not point to
+ // forwarding pointers.
+ PretenuringFeedbackMap global_pretenuring_feedback_;
+
+ std::unique_ptr<GlobalHandleVector<AllocationSite>>
+ allocation_sites_to_pretenure_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PRETENURING_HANDLER_H_
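The handler's PretenuringFeedbackMap is an unordered map from allocation site to how many mementos were found for it; each helper fills a local map and the results are merged into the global one afterwards. A minimal sketch of that merge pattern, with plain integers standing in for allocation sites:

#include <cstdint>
#include <unordered_map>

using SiteId = std::uint64_t;  // stand-in for an AllocationSite pointer
using FeedbackMap = std::unordered_map<SiteId, std::size_t>;

// Record one found memento in the thread-local map.
void RecordMemento(FeedbackMap& local, SiteId site) { ++local[site]; }

// Merge a local map into the global one; in the real code this is also the
// point where forwarding pointers are resolved and dead sites are skipped.
void MergeFeedback(FeedbackMap& global, const FeedbackMap& local) {
  for (const auto& [site, count] : local) {
    global[site] += count;
  }
}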
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
index a365fe3833..07db0d09da 100644
--- a/deps/v8/src/heap/read-only-heap.cc
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -9,6 +9,7 @@
#include "src/base/lazy-instance.h"
#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -298,7 +299,7 @@ HeapObject ReadOnlyHeapObjectIterator::Next() {
}
HeapObject object = HeapObject::FromAddress(current_addr_);
const int object_size = object.Size();
- current_addr_ += object_size;
+ current_addr_ += ALIGN_TO_ALLOCATION_ALIGNMENT(object_size);
if (object.IsFreeSpaceOrFiller()) {
continue;
diff --git a/deps/v8/src/heap/read-only-spaces.cc b/deps/v8/src/heap/read-only-spaces.cc
index 0277f18dd5..7385fd2353 100644
--- a/deps/v8/src/heap/read-only-spaces.cc
+++ b/deps/v8/src/heap/read-only-spaces.cc
@@ -15,6 +15,7 @@
#include "src/heap/allocation-stats.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/objects-inl.h"
@@ -185,7 +186,9 @@ ReadOnlyHeap* PointerCompressedReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
Address isolate_root = isolate->isolate_root();
for (Object original_object : original_cache) {
Address original_address = original_object.ptr();
- Address new_address = isolate_root + CompressTagged(original_address);
+ Address new_address =
+ isolate_root +
+ V8HeapCompressionScheme::CompressTagged(original_address);
Object new_object = Object(new_address);
cache.push_back(new_object);
}
@@ -235,7 +238,8 @@ void PointerCompressedReadOnlyArtifacts::Initialize(
pages_.push_back(new_page);
shared_memory_.push_back(std::move(shared_memory));
// This is just CompressTagged but inlined so it will always compile.
- Tagged_t compressed_address = CompressTagged(page->address());
+ Tagged_t compressed_address =
+ V8HeapCompressionScheme::CompressTagged(page->address());
page_offsets_.push_back(compressed_address);
// 3. Update the accounting stats so the allocated bytes are for the new
@@ -332,10 +336,7 @@ ReadOnlyPage::ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
std::move(reservation)) {
allocated_bytes_ = 0;
SetFlags(Flag::NEVER_EVACUATE | Flag::READ_ONLY_HEAP);
- heap->incremental_marking()
- ->non_atomic_marking_state()
- ->bitmap(this)
- ->MarkAllBits();
+ heap->non_atomic_marking_state()->bitmap(this)->MarkAllBits();
}
void ReadOnlyPage::MakeHeaderRelocatable() {
@@ -436,7 +437,7 @@ class ReadOnlySpaceObjectIterator : public ObjectIterator {
}
HeapObject obj = HeapObject::FromAddress(cur_addr_);
const int obj_size = obj.Size();
- cur_addr_ += obj_size;
+ cur_addr_ += ALIGN_TO_ALLOCATION_ALIGNMENT(obj_size);
DCHECK_LE(cur_addr_, cur_end_);
if (!obj.IsFreeSpaceOrFiller()) {
if (obj.IsCode()) {
@@ -575,7 +576,7 @@ void ReadOnlySpace::FreeLinearAllocationArea() {
// Clear the bits in the unused black area.
ReadOnlyPage* page = pages_.back();
- heap()->incremental_marking()->marking_state()->bitmap(page)->ClearRange(
+ heap()->marking_state()->bitmap(page)->ClearRange(
page->AddressToMarkbitIndex(top_), page->AddressToMarkbitIndex(limit_));
heap()->CreateFillerObjectAt(top_, static_cast<int>(limit_ - top_));
@@ -614,6 +615,7 @@ void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
int size_in_bytes, AllocationAlignment alignment) {
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
Address current_top = top_;
int filler_size = Heap::GetFillToAlign(current_top, alignment);
@@ -639,6 +641,7 @@ AllocationResult ReadOnlySpace::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
DCHECK(!v8_flags.enable_third_party_heap);
DCHECK(!IsDetached());
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
int allocation_size = size_in_bytes;
HeapObject object = TryAllocateLinearlyAligned(allocation_size, alignment);
@@ -658,6 +661,7 @@ AllocationResult ReadOnlySpace::AllocateRawAligned(
AllocationResult ReadOnlySpace::AllocateRawUnaligned(int size_in_bytes) {
DCHECK(!IsDetached());
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
EnsureSpaceForAllocation(size_in_bytes);
Address current_top = top_;
Address new_top = current_top + size_in_bytes;
@@ -684,7 +688,7 @@ AllocationResult ReadOnlySpace::AllocateRaw(int size_in_bytes,
: AllocateRawUnaligned(size_in_bytes);
HeapObject heap_obj;
if (result.To(&heap_obj)) {
- DCHECK(heap()->incremental_marking()->marking_state()->IsBlack(heap_obj));
+ DCHECK(heap()->marking_state()->IsBlack(heap_obj));
}
return result;
}
diff --git a/deps/v8/src/heap/remembered-set-inl.h b/deps/v8/src/heap/remembered-set-inl.h
index fe446a6b8c..03e22cb806 100644
--- a/deps/v8/src/heap/remembered-set-inl.h
+++ b/deps/v8/src/heap/remembered-set-inl.h
@@ -34,18 +34,16 @@ SlotCallbackResult UpdateTypedSlotHelper::UpdateTypedSlot(Heap* heap,
RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
- case SlotType::kEmbeddedObjectData: {
- RelocInfo rinfo(addr, RelocInfo::DATA_EMBEDDED_OBJECT, 0, Code());
- return UpdateEmbeddedPointer(heap, &rinfo, callback);
- }
case SlotType::kConstPoolEmbeddedObjectCompressed: {
- HeapObject old_target = HeapObject::cast(Object(
- DecompressTaggedAny(heap->isolate(), base::Memory<Tagged_t>(addr))));
+ HeapObject old_target =
+ HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+ heap->isolate(), base::Memory<Tagged_t>(addr))));
HeapObject new_target = old_target;
SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
- base::Memory<Tagged_t>(addr) = CompressTagged(new_target.ptr());
+ base::Memory<Tagged_t>(addr) =
+ V8HeapCompressionScheme::CompressTagged(new_target.ptr());
}
return result;
}
@@ -77,13 +75,9 @@ HeapObject UpdateTypedSlotHelper::GetTargetObject(Heap* heap,
RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
return rinfo.target_object(heap->isolate());
}
- case SlotType::kEmbeddedObjectData: {
- RelocInfo rinfo(addr, RelocInfo::DATA_EMBEDDED_OBJECT, 0, Code());
- return rinfo.target_object(heap->isolate());
- }
case SlotType::kConstPoolEmbeddedObjectCompressed: {
- Address full =
- DecompressTaggedAny(heap->isolate(), base::Memory<Tagged_t>(addr));
+ Address full = V8HeapCompressionScheme::DecompressTaggedAny(
+ heap->isolate(), base::Memory<Tagged_t>(addr));
return HeapObject::cast(Object(full));
}
case SlotType::kConstPoolEmbeddedObjectFull: {
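The hunks above route (de)compression of on-heap slots through V8HeapCompressionScheme: a compressed slot holds a 32-bit value that is re-attached to a cage base on load. A toy model of that scheme (the cage base and bit widths are assumptions, not the real layout):

#include <cassert>
#include <cstdint>

// Toy pointer-compression model: full pointers live inside a 4 GB "cage".
constexpr std::uint64_t kCageBaseSketch = 0x0000'7f00'0000'0000ull;

std::uint32_t CompressTaggedSketch(std::uint64_t full_address) {
  return static_cast<std::uint32_t>(full_address);  // keep the low 32 bits
}

std::uint64_t DecompressTaggedSketch(std::uint64_t cage_base,
                                     std::uint32_t compressed) {
  return (cage_base & ~0xffff'ffffull) | compressed;  // re-attach the upper half
}

int main() {
  std::uint64_t addr = kCageBaseSketch + 0x1234'5678;
  std::uint32_t c = CompressTaggedSketch(addr);
  assert(DecompressTaggedSketch(kCageBaseSketch, c) == addr);
  return 0;
}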
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 08c5e88010..d7dac8809d 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -31,7 +31,10 @@ class RememberedSetOperations {
static void Insert(SlotSet* slot_set, MemoryChunk* chunk, Address slot_addr) {
DCHECK(chunk->Contains(slot_addr));
uintptr_t offset = slot_addr - chunk->address();
- slot_set->Insert<access_mode>(offset);
+ slot_set->Insert<access_mode == v8::internal::AccessMode::ATOMIC
+ ? v8::internal::SlotSet::AccessMode::ATOMIC
+ : v8::internal::SlotSet::AccessMode::NON_ATOMIC>(
+ offset);
}
template <typename Callback>
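RememberedSetOperations::Insert now translates the heap-level AccessMode into the slot set's own AccessMode enum at compile time. A self-contained sketch of the same pattern — one enum selecting between an atomic and a plain update via a template parameter (the bucket type is a stand-in, not the real bitmap-based slot set):

#include <atomic>
#include <cstddef>

enum class AccessMode { ATOMIC, NON_ATOMIC };

// Hypothetical bucket of slot counters; the real slot set stores bitmaps.
struct SlotBucketSketch {
  std::atomic<std::size_t> atomic_count{0};
  std::size_t plain_count = 0;
};

template <AccessMode mode>
void InsertSlotSketch(SlotBucketSketch& bucket) {
  if constexpr (mode == AccessMode::ATOMIC) {
    bucket.atomic_count.fetch_add(1, std::memory_order_relaxed);
  } else {
    ++bucket.plain_count;
  }
}

// Usage: the caller's access mode becomes a template argument, mirroring the
// slot_set->Insert<access_mode == ATOMIC ? ... : ...>(offset) call in the diff.
void Example(SlotBucketSketch& bucket) {
  InsertSlotSketch<AccessMode::ATOMIC>(bucket);
  InsertSlotSketch<AccessMode::NON_ATOMIC>(bucket);
}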
diff --git a/deps/v8/src/heap/safepoint.cc b/deps/v8/src/heap/safepoint.cc
index cec6ab7fcb..ea36ba296d 100644
--- a/deps/v8/src/heap/safepoint.cc
+++ b/deps/v8/src/heap/safepoint.cc
@@ -72,7 +72,7 @@ class PerClientSafepointData final {
void IsolateSafepoint::InitiateGlobalSafepointScope(
Isolate* initiator, PerClientSafepointData* client_data) {
- shared_isolate()->global_safepoint()->AssertActive();
+ shared_heap_isolate()->global_safepoint()->AssertActive();
IgnoreLocalGCRequests ignore_gc_requests(initiator->heap());
LockMutex(initiator->main_thread_local_heap());
InitiateGlobalSafepointScopeRaw(initiator, client_data);
@@ -80,7 +80,7 @@ void IsolateSafepoint::InitiateGlobalSafepointScope(
void IsolateSafepoint::TryInitiateGlobalSafepointScope(
Isolate* initiator, PerClientSafepointData* client_data) {
- shared_isolate()->global_safepoint()->AssertActive();
+ shared_heap_isolate()->global_safepoint()->AssertActive();
if (!local_heaps_mutex_.TryLock()) return;
InitiateGlobalSafepointScopeRaw(initiator, client_data);
}
@@ -278,8 +278,8 @@ void IsolateSafepoint::AssertMainThreadIsOnlyThread() {
Isolate* IsolateSafepoint::isolate() const { return heap_->isolate(); }
-Isolate* IsolateSafepoint::shared_isolate() const {
- return isolate()->shared_isolate();
+Isolate* IsolateSafepoint::shared_heap_isolate() const {
+ return isolate()->shared_heap_isolate();
}
SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
@@ -289,7 +289,7 @@ SafepointScope::SafepointScope(Heap* heap) : safepoint_(heap->safepoint()) {
SafepointScope::~SafepointScope() { safepoint_->LeaveLocalSafepointScope(); }
GlobalSafepoint::GlobalSafepoint(Isolate* isolate)
- : shared_isolate_(isolate), shared_heap_(isolate->heap()) {}
+ : shared_heap_isolate_(isolate) {}
void GlobalSafepoint::AppendClient(Isolate* client) {
clients_mutex_.AssertHeld();
@@ -306,7 +306,6 @@ void GlobalSafepoint::AppendClient(Isolate* client) {
client->global_safepoint_next_client_isolate_ = clients_head_;
clients_head_ = client;
- client->shared_isolate_ = shared_isolate_;
}
void GlobalSafepoint::RemoveClient(Isolate* client) {
@@ -369,11 +368,15 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
initiator, &clients.back());
});
- // Make it possible to use AssertActive() on shared isolates.
- CHECK(shared_isolate_->heap()->safepoint()->local_heaps_mutex_.TryLock());
+ if (shared_heap_isolate_->is_shared()) {
+ // Make it possible to use AssertActive() on shared isolates.
+ CHECK(shared_heap_isolate_->heap()
+ ->safepoint()
+ ->local_heaps_mutex_.TryLock());
- // Shared isolates should never have multiple threads.
- shared_isolate_->heap()->safepoint()->AssertMainThreadIsOnlyThread();
+ // Shared isolates should never have multiple threads.
+ shared_heap_isolate_->heap()->safepoint()->AssertMainThreadIsOnlyThread();
+ }
// Iterate all clients again to initiate the safepoint for all of them - even
// if that means blocking.
@@ -384,7 +387,7 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
#if DEBUG
for (const PerClientSafepointData& client : clients) {
- DCHECK_EQ(client.isolate()->shared_isolate(), shared_isolate_);
+ DCHECK_EQ(client.isolate()->shared_heap_isolate(), shared_heap_isolate_);
DCHECK(client.heap()->deserialization_complete());
}
#endif // DEBUG
@@ -398,7 +401,9 @@ void GlobalSafepoint::EnterGlobalSafepointScope(Isolate* initiator) {
}
void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
- shared_isolate_->heap()->safepoint()->local_heaps_mutex_.Unlock();
+ if (shared_heap_isolate_->is_shared()) {
+ shared_heap_isolate_->heap()->safepoint()->local_heaps_mutex_.Unlock();
+ }
IterateClientIsolates([initiator](Isolate* client) {
Heap* client_heap = client->heap();
@@ -409,17 +414,22 @@ void GlobalSafepoint::LeaveGlobalSafepointScope(Isolate* initiator) {
}
GlobalSafepointScope::GlobalSafepointScope(Isolate* initiator)
- : initiator_(initiator), shared_isolate_(initiator->shared_isolate()) {
- if (shared_isolate_) {
- shared_isolate_->global_safepoint()->EnterGlobalSafepointScope(initiator_);
+ : initiator_(initiator),
+ shared_heap_isolate_(initiator->has_shared_heap()
+ ? initiator->shared_heap_isolate()
+ : nullptr) {
+ if (shared_heap_isolate_) {
+ shared_heap_isolate_->global_safepoint()->EnterGlobalSafepointScope(
+ initiator_);
} else {
initiator_->heap()->safepoint()->EnterLocalSafepointScope();
}
}
GlobalSafepointScope::~GlobalSafepointScope() {
- if (shared_isolate_) {
- shared_isolate_->global_safepoint()->LeaveGlobalSafepointScope(initiator_);
+ if (shared_heap_isolate_) {
+ shared_heap_isolate_->global_safepoint()->LeaveGlobalSafepointScope(
+ initiator_);
} else {
initiator_->heap()->safepoint()->LeaveLocalSafepointScope();
}
diff --git a/deps/v8/src/heap/safepoint.h b/deps/v8/src/heap/safepoint.h
index 82e76fe6d5..97e0e54591 100644
--- a/deps/v8/src/heap/safepoint.h
+++ b/deps/v8/src/heap/safepoint.h
@@ -133,7 +133,7 @@ class IsolateSafepoint final {
}
Isolate* isolate() const;
- Isolate* shared_isolate() const;
+ Isolate* shared_heap_isolate() const;
Barrier barrier_;
Heap* heap_;
@@ -186,8 +186,7 @@ class GlobalSafepoint final {
void EnterGlobalSafepointScope(Isolate* initiator);
void LeaveGlobalSafepointScope(Isolate* initiator);
- Isolate* const shared_isolate_;
- Heap* const shared_heap_;
+ Isolate* const shared_heap_isolate_;
base::Mutex clients_mutex_;
Isolate* clients_head_ = nullptr;
@@ -202,7 +201,7 @@ class V8_NODISCARD GlobalSafepointScope {
private:
Isolate* const initiator_;
- Isolate* const shared_isolate_;
+ Isolate* const shared_heap_isolate_;
};
} // namespace internal
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 91f4f528d0..59d837e1a3 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -8,9 +8,11 @@
#include "src/codegen/assembler-inl.h"
#include "src/heap/evacuation-allocator-inl.h"
#include "src/heap/incremental-marking-inl.h"
+#include "src/heap/marking-state-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/new-spaces.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/pretenuring-handler-inl.h"
#include "src/heap/scavenger.h"
#include "src/objects/map.h"
#include "src/objects/objects-body-descriptors-inl.h"
@@ -110,10 +112,11 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
}
if (is_incremental_marking_ &&
- promotion_heap_choice != kPromoteIntoSharedHeap) {
+ (promotion_heap_choice != kPromoteIntoSharedHeap || mark_shared_heap_)) {
heap()->incremental_marking()->TransferColor(source, target);
}
- heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
+ pretenuring_handler_->UpdateAllocationSite(map, source,
+ &local_pretenuring_feedback_);
return true;
}
@@ -132,8 +135,7 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
HeapObject target;
if (allocation.To(&target)) {
- DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
- target));
+ DCHECK(heap()->non_atomic_marking_state()->IsWhite(target));
const bool self_success =
MigrateObject(map, object, target, object_size, kPromoteIntoLocalHeap);
if (!self_success) {
@@ -181,8 +183,7 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
HeapObject target;
if (allocation.To(&target)) {
- DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
- target));
+ DCHECK(heap()->non_atomic_marking_state()->IsWhite(target));
const bool self_success =
MigrateObject(map, object, target, object_size, promotion_heap_choice);
if (!self_success) {
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index dfebb0b89b..8f1c27c05c 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -18,6 +18,7 @@
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/pretenuring-handler.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/slot-set.h"
@@ -330,7 +331,7 @@ void ScavengerCollector::CollectGarbage() {
EphemeronTableList ephemeron_table_list;
{
- Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
+ Sweeper* sweeper = heap_->sweeper();
// Pause the concurrent sweeper.
Sweeper::PauseScope pause_scope(sweeper);
@@ -540,8 +541,7 @@ void ScavengerCollector::SweepArrayBufferExtensions() {
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
const bool is_compacting = heap_->incremental_marking()->IsCompacting();
- AtomicMarkingState* marking_state =
- heap_->incremental_marking()->atomic_marking_state();
+ AtomicMarkingState* marking_state = heap_->atomic_marking_state();
for (SurvivingNewLargeObjectMapEntry update_info :
surviving_new_large_objects_) {
@@ -598,8 +598,8 @@ Scavenger::PromotionList::Local::Local(Scavenger::PromotionList* promotion_list)
namespace {
ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
- if (v8_flags.shared_string_table && heap->isolate()->shared_isolate()) {
- return new ConcurrentAllocator(nullptr, heap->shared_old_space());
+ if (v8_flags.shared_string_table && heap->isolate()->has_shared_heap()) {
+ return new ConcurrentAllocator(nullptr, heap->shared_allocation_space());
}
return nullptr;
}
@@ -615,7 +615,9 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
promotion_list_local_(promotion_list),
copied_list_local_(*copied_list),
ephemeron_table_list_local_(*ephemeron_table_list),
- local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
+ pretenuring_handler_(heap_->pretenuring_handler()),
+ local_pretenuring_feedback_(
+ PretenturingHandler::kInitialFeedbackCapacity),
copied_size_(0),
promoted_size_(0),
allocator_(heap, CompactionSpaceKind::kCompactionSpaceForScavenge),
@@ -625,7 +627,8 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
is_compacting_(heap->incremental_marking()->IsCompacting()),
is_compacting_including_map_space_(is_compacting_ &&
v8_flags.compact_maps),
- shared_string_table_(shared_old_allocator_.get() != nullptr) {}
+ shared_string_table_(shared_old_allocator_.get() != nullptr),
+ mark_shared_heap_(heap->isolate()->is_shared_space_isolate()) {}
void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
int size) {
@@ -636,8 +639,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
// the end of collection it would be a violation of the invariant to record
// its slots.
const bool record_slots =
- is_compacting_ &&
- heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
+ is_compacting_ && heap()->atomic_marking_state()->IsBlack(target);
IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
@@ -663,27 +665,29 @@ void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
AllocationSpace space = page->owner_identity();
if ((space == OLD_SPACE) && !page->SweepingDone()) {
- heap()->mark_compact_collector()->sweeper()->AddPage(
- space, reinterpret_cast<Page*>(page),
- Sweeper::READD_TEMPORARY_REMOVED_PAGE);
+ heap()->sweeper()->AddPage(space, reinterpret_cast<Page*>(page),
+ Sweeper::READD_TEMPORARY_REMOVED_PAGE);
}
}
void Scavenger::ScavengePage(MemoryChunk* page) {
CodePageMemoryModificationScope memory_modification_scope(page);
- const bool has_shared_isolate = heap_->isolate()->shared_isolate();
+ const bool record_old_to_shared_slots = heap_->isolate()->has_shared_heap();
if (page->slot_set<OLD_TO_NEW, AccessMode::ATOMIC>() != nullptr) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
page, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_NEW>::IterateAndTrackEmptyBuckets(
page,
- [this, page, has_shared_isolate, &filter](MaybeObjectSlot slot) {
+ [this, page, record_old_to_shared_slots,
+ &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
SlotCallbackResult result = CheckAndScavengeObject(heap_, slot);
// A new space string might have been promoted into the shared heap
// during GC.
- if (has_shared_isolate) CheckOldToNewSlotForSharedUntyped(page, slot);
+ if (record_old_to_shared_slots) {
+ CheckOldToNewSlotForSharedUntyped(page, slot);
+ }
return result;
},
&empty_chunks_local_);
@@ -700,11 +704,11 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot_address,
[this, page, slot_type, slot_address,
- has_shared_isolate](FullMaybeObjectSlot slot) {
+ record_old_to_shared_slots](FullMaybeObjectSlot slot) {
SlotCallbackResult result = CheckAndScavengeObject(heap(), slot);
// A new space string might have been promoted into the shared
// heap during GC.
- if (has_shared_isolate) {
+ if (record_old_to_shared_slots) {
CheckOldToNewSlotForSharedTyped(page, slot_type, slot_address);
}
return result;
@@ -809,8 +813,9 @@ void ScavengerCollector::ClearOldEphemerons() {
}
void Scavenger::Finalize() {
- heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
- heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
+ pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
+ local_pretenuring_feedback_);
+ heap()->IncrementNewSpaceSurvivingObjectSize(copied_size_);
heap()->IncrementPromotedObjectsSize(promoted_size_);
collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
allocator_.Finalize();
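CreateSharedOldAllocator() above constructs an allocator only when the shared string table flag is set and the isolate has a shared heap, and the Scavenger constructor then derives shared_string_table_ from whether that allocator exists. A small standalone sketch of the same optional-component idea follows; the flag, struct and function names are invented and the allocator body is a placeholder.

#include <cstddef>
#include <cstdio>
#include <memory>

struct SharedOldAllocatorSketch {
  void* Allocate(size_t) { return nullptr; }  // placeholder, no real allocation
};

struct HeapSketch {
  bool shared_string_table_flag = true;
  bool has_shared_heap = true;
};

std::unique_ptr<SharedOldAllocatorSketch> MaybeCreateSharedAllocator(
    const HeapSketch& heap) {
  if (heap.shared_string_table_flag && heap.has_shared_heap) {
    return std::make_unique<SharedOldAllocatorSketch>();
  }
  return nullptr;  // callers read "no allocator" as "no shared string table"
}

int main() {
  HeapSketch heap;
  auto shared_old_allocator = MaybeCreateSharedAllocator(heap);
  // Mirrors shared_string_table_(shared_old_allocator_.get() != nullptr).
  const bool shared_string_table = shared_old_allocator != nullptr;
  std::printf("shared_string_table = %d\n", shared_string_table);
  return 0;
}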
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 38979bcf1d..6476d1c927 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -12,6 +12,7 @@
#include "src/heap/memory-chunk.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/parallel-work-item.h"
+#include "src/heap/pretenuring-handler.h"
#include "src/heap/slot-set.h"
namespace v8 {
@@ -116,7 +117,6 @@ class Scavenger {
// Number of objects to process before interrupting for potentially waking
// up other tasks.
static const int kInterruptThreshold = 128;
- static const int kInitialLocalPretenuringFeedbackCapacity = 256;
inline Heap* heap() { return heap_; }
@@ -199,7 +199,8 @@ class Scavenger {
PromotionList::Local promotion_list_local_;
CopiedList::Local copied_list_local_;
EphemeronTableList::Local ephemeron_table_list_local_;
- Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
+ PretenturingHandler* const pretenuring_handler_;
+ PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
size_t copied_size_;
size_t promoted_size_;
EvacuationAllocator allocator_;
@@ -212,6 +213,7 @@ class Scavenger {
const bool is_compacting_;
const bool is_compacting_including_map_space_;
const bool shared_string_table_;
+ const bool mark_shared_heap_;
friend class IterateAndScavengePromotedObjectsVisitor;
friend class RootScavengeVisitor;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index c1b2fab13d..601d1b8322 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -75,9 +75,11 @@ bool SetupIsolateDelegate::SetupHeapInternal(Heap* heap) {
bool Heap::CreateHeapObjects() {
// Create initial maps.
if (!CreateInitialMaps()) return false;
- if (v8_flags.minor_mc && new_space()) {
- paged_new_space()->paged_space()->free_list()->RepairLists(this);
- }
+
+ // Ensure that all young generation pages are iterable. This must happen
+ // after heap setup so that the maps have already been created.
+ if (new_space()) new_space()->MakeIterable();
+
CreateApiObjects();
// Create initial objects
@@ -876,9 +878,11 @@ void Heap::CreateInitialObjects() {
set_feedback_vectors_for_profiling_tools(roots.undefined_value());
set_pending_optimize_for_test_bytecode(roots.undefined_value());
set_shared_wasm_memories(roots.empty_weak_array_list());
+ set_locals_block_list_cache(roots.undefined_value());
#ifdef V8_ENABLE_WEBASSEMBLY
set_active_continuation(roots.undefined_value());
set_active_suspender(roots.undefined_value());
+ set_js_to_wasm_wrappers(roots.empty_weak_array_list());
set_wasm_canonical_rtts(roots.empty_weak_array_list());
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1025,8 +1029,8 @@ void Heap::CreateInitialObjects() {
set_async_generator_await_reject_shared_fun(*info);
info = CreateSharedFunctionInfo(
- isolate(), Builtin::kAsyncGeneratorYieldResolveClosure, 1);
- set_async_generator_yield_resolve_shared_fun(*info);
+ isolate(), Builtin::kAsyncGeneratorYieldWithAwaitResolveClosure, 1);
+ set_async_generator_yield_with_await_resolve_shared_fun(*info);
info = CreateSharedFunctionInfo(
isolate(), Builtin::kAsyncGeneratorReturnResolveClosure, 1);
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index a67f3e94c5..a3a40885f8 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -10,9 +10,8 @@
#include <stack>
#include <vector>
-#include "src/base/atomic-utils.h"
#include "src/base/bit-field.h"
-#include "src/base/bits.h"
+#include "src/heap/base/basic-slot-set.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/slots.h"
#include "src/utils/allocation.h"
@@ -22,7 +21,9 @@
namespace v8 {
namespace internal {
-enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
+using ::heap::base::KEEP_SLOT;
+using ::heap::base::REMOVE_SLOT;
+using ::heap::base::SlotCallbackResult;
// Possibly empty buckets (buckets that do not contain any slots) are discovered
// by the scavenger. Buckets might become non-empty when promoting objects later
@@ -126,256 +127,46 @@ class PossiblyEmptyBuckets {
static_assert(std::is_standard_layout<PossiblyEmptyBuckets>::value);
static_assert(sizeof(PossiblyEmptyBuckets) == kSystemPointerSize);
-// Data structure for maintaining a set of slots in a standard (non-large)
-// page.
-// The data structure assumes that the slots are pointer size aligned and
-// splits the valid slot offset range into buckets.
-// Each bucket is a bitmap with a bit corresponding to a single slot offset.
-class SlotSet {
- public:
- enum EmptyBucketMode {
- FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
- KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
- };
+class SlotSet final : public ::heap::base::BasicSlotSet<kTaggedSize> {
+ using BasicSlotSet = ::heap::base::BasicSlotSet<kTaggedSize>;
- SlotSet() = delete;
+ public:
+ static const int kBucketsRegularPage =
+ (1 << kPageSizeBits) / kTaggedSize / kCellsPerBucket / kBitsPerCell;
static SlotSet* Allocate(size_t buckets) {
- // SlotSet* slot_set --+
- // |
- // v
- // +-----------------+-------------------------+
- // | initial buckets | buckets array |
- // +-----------------+-------------------------+
- // pointer-sized pointer-sized * buckets
- //
- //
- // The SlotSet pointer points to the beginning of the buckets array for
- // faster access in the write barrier. The number of buckets is needed for
- // calculating the size of this data structure.
- size_t buckets_size = buckets * sizeof(Bucket*);
- size_t size = kInitialBucketsSize + buckets_size;
- void* allocation = AlignedAllocWithRetry(size, kSystemPointerSize);
- SlotSet* slot_set = reinterpret_cast<SlotSet*>(
- reinterpret_cast<uint8_t*>(allocation) + kInitialBucketsSize);
- DCHECK(
- IsAligned(reinterpret_cast<uintptr_t>(slot_set), kSystemPointerSize));
-#ifdef DEBUG
- *slot_set->initial_buckets() = buckets;
-#endif
- for (size_t i = 0; i < buckets; i++) {
- *slot_set->bucket(i) = nullptr;
- }
- return slot_set;
- }
-
- static void Delete(SlotSet* slot_set, size_t buckets) {
- if (slot_set == nullptr) return;
-
- for (size_t i = 0; i < buckets; i++) {
- slot_set->ReleaseBucket(i);
- }
-
-#ifdef DEBUG
- size_t initial_buckets = *slot_set->initial_buckets();
-
- for (size_t i = buckets; i < initial_buckets; i++) {
- DCHECK_NULL(*slot_set->bucket(i));
- }
-#endif
-
- AlignedFree(reinterpret_cast<uint8_t*>(slot_set) - kInitialBucketsSize);
- }
-
- static size_t BucketsForSize(size_t size) {
- return (size + (kTaggedSize * kBitsPerBucket) - 1) >>
- (kTaggedSizeLog2 + kBitsPerBucketLog2);
- }
-
- // Converts the slot offset into bucket index.
- static size_t BucketForSlot(size_t slot_offset) {
- DCHECK(IsAligned(slot_offset, kTaggedSize));
- return slot_offset >> (kTaggedSizeLog2 + kBitsPerBucketLog2);
- }
-
- // The slot offset specifies a slot at address page_start_ + slot_offset.
- // AccessMode defines whether there can be concurrent access on the buckets
- // or not.
- template <AccessMode access_mode>
- void Insert(size_t slot_offset) {
- size_t bucket_index;
- int cell_index, bit_index;
- SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- Bucket* bucket = LoadBucket<access_mode>(bucket_index);
- if (bucket == nullptr) {
- bucket = new Bucket;
- if (!SwapInNewBucket<access_mode>(bucket_index, bucket)) {
- delete bucket;
- bucket = LoadBucket<access_mode>(bucket_index);
- }
- }
- // Check that monotonicity is preserved, i.e., once a bucket is set we do
- // not free it concurrently.
- DCHECK(bucket != nullptr);
- DCHECK_EQ(bucket->cells(), LoadBucket<access_mode>(bucket_index)->cells());
- uint32_t mask = 1u << bit_index;
- if ((bucket->LoadCell<access_mode>(cell_index) & mask) == 0) {
- bucket->SetCellBits<access_mode>(cell_index, mask);
- }
- }
-
- // The slot offset specifies a slot at address page_start_ + slot_offset.
- // Returns true if the set contains the slot.
- bool Contains(size_t slot_offset) {
- size_t bucket_index;
- int cell_index, bit_index;
- SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- Bucket* bucket = LoadBucket(bucket_index);
- if (bucket == nullptr) return false;
- return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
- }
-
- // The slot offset specifies a slot at address page_start_ + slot_offset.
- void Remove(size_t slot_offset) {
- size_t bucket_index;
- int cell_index, bit_index;
- SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- Bucket* bucket = LoadBucket(bucket_index);
- if (bucket != nullptr) {
- uint32_t cell = bucket->LoadCell(cell_index);
- uint32_t bit_mask = 1u << bit_index;
- if (cell & bit_mask) {
- bucket->ClearCellBits(cell_index, bit_mask);
- }
- }
- }
-
- // The slot offsets specify a range of slots at addresses:
- // [page_start_ + start_offset ... page_start_ + end_offset).
- void RemoveRange(size_t start_offset, size_t end_offset, size_t buckets,
- EmptyBucketMode mode) {
- CHECK_LE(end_offset, buckets * kBitsPerBucket * kTaggedSize);
- DCHECK_LE(start_offset, end_offset);
- size_t start_bucket;
- int start_cell, start_bit;
- SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
- size_t end_bucket;
- int end_cell, end_bit;
- SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
- uint32_t start_mask = (1u << start_bit) - 1;
- uint32_t end_mask = ~((1u << end_bit) - 1);
- Bucket* bucket;
- if (start_bucket == end_bucket && start_cell == end_cell) {
- bucket = LoadBucket(start_bucket);
- if (bucket != nullptr) {
- bucket->ClearCellBits(start_cell, ~(start_mask | end_mask));
- }
- return;
- }
- size_t current_bucket = start_bucket;
- int current_cell = start_cell;
- bucket = LoadBucket(current_bucket);
- if (bucket != nullptr) {
- bucket->ClearCellBits(current_cell, ~start_mask);
- }
- current_cell++;
- if (current_bucket < end_bucket) {
- if (bucket != nullptr) {
- ClearBucket(bucket, current_cell, kCellsPerBucket);
- }
- // The rest of the current bucket is cleared.
- // Move on to the next bucket.
- current_bucket++;
- current_cell = 0;
- }
- DCHECK(current_bucket == end_bucket ||
- (current_bucket < end_bucket && current_cell == 0));
- while (current_bucket < end_bucket) {
- if (mode == FREE_EMPTY_BUCKETS) {
- ReleaseBucket(current_bucket);
- } else {
- DCHECK(mode == KEEP_EMPTY_BUCKETS);
- bucket = LoadBucket(current_bucket);
- if (bucket != nullptr) {
- ClearBucket(bucket, 0, kCellsPerBucket);
- }
- }
- current_bucket++;
- }
- // All buckets between start_bucket and end_bucket are cleared.
- DCHECK(current_bucket == end_bucket);
- if (current_bucket == buckets) return;
- bucket = LoadBucket(current_bucket);
- DCHECK(current_cell <= end_cell);
- if (bucket == nullptr) return;
- while (current_cell < end_cell) {
- bucket->StoreCell(current_cell, 0);
- current_cell++;
- }
- // All cells between start_cell and end_cell are cleared.
- DCHECK(current_bucket == end_bucket && current_cell == end_cell);
- bucket->ClearCellBits(end_cell, ~end_mask);
- }
-
- // The slot offset specifies a slot at address page_start_ + slot_offset.
- bool Lookup(size_t slot_offset) {
- size_t bucket_index;
- int cell_index, bit_index;
- SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
- Bucket* bucket = LoadBucket(bucket_index);
- if (bucket == nullptr) return false;
- return (bucket->LoadCell(cell_index) & (1u << bit_index)) != 0;
+ return static_cast<SlotSet*>(BasicSlotSet::Allocate(buckets));
}
- // Iterate over all slots in the set and for each slot invoke the callback.
- // If the callback returns REMOVE_SLOT then the slot is removed from the set.
- // Returns the new number of slots.
- //
- // Iteration can be performed concurrently with other operations that use
- // atomic access mode such as insertion and removal. However there is no
- // guarantee about ordering and linearizability.
- //
- // Sample usage:
- // Iterate([](MaybeObjectSlot slot) {
- // if (good(slot)) return KEEP_SLOT;
- // else return REMOVE_SLOT;
- // });
- //
- // Releases memory for empty buckets with FREE_EMPTY_BUCKETS.
+ // Similar to BasicSlotSet::Iterate(), but the callback takes a parameter of
+ // type MaybeObjectSlot.
template <typename Callback>
size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
Callback callback, EmptyBucketMode mode) {
- return Iterate(chunk_start, start_bucket, end_bucket, callback,
- [this, mode](size_t bucket_index) {
- if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
- ReleaseBucket(bucket_index);
- }
- });
+ return BasicSlotSet::Iterate(
+ chunk_start, start_bucket, end_bucket,
+ [&callback](Address slot) { return callback(MaybeObjectSlot(slot)); },
+ [this, mode](size_t bucket_index) {
+ if (mode == EmptyBucketMode::FREE_EMPTY_BUCKETS) {
+ ReleaseBucket(bucket_index);
+ }
+ });
}
- // Similar to Iterate but marks potentially empty buckets internally. Stores
- // true in empty_bucket_found in case a potentially empty bucket was found.
- // Assumes that the possibly empty-array was already cleared by
- // CheckPossiblyEmptyBuckets.
+ // Similar to SlotSet::Iterate() but also marks potentially empty buckets:
+ // any bucket found to be potentially empty is recorded in
+ // possibly_empty_buckets. Assumes that possibly_empty_buckets was already
+ // cleared by CheckPossiblyEmptyBuckets.
template <typename Callback>
size_t IterateAndTrackEmptyBuckets(
Address chunk_start, size_t start_bucket, size_t end_bucket,
Callback callback, PossiblyEmptyBuckets* possibly_empty_buckets) {
- return Iterate(chunk_start, start_bucket, end_bucket, callback,
- [possibly_empty_buckets, end_bucket](size_t bucket_index) {
- possibly_empty_buckets->Insert(bucket_index, end_bucket);
- });
- }
-
- bool FreeEmptyBuckets(size_t buckets) {
- bool empty = true;
- for (size_t bucket_index = 0; bucket_index < buckets; bucket_index++) {
- if (!FreeBucketIfEmpty(bucket_index)) {
- empty = false;
- }
- }
-
- return empty;
+ return BasicSlotSet::Iterate(
+ chunk_start, start_bucket, end_bucket,
+ [&callback](Address slot) { return callback(MaybeObjectSlot(slot)); },
+ [possibly_empty_buckets, end_bucket](size_t bucket_index) {
+ possibly_empty_buckets->Insert(bucket_index, end_bucket);
+ });
}
// Check whether possibly empty buckets are really empty. Empty buckets are
@@ -406,198 +197,6 @@ class SlotSet {
return empty;
}
-
- static const int kCellsPerBucket = 32;
- static const int kCellsPerBucketLog2 = 5;
- static const int kCellSizeBytesLog2 = 2;
- static const int kCellSizeBytes = 1 << kCellSizeBytesLog2;
- static const int kBitsPerCell = 32;
- static const int kBitsPerCellLog2 = 5;
- static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
- static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
- static const int kBucketsRegularPage =
- (1 << kPageSizeBits) / kTaggedSize / kCellsPerBucket / kBitsPerCell;
-
- class Bucket : public Malloced {
- uint32_t cells_[kCellsPerBucket];
-
- public:
- Bucket() {
- for (int i = 0; i < kCellsPerBucket; i++) {
- cells_[i] = 0;
- }
- }
-
- uint32_t* cells() { return cells_; }
- uint32_t* cell(int cell_index) { return cells() + cell_index; }
-
- template <AccessMode access_mode = AccessMode::ATOMIC>
- uint32_t LoadCell(int cell_index) {
- DCHECK_LT(cell_index, kCellsPerBucket);
- if (access_mode == AccessMode::ATOMIC)
- return base::AsAtomic32::Acquire_Load(cells() + cell_index);
- return *(cells() + cell_index);
- }
-
- template <AccessMode access_mode = AccessMode::ATOMIC>
- void SetCellBits(int cell_index, uint32_t mask) {
- if (access_mode == AccessMode::ATOMIC) {
- base::AsAtomic32::SetBits(cell(cell_index), mask, mask);
- } else {
- uint32_t* c = cell(cell_index);
- *c = (*c & ~mask) | mask;
- }
- }
-
- void ClearCellBits(int cell_index, uint32_t mask) {
- base::AsAtomic32::SetBits(cell(cell_index), 0u, mask);
- }
-
- void StoreCell(int cell_index, uint32_t value) {
- base::AsAtomic32::Release_Store(cell(cell_index), value);
- }
-
- bool IsEmpty() {
- for (int i = 0; i < kCellsPerBucket; i++) {
- if (cells_[i] != 0) {
- return false;
- }
- }
- return true;
- }
- };
-
- private:
- template <typename Callback, typename EmptyBucketCallback>
- size_t Iterate(Address chunk_start, size_t start_bucket, size_t end_bucket,
- Callback callback, EmptyBucketCallback empty_bucket_callback) {
- size_t new_count = 0;
- for (size_t bucket_index = start_bucket; bucket_index < end_bucket;
- bucket_index++) {
- Bucket* bucket = LoadBucket(bucket_index);
- if (bucket != nullptr) {
- size_t in_bucket_count = 0;
- size_t cell_offset = bucket_index << kBitsPerBucketLog2;
- for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
- uint32_t cell = bucket->LoadCell(i);
- if (cell) {
- uint32_t old_cell = cell;
- uint32_t mask = 0;
- while (cell) {
- int bit_offset = base::bits::CountTrailingZeros(cell);
- uint32_t bit_mask = 1u << bit_offset;
- Address slot = (cell_offset + bit_offset) << kTaggedSizeLog2;
- if (callback(MaybeObjectSlot(chunk_start + slot)) == KEEP_SLOT) {
- ++in_bucket_count;
- } else {
- mask |= bit_mask;
- }
- cell ^= bit_mask;
- }
- uint32_t new_cell = old_cell & ~mask;
- if (old_cell != new_cell) {
- bucket->ClearCellBits(i, mask);
- }
- }
- }
- if (in_bucket_count == 0) {
- empty_bucket_callback(bucket_index);
- }
- new_count += in_bucket_count;
- }
- }
- return new_count;
- }
-
- bool FreeBucketIfEmpty(size_t bucket_index) {
- Bucket* bucket = LoadBucket<AccessMode::NON_ATOMIC>(bucket_index);
- if (bucket != nullptr) {
- if (bucket->IsEmpty()) {
- ReleaseBucket<AccessMode::NON_ATOMIC>(bucket_index);
- } else {
- return false;
- }
- }
-
- return true;
- }
-
- void ClearBucket(Bucket* bucket, int start_cell, int end_cell) {
- DCHECK_GE(start_cell, 0);
- DCHECK_LE(end_cell, kCellsPerBucket);
- int current_cell = start_cell;
- while (current_cell < kCellsPerBucket) {
- bucket->StoreCell(current_cell, 0);
- current_cell++;
- }
- }
-
- template <AccessMode access_mode = AccessMode::ATOMIC>
- void ReleaseBucket(size_t bucket_index) {
- Bucket* bucket = LoadBucket<access_mode>(bucket_index);
- StoreBucket<access_mode>(bucket_index, nullptr);
- delete bucket;
- }
-
- template <AccessMode access_mode = AccessMode::ATOMIC>
- Bucket* LoadBucket(Bucket** bucket) {
- if (access_mode == AccessMode::ATOMIC)
- return base::AsAtomicPointer::Acquire_Load(bucket);
- return *bucket;
- }
-
- template <AccessMode access_mode = AccessMode::ATOMIC>
- Bucket* LoadBucket(size_t bucket_index) {
- return LoadBucket(bucket(bucket_index));
- }
-
- template <AccessMode access_mode = AccessMode::ATOMIC>
- void StoreBucket(Bucket** bucket, Bucket* value) {
- if (access_mode == AccessMode::ATOMIC) {
- base::AsAtomicPointer::Release_Store(bucket, value);
- } else {
- *bucket = value;
- }
- }
-
- template <AccessMode access_mode = AccessMode::ATOMIC>
- void StoreBucket(size_t bucket_index, Bucket* value) {
- StoreBucket(bucket(bucket_index), value);
- }
-
- template <AccessMode access_mode = AccessMode::ATOMIC>
- bool SwapInNewBucket(size_t bucket_index, Bucket* value) {
- Bucket** b = bucket(bucket_index);
- if (access_mode == AccessMode::ATOMIC) {
- return base::AsAtomicPointer::Release_CompareAndSwap(b, nullptr, value) ==
- nullptr;
- } else {
- DCHECK_NULL(*b);
- *b = value;
- return true;
- }
- }
-
- // Converts the slot offset into bucket/cell/bit index.
- static void SlotToIndices(size_t slot_offset, size_t* bucket_index,
- int* cell_index, int* bit_index) {
- DCHECK(IsAligned(slot_offset, kTaggedSize));
- size_t slot = slot_offset >> kTaggedSizeLog2;
- *bucket_index = slot >> kBitsPerBucketLog2;
- *cell_index =
- static_cast<int>((slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1));
- *bit_index = static_cast<int>(slot & (kBitsPerCell - 1));
- }
-
- Bucket** buckets() { return reinterpret_cast<Bucket**>(this); }
- Bucket** bucket(size_t bucket_index) { return buckets() + bucket_index; }
-
-#ifdef DEBUG
- size_t* initial_buckets() { return reinterpret_cast<size_t*>(this) - 1; }
- static const int kInitialBucketsSize = sizeof(size_t);
-#else
- static const int kInitialBucketsSize = 0;
-#endif
};
static_assert(std::is_standard_layout<SlotSet>::value);
@@ -614,11 +213,6 @@ enum class SlotType : uint8_t {
// accessing. Used when pointer is stored in the instruction stream.
kEmbeddedObjectCompressed,
- // Full pointer sized slot storing an object start address.
- // RelocInfo::target_object/RelocInfo::set_target_object methods are used for
- // accessing. Used when pointer is stored in the instruction stream.
- kEmbeddedObjectData,
-
// Full pointer sized slot storing instruction start of Code object.
// RelocInfo::target_address/RelocInfo::set_target_address methods are used
// for accessing. Used when pointer is stored in the instruction stream.
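After this rewrite SlotSet is a thin wrapper: BasicSlotSet iterates raw addresses, and SlotSet::Iterate() wraps the user callback so it receives a MaybeObjectSlot, dropping every slot for which the callback returns REMOVE_SLOT. The self-contained sketch below shows only that adaptor shape; a plain vector stands in for the real bucketed bitmap and every name is invented.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

using Address = std::uintptr_t;
enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

struct TypedSlot {
  explicit TypedSlot(Address a) : address(a) {}
  Address address;
};

class BasicSlotListSketch {
 public:
  void Insert(Address a) { slots_.push_back(a); }

  // Keeps a slot when the callback returns KEEP_SLOT, drops it otherwise, and
  // returns the number of remaining slots (roughly what BasicSlotSet::Iterate
  // reports as the new slot count).
  template <typename Callback>
  size_t Iterate(Callback callback) {
    std::vector<Address> kept;
    for (Address a : slots_) {
      if (callback(a) == KEEP_SLOT) kept.push_back(a);
    }
    slots_.swap(kept);
    return slots_.size();
  }

 private:
  std::vector<Address> slots_;
};

class TypedSlotListSketch : public BasicSlotListSketch {
 public:
  // Same adaptor shape as SlotSet::Iterate(): wrap the typed callback in a
  // lambda that converts the raw address first.
  template <typename Callback>
  size_t IterateTyped(Callback callback) {
    return Iterate([&callback](Address a) { return callback(TypedSlot(a)); });
  }
};

int main() {
  TypedSlotListSketch set;
  for (Address a = 0; a < 80; a += 8) set.Insert(a);
  size_t remaining = set.IterateTyped([](TypedSlot slot) {
    return (slot.address % 16 == 0) ? KEEP_SLOT : REMOVE_SLOT;
  });
  std::printf("%zu slots kept\n", remaining);
  return 0;
}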
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 4397ad5ba2..9986f84f1b 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -149,6 +149,7 @@ MemoryChunk* OldGenerationMemoryChunkIterator::next() {
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
Address current_top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(current_top, alignment);
int aligned_size = filler_size + size_in_bytes;
@@ -164,6 +165,7 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
AllocationResult LocalAllocationBuffer::AllocateRawUnaligned(
int size_in_bytes) {
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
return allocation_info_.CanIncrementTop(size_in_bytes)
? AllocationResult::FromObject(HeapObject::FromAddress(
allocation_info_.IncrementTop(size_in_bytes)))
@@ -214,6 +216,7 @@ MemoryChunk* MemoryChunkIterator::Next() {
AllocationResult SpaceWithLinearArea::AllocateFastUnaligned(
int size_in_bytes, AllocationOrigin origin) {
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
if (!allocation_info_.CanIncrementTop(size_in_bytes)) {
return AllocationResult::Failure();
}
@@ -253,6 +256,7 @@ AllocationResult SpaceWithLinearArea::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
DCHECK(!v8_flags.enable_third_party_heap);
+ size_in_bytes = ALIGN_TO_ALLOCATION_ALIGNMENT(size_in_bytes);
AllocationResult result;
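The spaces-inl.h hunks route every requested size through ALIGN_TO_ALLOCATION_ALIGNMENT before bumping the linear allocation area. The sketch below illustrates only the rounding itself and assumes a fixed 8-byte allocation alignment; the real macro's effect depends on V8 build flags such as 8GB pointer compression, so treat the constant as an assumption.

#include <cstdio>
#include <initializer_list>

constexpr int kAllocationAlignment = 8;  // assumed alignment for this sketch

constexpr int AlignToAllocationAlignment(int size_in_bytes) {
  // Round up to the next multiple of kAllocationAlignment (a power of two).
  return (size_in_bytes + kAllocationAlignment - 1) &
         ~(kAllocationAlignment - 1);
}

static_assert(AlignToAllocationAlignment(1) == 8, "rounds up");
static_assert(AlignToAllocationAlignment(8) == 8, "already aligned");
static_assert(AlignToAllocationAlignment(13) == 16, "rounds up");

int main() {
  for (int size : {1, 8, 13, 24}) {
    std::printf("%d -> %d\n", size, AlignToAllocationAlignment(size));
  }
  return 0;
}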
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index cb80998276..a29cb88d5a 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -174,7 +174,7 @@ void Page::CreateBlackArea(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
- MarkingState* marking_state = heap()->incremental_marking()->marking_state();
+ MarkingState* marking_state = heap()->marking_state();
marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
@@ -186,8 +186,7 @@ void Page::CreateBlackAreaBackground(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
- AtomicMarkingState* marking_state =
- heap()->incremental_marking()->atomic_marking_state();
+ AtomicMarkingState* marking_state = heap()->atomic_marking_state();
marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
heap()->incremental_marking()->IncrementLiveBytesBackground(
@@ -200,7 +199,7 @@ void Page::DestroyBlackArea(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
- MarkingState* marking_state = heap()->incremental_marking()->marking_state();
+ MarkingState* marking_state = heap()->marking_state();
marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
@@ -212,8 +211,7 @@ void Page::DestroyBlackAreaBackground(Address start, Address end) {
DCHECK_EQ(Page::FromAddress(start), this);
DCHECK_LT(start, end);
DCHECK_EQ(Page::FromAddress(end - 1), this);
- AtomicMarkingState* marking_state =
- heap()->incremental_marking()->atomic_marking_state();
+ AtomicMarkingState* marking_state = heap()->atomic_marking_state();
marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
AddressToMarkbitIndex(end));
heap()->incremental_marking()->IncrementLiveBytesBackground(
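CreateBlackArea/DestroyBlackArea above set or clear mark bits for a whole address range and adjust the live-bytes counter by the range size; the change only alters where the marking state is obtained from. The toy sketch below shows that bookkeeping with a byte-granular bitmap and invented names, whereas V8 uses a word-packed bitmap with per-chunk accounting.

#include <cstddef>
#include <cstdio>
#include <vector>

class PageMarkingSketch {
 public:
  explicit PageMarkingSketch(size_t page_size) : bits_(page_size, false) {}

  void CreateBlackArea(size_t start, size_t end) {
    SetRange(start, end, true);
    live_bytes_ += static_cast<long>(end - start);
  }

  void DestroyBlackArea(size_t start, size_t end) {
    SetRange(start, end, false);
    live_bytes_ -= static_cast<long>(end - start);
  }

  long live_bytes() const { return live_bytes_; }

 private:
  void SetRange(size_t start, size_t end, bool value) {
    for (size_t i = start; i < end; ++i) bits_[i] = value;
  }
  std::vector<bool> bits_;  // one "mark bit" per byte, for simplicity
  long live_bytes_ = 0;
};

int main() {
  PageMarkingSketch page(4096);
  page.CreateBlackArea(128, 256);
  std::printf("live bytes: %ld\n", page.live_bytes());
  page.DestroyBlackArea(128, 256);
  std::printf("live bytes: %ld\n", page.live_bytes());
  return 0;
}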
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 1d60095de3..48b8c9fc41 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -153,6 +153,8 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
virtual int RoundSizeDownToObjectAlignment(int size) const {
if (id_ == CODE_SPACE) {
return RoundDown(size, kCodeAlignment);
+ } else if (V8_COMPRESS_POINTERS_8GB_BOOL) {
+ return RoundDown(size, kObjectAlignment8GbHeap);
} else {
return RoundDown(size, kTaggedSize);
}
@@ -182,7 +184,9 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
return memory_chunk_list_.back();
}
- heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
+ virtual heap::List<MemoryChunk>& memory_chunk_list() {
+ return memory_chunk_list_;
+ }
virtual Page* InitializePage(MemoryChunk* chunk) { UNREACHABLE(); }
@@ -299,7 +303,7 @@ class Page : public MemoryChunk {
return categories_[type];
}
- size_t ShrinkToHighWaterMark();
+ V8_EXPORT_PRIVATE size_t ShrinkToHighWaterMark();
V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
V8_EXPORT_PRIVATE void CreateBlackAreaBackground(Address start, Address end);
@@ -346,7 +350,11 @@ static_assert(sizeof(Page) <= MemoryChunk::kHeaderSize);
class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
public:
- virtual ~ObjectIterator() = default;
+ // Note: The destructor cannot be marked `= default`, as that causes a C++20
+ // compiler to define it as `constexpr`, which in turn produces warnings
+ // about undefined inlines for Next() on classes inheriting from it.
+ virtual ~ObjectIterator() {}
virtual HeapObject Next() = 0;
};
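The ObjectIterator comment above records a C++20 subtlety: with `= default` the virtual destructor may become implicitly constexpr and trigger undefined-inline warnings for Next() in derived classes. The sketch below only shows the resulting shape, i.e. an abstract iterator with a user-provided empty destructor body; whether the warning actually reproduces depends on the compiler, so take this as illustration rather than a test case.

#include <cstdio>

class ObjectIteratorSketch {
 public:
  virtual ~ObjectIteratorSketch() {}  // deliberately not `= default`
  virtual int Next() = 0;
};

class CountingIterator final : public ObjectIteratorSketch {
 public:
  int Next() override { return current_++; }

 private:
  int current_ = 0;
};

int main() {
  CountingIterator it;
  for (int i = 0; i < 3; ++i) std::printf("%d\n", it.Next());
  return 0;
}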
diff --git a/deps/v8/src/heap/stress-scavenge-observer.cc b/deps/v8/src/heap/stress-scavenge-observer.cc
index 9515f6bb80..4c72416e8c 100644
--- a/deps/v8/src/heap/stress-scavenge-observer.cc
+++ b/deps/v8/src/heap/stress-scavenge-observer.cc
@@ -62,8 +62,10 @@ bool StressScavengeObserver::HasRequestedGC() const {
}
void StressScavengeObserver::RequestedGCDone() {
+ size_t new_space_size = heap_->new_space()->Size();
double current_percent =
- heap_->new_space()->Size() * 100.0 / heap_->new_space()->Capacity();
+ new_space_size ? new_space_size * 100.0 / heap_->new_space()->Capacity()
+ : 0;
limit_percentage_ = NextLimit(static_cast<int>(current_percent));
if (v8_flags.trace_stress_scavenge) {
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index a910dddcbe..24ad9beeea 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -10,6 +10,7 @@
#include "src/base/logging.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
+#include "src/flags/flags.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/free-list-inl.h"
@@ -19,21 +20,20 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/new-spaces.h"
#include "src/heap/paged-spaces.h"
+#include "src/heap/pretenuring-handler-inl.h"
+#include "src/heap/pretenuring-handler.h"
#include "src/heap/remembered-set.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
-namespace {
-static const int kInitialLocalPretenuringFeedbackCapacity = 256;
-} // namespace
-
class Sweeper::ConcurrentSweeper final {
public:
explicit ConcurrentSweeper(Sweeper* sweeper)
: sweeper_(sweeper),
- local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity) {}
+ local_pretenuring_feedback_(
+ PretenturingHandler::kInitialFeedbackCapacity) {}
bool ConcurrentSweepSpace(AllocationSpace identity, JobDelegate* delegate) {
while (!delegate->ShouldYield()) {
@@ -45,13 +45,13 @@ class Sweeper::ConcurrentSweeper final {
return false;
}
- Heap::PretenuringFeedbackMap* local_pretenuring_feedback() {
+ PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback() {
return &local_pretenuring_feedback_;
}
private:
Sweeper* const sweeper_;
- Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
+ PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
};
class Sweeper::SweeperJob final : public JobTask {
@@ -69,12 +69,20 @@ class Sweeper::SweeperJob final : public JobTask {
void Run(JobDelegate* delegate) final {
RwxMemoryWriteScope::SetDefaultPermissionsForNewThread();
+ DCHECK(sweeper_->current_collector_.has_value());
if (delegate->IsJoiningThread()) {
- TRACE_GC(tracer_, GCTracer::Scope::MC_SWEEP);
+ TRACE_GC(tracer_, sweeper_->current_collector_ ==
+ GarbageCollector::MINOR_MARK_COMPACTOR
+ ? GCTracer::Scope::MINOR_MC_SWEEP
+ : GCTracer::Scope::MC_SWEEP);
RunImpl(delegate);
} else {
- TRACE_GC_EPOCH(tracer_, GCTracer::Scope::MC_BACKGROUND_SWEEPING,
- ThreadKind::kBackground);
+ TRACE_GC_EPOCH(
+ tracer_,
+ sweeper_->current_collector_ == GarbageCollector::MINOR_MARK_COMPACTOR
+ ? GCTracer::Scope::MINOR_MC_BACKGROUND_SWEEPING
+ : GCTracer::Scope::MC_BACKGROUND_SWEEPING,
+ ThreadKind::kBackground);
RunImpl(delegate);
}
}
@@ -106,12 +114,14 @@ class Sweeper::SweeperJob final : public JobTask {
GCTracer* const tracer_;
};
-Sweeper::Sweeper(Heap* heap, NonAtomicMarkingState* marking_state)
+Sweeper::Sweeper(Heap* heap)
: heap_(heap),
- marking_state_(marking_state),
+ marking_state_(heap_->non_atomic_marking_state()),
sweeping_in_progress_(false),
should_reduce_memory_(false),
- local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity) {}
+ pretenuring_handler_(heap_->pretenuring_handler()),
+ local_pretenuring_feedback_(
+ PretenturingHandler::kInitialFeedbackCapacity) {}
Sweeper::~Sweeper() {
DCHECK(concurrent_sweepers_.empty());
@@ -160,9 +170,10 @@ void Sweeper::TearDown() {
if (job_handle_ && job_handle_->IsValid()) job_handle_->Cancel();
}
-void Sweeper::StartSweeping() {
+void Sweeper::StartSweeping(GarbageCollector collector) {
DCHECK(local_pretenuring_feedback_.empty());
sweeping_in_progress_ = true;
+ current_collector_ = collector;
should_reduce_memory_ = heap_->ShouldReduceMemory();
ForAllSweepingSpaces([this](AllocationSpace space) {
// Sorting is done in order to make compaction more efficient: by sweeping
@@ -188,6 +199,7 @@ int Sweeper::NumberOfConcurrentSweepers() const {
}
void Sweeper::StartSweeperTasks() {
+ DCHECK(current_collector_.has_value());
DCHECK(!job_handle_ || !job_handle_->IsValid());
if (v8_flags.concurrent_sweeping && sweeping_in_progress_ &&
!heap_->delay_sweeper_tasks_for_testing_) {
@@ -230,14 +242,16 @@ void Sweeper::EnsureCompleted(SweepingMode sweeping_mode) {
CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
});
- heap_->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
+ pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
+ local_pretenuring_feedback_);
for (ConcurrentSweeper& concurrent_sweeper : concurrent_sweepers_) {
- heap_->MergeAllocationSitePretenuringFeedback(
+ pretenuring_handler_->MergeAllocationSitePretenuringFeedback(
*concurrent_sweeper.local_pretenuring_feedback());
}
local_pretenuring_feedback_.clear();
concurrent_sweepers_.clear();
+ current_collector_.reset();
sweeping_in_progress_ = false;
}
@@ -246,14 +260,6 @@ void Sweeper::DrainSweepingWorklistForSpace(AllocationSpace space) {
ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
}
-void Sweeper::SupportConcurrentSweeping() {
- ForAllSweepingSpaces([this](AllocationSpace space) {
- const int kMaxPagesToSweepPerSpace = 1;
- ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0,
- kMaxPagesToSweepPerSpace);
- });
-}
-
bool Sweeper::AreSweeperTasksRunning() {
return job_handle_ && job_handle_->IsValid() && job_handle_->IsActive();
}
@@ -268,8 +274,8 @@ V8_INLINE size_t Sweeper::FreeAndProcessFreedMemory(
ZapCode(free_start, size);
}
page->heap()->CreateFillerObjectAtSweeper(free_start, static_cast<int>(size));
- freed_bytes =
- reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(free_start, size);
+ freed_bytes = reinterpret_cast<PagedSpaceBase*>(space)->UnaccountedFree(
+ free_start, size);
if (should_reduce_memory_) page->DiscardUnusedMemory(free_start, size);
return freed_bytes;
@@ -349,11 +355,11 @@ void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(Page* page,
int Sweeper::RawSweep(
Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
- Heap::PretenuringFeedbackMap* local_pretenuring_feedback) {
+ PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback) {
Space* space = p->owner();
DCHECK_NOT_NULL(space);
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
- space->identity() == MAP_SPACE ||
+ space->identity() == MAP_SPACE || space->identity() == SHARED_SPACE ||
(space->identity() == NEW_SPACE && v8_flags.minor_mc));
DCHECK_IMPLIES(space->identity() == NEW_SPACE,
sweeping_mode == SweepingMode::kEagerDuringGC);
@@ -434,12 +440,13 @@ int Sweeper::RawSweep(
}
Map map = object.map(cage_base, kAcquireLoad);
DCHECK(MarkCompactCollector::IsMapOrForwarded(map));
- int size = object.SizeFromMap(map);
+ int size = ALIGN_TO_ALLOCATION_ALIGNMENT(object.SizeFromMap(map));
live_bytes += size;
free_start = free_end + size;
if (p->InYoungGeneration()) {
- heap_->UpdateAllocationSite(map, object, local_pretenuring_feedback);
+ pretenuring_handler_->UpdateAllocationSite(map, object,
+ local_pretenuring_feedback);
}
if (active_system_pages_after_sweeping) {
@@ -519,7 +526,7 @@ int Sweeper::ParallelSweepSpace(AllocationSpace identity,
int Sweeper::ParallelSweepPage(
Page* page, AllocationSpace identity,
- Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
+ PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
SweepingMode sweeping_mode) {
DCHECK(IsValidSweepingSpace(identity));
@@ -572,7 +579,7 @@ void Sweeper::EnsurePageIsSwept(Page* page) {
}
}
} else {
- DCHECK(page->InNewSpace());
+ DCHECK(page->InNewSpace() && !v8_flags.minor_mc);
}
CHECK(page->SweepingDone());
@@ -592,6 +599,21 @@ bool Sweeper::TryRemoveSweepingPageSafe(AllocationSpace space, Page* page) {
void Sweeper::AddPage(AllocationSpace space, Page* page,
Sweeper::AddPageMode mode) {
+ DCHECK_NE(NEW_SPACE, space);
+ AddPageImpl(space, page, mode);
+}
+
+void Sweeper::AddNewSpacePage(Page* page) {
+ DCHECK_EQ(NEW_SPACE, page->owner_identity());
+ size_t live_bytes = marking_state_->live_bytes(page);
+ heap_->IncrementNewSpaceSurvivingObjectSize(live_bytes);
+ heap_->IncrementYoungSurvivorsCounter(live_bytes);
+ page->ClearWasUsedForAllocation();
+ AddPageImpl(NEW_SPACE, page, AddPageMode::REGULAR);
+}
+
+void Sweeper::AddPageImpl(AllocationSpace space, Page* page,
+ Sweeper::AddPageMode mode) {
base::MutexGuard guard(&mutex_);
DCHECK(IsValidSweepingSpace(space));
DCHECK(!v8_flags.concurrent_sweeping || !job_handle_ ||
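The sweeper.cc changes above record which collector started sweeping in an optional member, so sweeper tasks can choose between the MINOR_MC_* and MC_* trace scopes and EnsureCompleted() can clear the state again. Below is a standalone sketch of that lifecycle using std::optional and invented names in place of V8's base::Optional, tracing macros and DCHECKs.

#include <cassert>
#include <cstdio>
#include <optional>

enum class Collector { kMinorMC, kMajorMC };

class SweeperSketch {
 public:
  void StartSweeping(Collector collector) {
    assert(!current_collector_.has_value());
    current_collector_ = collector;
    sweeping_in_progress_ = true;
  }

  void StartSweeperTasks() {
    // Tasks need to know which collector they belong to, e.g. to pick the
    // matching trace scope as SweeperJob::Run() does above.
    assert(current_collector_.has_value());
    const char* scope = (*current_collector_ == Collector::kMinorMC)
                            ? "MINOR_MC_SWEEP"
                            : "MC_SWEEP";
    std::printf("running sweeper tasks under scope %s\n", scope);
  }

  void EnsureCompleted() {
    current_collector_.reset();
    sweeping_in_progress_ = false;
  }

  bool sweeping_in_progress() const { return sweeping_in_progress_; }

 private:
  std::optional<Collector> current_collector_;
  bool sweeping_in_progress_ = false;
};

int main() {
  SweeperSketch sweeper;
  sweeper.StartSweeping(Collector::kMajorMC);
  sweeper.StartSweeperTasks();
  sweeper.EnsureCompleted();
  std::printf("sweeping in progress: %d\n", sweeper.sweeping_in_progress());
  return 0;
}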
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index 6b747547db..aa40f0b546 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -8,11 +8,12 @@
#include <map>
#include <vector>
+#include "src/base/optional.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
-#include "src/heap/heap.h"
+#include "src/heap/pretenuring-handler.h"
#include "src/heap/slot-set.h"
#include "src/tasks/cancelable-task.h"
@@ -76,7 +77,7 @@ class Sweeper {
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
enum class SweepingMode { kEagerDuringGC, kLazyOrConcurrent };
- Sweeper(Heap* heap, NonAtomicMarkingState* marking_state);
+ Sweeper(Heap* heap);
~Sweeper();
bool sweeping_in_progress() const { return sweeping_in_progress_; }
@@ -84,38 +85,39 @@ class Sweeper {
void TearDown();
void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
+ void AddNewSpacePage(Page* page);
int ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode,
int required_freed_bytes, int max_pages = 0);
int ParallelSweepPage(
Page* page, AllocationSpace identity,
- Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
+ PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback,
SweepingMode sweeping_mode);
void EnsurePageIsSwept(Page* page);
- int RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
- SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
- Heap::PretenuringFeedbackMap* local_pretenuring_feedback);
+ int RawSweep(
+ Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
+ SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
+ PretenturingHandler::PretenuringFeedbackMap* local_pretenuring_feedback);
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks
// are not running yet.
- void StartSweeping();
+ void StartSweeping(GarbageCollector collector);
V8_EXPORT_PRIVATE void StartSweeperTasks();
void EnsureCompleted(
SweepingMode sweeping_mode = SweepingMode::kLazyOrConcurrent);
void DrainSweepingWorklistForSpace(AllocationSpace space);
bool AreSweeperTasksRunning();
- // Support concurrent sweepers from main thread
- void SupportConcurrentSweeping();
-
Page* GetSweptPageSafe(PagedSpaceBase* space);
+ private:
NonAtomicMarkingState* marking_state() const { return marking_state_; }
- private:
+ void AddPageImpl(AllocationSpace space, Page* page, AddPageMode mode);
+
class ConcurrentSweeper;
class SweeperJob;
@@ -131,6 +133,7 @@ class Sweeper {
callback(OLD_SPACE);
callback(CODE_SPACE);
callback(MAP_SPACE);
+ callback(SHARED_SPACE);
}
// Helper function for RawSweep. Depending on the FreeListRebuildingMode and
@@ -187,7 +190,7 @@ class Sweeper {
int NumberOfConcurrentSweepers() const;
Heap* const heap_;
- NonAtomicMarkingState* marking_state_;
+ NonAtomicMarkingState* const marking_state_;
std::unique_ptr<JobHandle> job_handle_;
base::Mutex mutex_;
base::ConditionVariable cv_page_swept_;
@@ -198,7 +201,9 @@ class Sweeper {
// path checks this flag to see whether it could support concurrent sweeping.
std::atomic<bool> sweeping_in_progress_;
bool should_reduce_memory_;
- Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
+ PretenturingHandler* const pretenuring_handler_;
+ PretenturingHandler::PretenuringFeedbackMap local_pretenuring_feedback_;
+ base::Optional<GarbageCollector> current_collector_;
};
} // namespace internal