path: root/chromium/v8/src/heap
author     Allan Sandfeld Jensen <allan.jensen@qt.io>   2021-09-03 13:32:17 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>   2021-10-01 14:31:55 +0200
commit     21ba0c5d4bf8fba15dddd97cd693bad2358b77fd (patch)
tree       91be119f694044dfc1ff9fdc054459e925de9df0 /chromium/v8/src/heap
parent     03c549e0392f92c02536d3f86d5e1d8dfa3435ac (diff)
download   qtwebengine-chromium-21ba0c5d4bf8fba15dddd97cd693bad2358b77fd.tar.gz
BASELINE: Update Chromium to 92.0.4515.166
Change-Id: I42a050486714e9e54fc271f2a8939223a02ae364
Diffstat (limited to 'chromium/v8/src/heap')
-rw-r--r--  chromium/v8/src/heap/array-buffer-sweeper.cc  55
-rw-r--r--  chromium/v8/src/heap/array-buffer-sweeper.h  9
-rw-r--r--  chromium/v8/src/heap/base/asm/riscv64/push_registers_asm.cc  34
-rw-r--r--  chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc  16
-rw-r--r--  chromium/v8/src/heap/base/asm/x64/push_registers_masm.S  18
-rw-r--r--  chromium/v8/src/heap/base/stack.cc  17
-rw-r--r--  chromium/v8/src/heap/base/stack.h  14
-rw-r--r--  chromium/v8/src/heap/basic-memory-chunk.h  7
-rw-r--r--  chromium/v8/src/heap/code-range.cc  172
-rw-r--r--  chromium/v8/src/heap/code-range.h  147
-rw-r--r--  chromium/v8/src/heap/collection-barrier.cc  3
-rw-r--r--  chromium/v8/src/heap/combined-heap.h  3
-rw-r--r--  chromium/v8/src/heap/concurrent-allocator-inl.h  1
-rw-r--r--  chromium/v8/src/heap/concurrent-marking.cc  31
-rw-r--r--  chromium/v8/src/heap/cppgc-js/cpp-heap.cc  115
-rw-r--r--  chromium/v8/src/heap/cppgc-js/cpp-heap.h  9
-rw-r--r--  chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc  10
-rw-r--r--  chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc  11
-rw-r--r--  chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h  2
-rw-r--r--  chromium/v8/src/heap/cppgc/caged-heap.cc  23
-rw-r--r--  chromium/v8/src/heap/cppgc/compactor.cc  21
-rw-r--r--  chromium/v8/src/heap/cppgc/concurrent-marker.cc  2
-rw-r--r--  chromium/v8/src/heap/cppgc/explicit-management.cc  67
-rw-r--r--  chromium/v8/src/heap/cppgc/free-list.cc  12
-rw-r--r--  chromium/v8/src/heap/cppgc/gc-info-table.cc  5
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-base.cc  32
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-base.h  30
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-object-header.cc  8
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-object-header.h  43
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-page.cc  5
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-page.h  22
-rw-r--r--  chromium/v8/src/heap/cppgc/heap-statistics-collector.cc  2
-rw-r--r--  chromium/v8/src/heap/cppgc/heap.cc  4
-rw-r--r--  chromium/v8/src/heap/cppgc/liveness-broker.cc  2
-rw-r--r--  chromium/v8/src/heap/cppgc/marker.cc  42
-rw-r--r--  chromium/v8/src/heap/cppgc/marking-state.h  41
-rw-r--r--  chromium/v8/src/heap/cppgc/marking-verifier.cc  61
-rw-r--r--  chromium/v8/src/heap/cppgc/marking-verifier.h  14
-rw-r--r--  chromium/v8/src/heap/cppgc/marking-visitor.cc  2
-rw-r--r--  chromium/v8/src/heap/cppgc/memory.cc  22
-rw-r--r--  chromium/v8/src/heap/cppgc/memory.h  76
-rw-r--r--  chromium/v8/src/heap/cppgc/metric-recorder.h  2
-rw-r--r--  chromium/v8/src/heap/cppgc/object-allocator.cc  2
-rw-r--r--  chromium/v8/src/heap/cppgc/object-allocator.h  10
-rw-r--r--  chromium/v8/src/heap/cppgc/object-poisoner.h  4
-rw-r--r--  chromium/v8/src/heap/cppgc/object-size-trait.cc  7
-rw-r--r--  chromium/v8/src/heap/cppgc/object-view.h  54
-rw-r--r--  chromium/v8/src/heap/cppgc/page-memory.cc  2
-rw-r--r--  chromium/v8/src/heap/cppgc/pointer-policies.cc  59
-rw-r--r--  chromium/v8/src/heap/cppgc/process-heap-statistics.h  2
-rw-r--r--  chromium/v8/src/heap/cppgc/process-heap.cc  56
-rw-r--r--  chromium/v8/src/heap/cppgc/process-heap.h  35
-rw-r--r--  chromium/v8/src/heap/cppgc/sanitizers.h  82
-rw-r--r--  chromium/v8/src/heap/cppgc/stats-collector.h  4
-rw-r--r--  chromium/v8/src/heap/cppgc/sweeper.cc  81
-rw-r--r--  chromium/v8/src/heap/cppgc/sweeper.h  4
-rw-r--r--  chromium/v8/src/heap/cppgc/trace-trait.cc  7
-rw-r--r--  chromium/v8/src/heap/cppgc/visitor.cc  21
-rw-r--r--  chromium/v8/src/heap/embedder-tracing.cc  34
-rw-r--r--  chromium/v8/src/heap/embedder-tracing.h  42
-rw-r--r--  chromium/v8/src/heap/factory-base-inl.h  26
-rw-r--r--  chromium/v8/src/heap/factory-base.cc  121
-rw-r--r--  chromium/v8/src/heap/factory-base.h  9
-rw-r--r--  chromium/v8/src/heap/factory.cc  248
-rw-r--r--  chromium/v8/src/heap/factory.h  19
-rw-r--r--  chromium/v8/src/heap/finalization-registry-cleanup-task.cc  15
-rw-r--r--  chromium/v8/src/heap/free-list.cc  21
-rw-r--r--  chromium/v8/src/heap/gc-tracer.cc  32
-rw-r--r--  chromium/v8/src/heap/gc-tracer.h  6
-rw-r--r--  chromium/v8/src/heap/heap-inl.h  89
-rw-r--r--  chromium/v8/src/heap/heap-write-barrier.cc  11
-rw-r--r--  chromium/v8/src/heap/heap.cc  553
-rw-r--r--  chromium/v8/src/heap/heap.h  127
-rw-r--r--  chromium/v8/src/heap/incremental-marking.cc  43
-rw-r--r--  chromium/v8/src/heap/large-spaces.cc  6
-rw-r--r--  chromium/v8/src/heap/large-spaces.h  1
-rw-r--r--  chromium/v8/src/heap/local-allocator.h  2
-rw-r--r--  chromium/v8/src/heap/local-heap-inl.h  2
-rw-r--r--  chromium/v8/src/heap/local-heap.cc  2
-rw-r--r--  chromium/v8/src/heap/mark-compact-inl.h  8
-rw-r--r--  chromium/v8/src/heap/mark-compact.cc  164
-rw-r--r--  chromium/v8/src/heap/mark-compact.h  7
-rw-r--r--  chromium/v8/src/heap/marking-barrier-inl.h  10
-rw-r--r--  chromium/v8/src/heap/marking-visitor-inl.h  6
-rw-r--r--  chromium/v8/src/heap/marking-visitor.h  9
-rw-r--r--  chromium/v8/src/heap/memory-allocator.cc  112
-rw-r--r--  chromium/v8/src/heap/memory-allocator.h  68
-rw-r--r--  chromium/v8/src/heap/memory-chunk.cc  3
-rw-r--r--  chromium/v8/src/heap/memory-measurement.cc  2
-rw-r--r--  chromium/v8/src/heap/new-spaces-inl.h  8
-rw-r--r--  chromium/v8/src/heap/new-spaces.cc  9
-rw-r--r--  chromium/v8/src/heap/object-stats.cc  2
-rw-r--r--  chromium/v8/src/heap/objects-visiting-inl.h  7
-rw-r--r--  chromium/v8/src/heap/objects-visiting.h  7
-rw-r--r--  chromium/v8/src/heap/paged-spaces-inl.h  3
-rw-r--r--  chromium/v8/src/heap/paged-spaces.cc  6
-rw-r--r--  chromium/v8/src/heap/read-only-heap.cc  4
-rw-r--r--  chromium/v8/src/heap/read-only-heap.h  5
-rw-r--r--  chromium/v8/src/heap/read-only-spaces.cc  40
-rw-r--r--  chromium/v8/src/heap/read-only-spaces.h  3
-rw-r--r--  chromium/v8/src/heap/scavenger-inl.h  20
-rw-r--r--  chromium/v8/src/heap/scavenger.cc  18
-rw-r--r--  chromium/v8/src/heap/setup-heap-internal.cc  32
-rw-r--r--  chromium/v8/src/heap/spaces-inl.h  3
-rw-r--r--  chromium/v8/src/heap/spaces.cc  2
-rw-r--r--  chromium/v8/src/heap/spaces.h  3
-rw-r--r--  chromium/v8/src/heap/sweeper.cc  6
-rw-r--r--  chromium/v8/src/heap/third-party/heap-api-stub.cc  6
-rw-r--r--  chromium/v8/src/heap/third-party/heap-api.h  6
-rw-r--r--  chromium/v8/src/heap/weak-object-worklists.cc  24
110 files changed, 2529 insertions, 1125 deletions
diff --git a/chromium/v8/src/heap/array-buffer-sweeper.cc b/chromium/v8/src/heap/array-buffer-sweeper.cc
index 8af2a60e9c1..108a3497776 100644
--- a/chromium/v8/src/heap/array-buffer-sweeper.cc
+++ b/chromium/v8/src/heap/array-buffer-sweeper.cc
@@ -101,30 +101,25 @@ void ArrayBufferSweeper::EnsureFinished() {
UNREACHABLE();
}
- DecrementExternalMemoryCounters();
+ UpdateCountersForConcurrentlySweptExtensions();
sweeping_in_progress_ = false;
}
-void ArrayBufferSweeper::AdjustCountersAndMergeIfPossible() {
+void ArrayBufferSweeper::MergeBackExtensionsWhenSwept() {
if (sweeping_in_progress_) {
DCHECK(job_.has_value());
if (job_->state_ == SweepingState::kDone) {
Merge();
sweeping_in_progress_ = false;
} else {
- DecrementExternalMemoryCounters();
+ UpdateCountersForConcurrentlySweptExtensions();
}
}
}
-void ArrayBufferSweeper::DecrementExternalMemoryCounters() {
+void ArrayBufferSweeper::UpdateCountersForConcurrentlySweptExtensions() {
size_t freed_bytes = freed_bytes_.exchange(0, std::memory_order_relaxed);
-
- if (freed_bytes > 0) {
- heap_->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, freed_bytes);
- heap_->update_external_memory(-static_cast<int64_t>(freed_bytes));
- }
+ DecrementExternalMemoryCounters(freed_bytes);
}
void ArrayBufferSweeper::RequestSweepYoung() {
@@ -166,7 +161,7 @@ void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
Prepare(scope);
job_->Sweep();
Merge();
- DecrementExternalMemoryCounters();
+ UpdateCountersForConcurrentlySweptExtensions();
}
}
@@ -228,18 +223,52 @@ void ArrayBufferSweeper::Append(JSArrayBuffer object,
old_bytes_ += bytes;
}
- AdjustCountersAndMergeIfPossible();
- DecrementExternalMemoryCounters();
+ MergeBackExtensionsWhenSwept();
IncrementExternalMemoryCounters(bytes);
}
+void ArrayBufferSweeper::Detach(JSArrayBuffer object,
+ ArrayBufferExtension* extension) {
+ size_t bytes = extension->ClearAccountingLength();
+
+ // We cannot free the extension eagerly here, since extensions are tracked in
+ // a singly linked list. The next GC will remove it automatically.
+
+ if (!sweeping_in_progress_) {
+ // If concurrent sweeping isn't running at the moment, we can also adjust
+ // young_bytes_ or old_bytes_ right away.
+ if (Heap::InYoungGeneration(object)) {
+ DCHECK_GE(young_bytes_, bytes);
+ young_bytes_ -= bytes;
+ young_.bytes_ -= bytes;
+ } else {
+ DCHECK_GE(old_bytes_, bytes);
+ old_bytes_ -= bytes;
+ old_.bytes_ -= bytes;
+ }
+ }
+
+ MergeBackExtensionsWhenSwept();
+ DecrementExternalMemoryCounters(bytes);
+}
+
void ArrayBufferSweeper::IncrementExternalMemoryCounters(size_t bytes) {
+ if (bytes == 0) return;
heap_->IncrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, bytes);
reinterpret_cast<v8::Isolate*>(heap_->isolate())
->AdjustAmountOfExternalAllocatedMemory(static_cast<int64_t>(bytes));
}
+void ArrayBufferSweeper::DecrementExternalMemoryCounters(size_t bytes) {
+ if (bytes == 0) return;
+ heap_->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, bytes);
+ // Unlike IncrementExternalMemoryCounters we don't use
+ // AdjustAmountOfExternalAllocatedMemory such that we never start a GC here.
+ heap_->update_external_memory(-static_cast<int64_t>(bytes));
+}
+
void ArrayBufferSweeper::IncrementFreedBytes(size_t bytes) {
if (bytes == 0) return;
freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
diff --git a/chromium/v8/src/heap/array-buffer-sweeper.h b/chromium/v8/src/heap/array-buffer-sweeper.h
index 0c15428b754..6dd7ed97f6c 100644
--- a/chromium/v8/src/heap/array-buffer-sweeper.h
+++ b/chromium/v8/src/heap/array-buffer-sweeper.h
@@ -59,8 +59,12 @@ class ArrayBufferSweeper {
void RequestSweepYoung();
void RequestSweepFull();
+ // Track the given ArrayBufferExtension for the given JSArrayBuffer.
void Append(JSArrayBuffer object, ArrayBufferExtension* extension);
+ // Detaches an ArrayBufferExtension from a JSArrayBuffer.
+ void Detach(JSArrayBuffer object, ArrayBufferExtension* extension);
+
ArrayBufferList young() { return young_; }
ArrayBufferList old() { return old_; }
@@ -98,10 +102,11 @@ class ArrayBufferSweeper {
base::Optional<SweepingJob> job_;
void Merge();
- void AdjustCountersAndMergeIfPossible();
+ void MergeBackExtensionsWhenSwept();
- void DecrementExternalMemoryCounters();
+ void UpdateCountersForConcurrentlySweptExtensions();
void IncrementExternalMemoryCounters(size_t bytes);
+ void DecrementExternalMemoryCounters(size_t bytes);
void IncrementFreedBytes(size_t bytes);
void RequestSweep(SweepingScope sweeping_task);
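Note: the sweeper changes above split external-memory accounting into an increment path (which may trigger a GC) and a decrement path (which deliberately never does), and they defer per-generation byte adjustments while a concurrent sweep is running. The following standalone C++ sketch only illustrates that accounting pattern with hypothetical types; it is not the actual V8 classes.

#include <atomic>
#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for the sweeper's bookkeeping (illustration only).
class ExternalMemoryAccounting {
 public:
  // Mirrors the Append() path: the increment side is the only one that would
  // be allowed to start a GC in the real implementation.
  void OnAppend(size_t bytes) { external_bytes_ += static_cast<int64_t>(bytes); }

  // Mirrors the new Detach() path: adjust tracked bytes eagerly only when no
  // concurrent sweep is running, and never trigger a GC on the decrement side.
  void OnDetach(size_t bytes, bool sweeping_in_progress) {
    if (!sweeping_in_progress) tracked_bytes_ -= bytes;
    external_bytes_ -= static_cast<int64_t>(bytes);
  }

  // Concurrent sweeper threads only bump an atomic; the main thread drains it.
  void OnConcurrentFree(size_t bytes) {
    freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
  }
  void DrainConcurrentlyFreedBytes() {
    size_t freed = freed_bytes_.exchange(0, std::memory_order_relaxed);
    external_bytes_ -= static_cast<int64_t>(freed);
  }

 private:
  std::atomic<size_t> freed_bytes_{0};
  size_t tracked_bytes_ = 0;
  int64_t external_bytes_ = 0;
};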
diff --git a/chromium/v8/src/heap/base/asm/riscv64/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/riscv64/push_registers_asm.cc
index 1de4055a283..2d90aab1829 100644
--- a/chromium/v8/src/heap/base/asm/riscv64/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/riscv64/push_registers_asm.cc
@@ -5,21 +5,26 @@
// Push all callee-saved registers to get them on the stack for conservative
// stack scanning.
//
-// See asm/x64/push_registers_clang.cc for why the function is not generated
+// See asm/x64/push_registers_asm.cc for why the function is not generated
// using clang.
//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
+// Calling convention source:
+// https://riscv.org/wp-content/uploads/2015/01/riscv-calling.pdf Table 18.2
asm(".global PushAllRegistersAndIterateStack \n"
".type PushAllRegistersAndIterateStack, %function \n"
".hidden PushAllRegistersAndIterateStack \n"
"PushAllRegistersAndIterateStack: \n"
// Push all callee-saved registers and save return address.
- " addi sp, sp, -96 \n"
- " sd ra, 88(sp) \n"
- " sd s8, 80(sp) \n"
- " sd sp, 72(sp) \n"
- " sd gp, 64(sp) \n"
+ " addi sp, sp, -112 \n"
+ // Save return address.
+ " sd ra, 104(sp) \n"
+ // sp is callee-saved.
+ " sd sp, 96(sp) \n"
+ // s0-s11 are callee-saved.
+ " sd s11, 88(sp) \n"
+ " sd s10, 80(sp) \n"
+ " sd s9, 72(sp) \n"
+ " sd s8, 64(sp) \n"
" sd s7, 56(sp) \n"
" sd s6, 48(sp) \n"
" sd s5, 40(sp) \n"
@@ -28,18 +33,19 @@ asm(".global PushAllRegistersAndIterateStack \n"
" sd s2, 16(sp) \n"
" sd s1, 8(sp) \n"
" sd s0, 0(sp) \n"
- // Maintain frame pointer.
- " mv s8, sp \n"
+ // Maintain frame pointer (fp is s0).
+ " mv s0, sp \n"
// Pass 1st parameter (a0) unchanged (Stack*).
// Pass 2nd parameter (a1) unchanged (StackVisitor*).
- // Save 3rd parameter (a2; IterateStackCallback).
+ // Save 3rd parameter (a2; IterateStackCallback) to a3.
" mv a3, a2 \n"
+ // Pass 3rd parameter as sp (stack pointer).
" mv a2, sp \n"
// Call the callback.
" jalr a3 \n"
// Load return address.
- " ld ra, 88(sp) \n"
+ " ld ra, 104(sp) \n"
// Restore frame pointer.
- " ld s8, 80(sp) \n"
- " addi sp, sp, 96 \n"
+ " ld s0, 0(sp) \n"
+ " addi sp, sp, 112 \n"
" jr ra \n");
diff --git a/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc
index 68f7918c93c..9780b877b8c 100644
--- a/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc
@@ -19,7 +19,7 @@
#ifdef _WIN64
// We maintain 16-byte alignment at calls. There is an 8-byte return address
-// on the stack and we push 72 bytes which maintains 16-byte stack alignment
+// on the stack and we push 232 bytes which maintains 16-byte stack alignment
// at the call.
// Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
asm(".globl PushAllRegistersAndIterateStack \n"
@@ -36,6 +36,18 @@ asm(".globl PushAllRegistersAndIterateStack \n"
" push %r13 \n"
" push %r14 \n"
" push %r15 \n"
+ " sub $160, %rsp \n"
+ // Use aligned instrs as we are certain that the stack is properly aligned.
+ " movdqa %xmm6, 144(%rsp) \n"
+ " movdqa %xmm7, 128(%rsp) \n"
+ " movdqa %xmm8, 112(%rsp) \n"
+ " movdqa %xmm9, 96(%rsp) \n"
+ " movdqa %xmm10, 80(%rsp) \n"
+ " movdqa %xmm11, 64(%rsp) \n"
+ " movdqa %xmm12, 48(%rsp) \n"
+ " movdqa %xmm13, 32(%rsp) \n"
+ " movdqa %xmm14, 16(%rsp) \n"
+ " movdqa %xmm15, (%rsp) \n"
// Pass 1st parameter (rcx) unchanged (Stack*).
// Pass 2nd parameter (rdx) unchanged (StackVisitor*).
// Save 3rd parameter (r8; IterateStackCallback)
@@ -45,7 +57,7 @@ asm(".globl PushAllRegistersAndIterateStack \n"
// Call the callback.
" call *%r9 \n"
// Pop the callee-saved registers.
- " add $64, %rsp \n"
+ " add $224, %rsp \n"
// Restore rbp as it was used as frame pointer.
" pop %rbp \n"
" ret \n");
diff --git a/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S b/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S
index 627843830fa..a32e193c2f1 100644
--- a/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S
+++ b/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S
@@ -13,8 +13,8 @@ PushAllRegistersAndIterateStack:
;; stack scanning.
;;
;; We maintain 16-byte alignment at calls. There is an 8-byte return address
- ;; on the stack and we push 72 bytes which maintains 16-byte stack alignment
- ;; at the call.
+ ;; on the stack and we push 232 bytes which maintains 16-byte stack
+ ;; alignment at the call.
;; Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
;;
;; rbp is callee-saved. Maintain proper frame pointer for debugging.
@@ -28,6 +28,18 @@ PushAllRegistersAndIterateStack:
push r13
push r14
push r15
+ sub rsp, 160
+ ;; Use aligned instrs as we are certain that the stack is properly aligned.
+ movdqa xmmword ptr [rsp + 144], xmm6
+ movdqa xmmword ptr [rsp + 128], xmm7
+ movdqa xmmword ptr [rsp + 112], xmm8
+ movdqa xmmword ptr [rsp + 96], xmm9
+ movdqa xmmword ptr [rsp + 80], xmm10
+ movdqa xmmword ptr [rsp + 64], xmm11
+ movdqa xmmword ptr [rsp + 48], xmm12
+ movdqa xmmword ptr [rsp + 32], xmm13
+ movdqa xmmword ptr [rsp + 16], xmm14
+ movdqa xmmword ptr [rsp], xmm15
;; Pass 1st parameter (rcx) unchanged (Stack*).
;; Pass 2nd parameter (rdx) unchanged (StackVisitor*).
;; Save 3rd parameter (r8; IterateStackCallback)
@@ -37,7 +49,7 @@ PushAllRegistersAndIterateStack:
;; Call the callback.
call r9
;; Pop the callee-saved registers.
- add rsp, 64
+ add rsp, 224
;; Restore rbp as it was used as frame pointer.
pop rbp
ret
diff --git a/chromium/v8/src/heap/base/stack.cc b/chromium/v8/src/heap/base/stack.cc
index 939487ca77d..f6d522f1931 100644
--- a/chromium/v8/src/heap/base/stack.cc
+++ b/chromium/v8/src/heap/base/stack.cc
@@ -6,9 +6,11 @@
#include <limits>
+#include "src/base/macros.h"
#include "src/base/platform/platform.h"
+#include "src/base/sanitizer/asan.h"
+#include "src/base/sanitizer/msan.h"
#include "src/heap/cppgc/globals.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace heap {
namespace base {
@@ -41,7 +43,7 @@ namespace {
// No ASAN support as accessing fake frames otherwise results in
// "stack-use-after-scope" warnings.
-NO_SANITIZE_ADDRESS
+DISABLE_ASAN
void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
void* asan_fake_stack,
const void* stack_start,
@@ -77,7 +79,7 @@ void IterateSafeStackIfNecessary(StackVisitor* visitor) {
#if defined(__has_feature)
#if __has_feature(safe_stack)
// Source:
- // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/safestack/safestack.cpp
+ // https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/safestack/safestack.cpp
constexpr size_t kSafeStackAlignmentBytes = 16;
void* stack_end = __builtin___get_unsafe_stack_ptr();
void* stack_start = __builtin___get_unsafe_stack_top();
@@ -101,7 +103,7 @@ void IterateSafeStackIfNecessary(StackVisitor* visitor) {
// any data that needs to be scanned.
V8_NOINLINE
// No ASAN support as method accesses redzones while walking the stack.
-NO_SANITIZE_ADDRESS
+DISABLE_ASAN
void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
intptr_t* stack_end) {
#ifdef V8_USE_ADDRESS_SANITIZER
@@ -116,7 +118,7 @@ void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
// MSAN: Instead of unpoisoning the whole stack, the slot's value is copied
// into a local which is unpoisoned.
void* address = *current;
- MSAN_UNPOISON(&address, sizeof(address));
+ MSAN_MEMORY_IS_INITIALIZED(&address, sizeof(address));
if (address == nullptr) continue;
visitor->VisitPointer(address);
#ifdef V8_USE_ADDRESS_SANITIZER
@@ -135,5 +137,10 @@ void Stack::IteratePointers(StackVisitor* visitor) const {
IterateSafeStackIfNecessary(visitor);
}
+void Stack::IteratePointersUnsafe(StackVisitor* visitor,
+ uintptr_t stack_end) const {
+ IteratePointersImpl(this, visitor, reinterpret_cast<intptr_t*>(stack_end));
+}
+
} // namespace base
} // namespace heap
diff --git a/chromium/v8/src/heap/base/stack.h b/chromium/v8/src/heap/base/stack.h
index a46e6e660ed..d7267deee7e 100644
--- a/chromium/v8/src/heap/base/stack.h
+++ b/chromium/v8/src/heap/base/stack.h
@@ -26,10 +26,20 @@ class V8_EXPORT_PRIVATE Stack final {
// Returns true if |slot| is part of the stack and false otherwise.
bool IsOnStack(void* slot) const;
- // Word-aligned iteration of the stack. Slot values are passed on to
- // |visitor|.
+ // Word-aligned iteration of the stack. Callee-saved registers are pushed to
+ // the stack before iterating pointers. Slot values are passed on to
+ // `visitor`.
void IteratePointers(StackVisitor* visitor) const;
+ // Word-aligned iteration of the stack, starting at `stack_end`. Slot values
+ // are passed on to `visitor`. This is intended to be used with verifiers that
+ // only visit a subset of the stack visited by IteratePointers().
+ //
+ // **Ignores:**
+ // - Callee-saved registers.
+ // - SafeStack.
+ void IteratePointersUnsafe(StackVisitor* visitor, uintptr_t stack_end) const;
+
// Returns the start of the stack.
const void* stack_start() const { return stack_start_; }
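Note: IteratePointersUnsafe() takes an explicit stack end and, unlike IteratePointers(), does not push callee-saved registers or walk the SafeStack, which makes it suitable for verifiers that re-walk a previously recorded stack segment. A minimal usage sketch, assuming the heap::base API declared above (including StackVisitor's single VisitPointer(const void*) callback); the visitor and call site below are hypothetical:

#include <cstdint>
#include <cstdio>

#include "src/heap/base/stack.h"

// Hypothetical visitor that merely counts the slots reported by the walk.
class CountingVisitor final : public heap::base::StackVisitor {
 public:
  void VisitPointer(const void*) final { ++count_; }
  size_t count() const { return count_; }

 private:
  size_t count_ = 0;
};

// Record a stack end once (e.g. when marking starts) and later let a verifier
// re-walk only that segment, ignoring registers and the SafeStack.
void VerifyRecordedStackSegment(const heap::base::Stack& stack,
                                uintptr_t recorded_stack_end) {
  CountingVisitor visitor;
  stack.IteratePointersUnsafe(&visitor, recorded_stack_end);
  std::printf("visited %zu on-stack slots\n", visitor.count());
}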
diff --git a/chromium/v8/src/heap/basic-memory-chunk.h b/chromium/v8/src/heap/basic-memory-chunk.h
index 1b2d7cb5dad..993291dc0e0 100644
--- a/chromium/v8/src/heap/basic-memory-chunk.h
+++ b/chromium/v8/src/heap/basic-memory-chunk.h
@@ -106,6 +106,9 @@ class BasicMemoryChunk {
// because there exists a potential pointer to somewhere in the chunk which
// can't be updated.
PINNED = 1u << 22,
+
+ // This page belongs to a shared heap.
+ IN_SHARED_HEAP = 1u << 23,
};
static const intptr_t kAlignment =
@@ -255,6 +258,8 @@ class BasicMemoryChunk {
bool InOldSpace() const;
V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
+ bool InSharedHeap() const { return IsFlagSet(IN_SHARED_HEAP); }
+
bool IsWritable() const {
// If this is a read-only space chunk but heap_ is non-null, it has not yet
// been sealed and can be written to.
@@ -294,11 +299,13 @@ class BasicMemoryChunk {
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static BasicMemoryChunk* FromAddress(Address a) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
static BasicMemoryChunk* FromHeapObject(HeapObject o) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(o.ptr()));
}
diff --git a/chromium/v8/src/heap/code-range.cc b/chromium/v8/src/heap/code-range.cc
new file mode 100644
index 00000000000..738c12710c9
--- /dev/null
+++ b/chromium/v8/src/heap/code-range.cc
@@ -0,0 +1,172 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/code-range.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
+#include "src/heap/heap-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<CodeRange>,
+ GetProcessWideCodeRangeCage)
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(CodeRangeAddressHint, GetCodeRangeAddressHint)
+
+void FunctionInStaticBinaryForAddressHint() {}
+} // anonymous namespace
+
+Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
+ base::MutexGuard guard(&mutex_);
+ auto it = recently_freed_.find(code_range_size);
+ if (it == recently_freed_.end() || it->second.empty()) {
+ return FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint);
+ }
+ Address result = it->second.back();
+ it->second.pop_back();
+ return result;
+}
+
+void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
+ size_t code_range_size) {
+ base::MutexGuard guard(&mutex_);
+ recently_freed_[code_range_size].push_back(code_range_start);
+}
+
+CodeRange::~CodeRange() { Free(); }
+
+bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
+ size_t requested) {
+ DCHECK_NE(requested, 0);
+
+ if (requested <= kMinimumCodeRangeSize) {
+ requested = kMinimumCodeRangeSize;
+ }
+ const size_t reserved_area =
+ kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
+ if (requested < (kMaximalCodeRangeSize - reserved_area)) {
+ requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
+ // Fulfilling both reserved pages requirement and huge code area
+ // alignments is not supported (requires re-implementation).
+ DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
+ }
+ DCHECK_IMPLIES(kPlatformRequiresCodeRange,
+ requested <= kMaximalCodeRangeSize);
+
+ VirtualMemoryCage::ReservationParams params;
+ params.page_allocator = page_allocator;
+ params.reservation_size = requested;
+ params.base_alignment =
+ VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
+ params.base_bias_size = reserved_area;
+ params.page_size = MemoryChunk::kPageSize;
+ params.requested_start_hint =
+ GetCodeRangeAddressHint()->GetAddressHint(requested);
+
+ if (!VirtualMemoryCage::InitReservation(params)) return false;
+
+ // On some platforms, specifically Win64, we need to reserve some pages at
+ // the beginning of an executable space. See
+ // https://cs.chromium.org/chromium/src/components/crash/content/
+ // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
+ // for details.
+ if (reserved_area > 0) {
+ if (!reservation()->SetPermissions(reservation()->address(), reserved_area,
+ PageAllocator::kReadWrite)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void CodeRange::Free() {
+ if (IsReserved()) {
+ GetCodeRangeAddressHint()->NotifyFreedCodeRange(
+ reservation()->region().begin(), reservation()->region().size());
+ VirtualMemoryCage::Free();
+ }
+}
+
+uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
+ const uint8_t* embedded_blob_code,
+ size_t embedded_blob_code_size) {
+ base::MutexGuard guard(&remap_embedded_builtins_mutex_);
+
+ const base::AddressRegion& code_region = reservation()->region();
+ CHECK_NE(code_region.begin(), kNullAddress);
+ CHECK(!code_region.is_empty());
+
+ uint8_t* embedded_blob_code_copy =
+ embedded_blob_code_copy_.load(std::memory_order_acquire);
+ if (embedded_blob_code_copy) {
+ DCHECK(
+ code_region.contains(reinterpret_cast<Address>(embedded_blob_code_copy),
+ embedded_blob_code_size));
+ SLOW_DCHECK(memcmp(embedded_blob_code, embedded_blob_code_copy,
+ embedded_blob_code_size) == 0);
+ return embedded_blob_code_copy;
+ }
+
+ const size_t kAllocatePageSize = page_allocator()->AllocatePageSize();
+ size_t allocate_code_size =
+ RoundUp(embedded_blob_code_size, kAllocatePageSize);
+
+ // Allocate the re-embedded code blob in the end.
+ void* hint = reinterpret_cast<void*>(code_region.end() - allocate_code_size);
+
+ embedded_blob_code_copy =
+ reinterpret_cast<uint8_t*>(page_allocator()->AllocatePages(
+ hint, allocate_code_size, kAllocatePageSize,
+ PageAllocator::kNoAccess));
+
+ if (!embedded_blob_code_copy) {
+ V8::FatalProcessOutOfMemory(
+ isolate, "Can't allocate space for re-embedded builtins");
+ }
+
+ size_t code_size =
+ RoundUp(embedded_blob_code_size, page_allocator()->CommitPageSize());
+
+ if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
+ PageAllocator::kReadWrite)) {
+ V8::FatalProcessOutOfMemory(isolate,
+ "Re-embedded builtins: set permissions");
+ }
+ memcpy(embedded_blob_code_copy, embedded_blob_code, embedded_blob_code_size);
+
+ if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
+ PageAllocator::kReadExecute)) {
+ V8::FatalProcessOutOfMemory(isolate,
+ "Re-embedded builtins: set permissions");
+ }
+
+ embedded_blob_code_copy_.store(embedded_blob_code_copy,
+ std::memory_order_release);
+ return embedded_blob_code_copy;
+}
+
+// static
+void CodeRange::InitializeProcessWideCodeRangeOnce(
+ v8::PageAllocator* page_allocator, size_t requested_size) {
+ *GetProcessWideCodeRangeCage() = std::make_shared<CodeRange>();
+ if (!GetProcessWideCodeRange()->InitReservation(page_allocator,
+ requested_size)) {
+ V8::FatalProcessOutOfMemory(
+ nullptr, "Failed to reserve virtual memory for CodeRange");
+ }
+}
+
+// static
+std::shared_ptr<CodeRange> CodeRange::GetProcessWideCodeRange() {
+ return *GetProcessWideCodeRangeCage();
+}
+
+} // namespace internal
+} // namespace v8
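Note: CodeRangeAddressHint exists so that freed code-range reservations can be handed back as hints for later reservations of the same size (the CFG leak workaround referenced in the header below). A small round-trip sketch using the two public methods implemented above; the call site is hypothetical:

#include "src/heap/code-range.h"

// Hypothetical call site showing the intended reuse round trip.
void HintRoundTrip(v8::internal::CodeRangeAddressHint& hint,
                   v8::internal::Address freed_start, size_t size) {
  // When a code range of `size` bytes at `freed_start` is released...
  hint.NotifyFreedCodeRange(freed_start, size);
  // ...the next reservation request for the same size gets that address back,
  // so the mapping can be reused (see crbug.com/870054). A size that was never
  // freed instead yields an address inside the binary as a fallback hint.
  v8::internal::Address hinted_start = hint.GetAddressHint(size);
  (void)hinted_start;  // == freed_start in this round trip.
}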
diff --git a/chromium/v8/src/heap/code-range.h b/chromium/v8/src/heap/code-range.h
new file mode 100644
index 00000000000..b1bc6020b58
--- /dev/null
+++ b/chromium/v8/src/heap/code-range.h
@@ -0,0 +1,147 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CODE_RANGE_H_
+#define V8_HEAP_CODE_RANGE_H_
+
+#include <unordered_map>
+#include <vector>
+
+#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+// The process-wide singleton that keeps track of code range regions with the
+// intention to reuse free code range regions as a workaround for CFG memory
+// leaks (see crbug.com/870054).
+class CodeRangeAddressHint {
+ public:
+ // Returns the most recently freed code range start address for the given
+ // size. If there is no such entry, then a random address is returned.
+ V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
+
+ V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
+ size_t code_range_size);
+
+ private:
+ base::Mutex mutex_;
+ // A map from code range size to an array of recently freed code range
+ // addresses. There should be O(1) different code range sizes.
+ // The length of each array is limited by the peak number of code ranges,
+ // which should be also O(1).
+ std::unordered_map<size_t, std::vector<Address>> recently_freed_;
+};
+
+// A code range is a virtual memory cage that may contain executable code. It
+// has the following layout.
+//
+// +------------+-----+----------------------- ~~~ -+
+// |     RW     | ... |           ...                |
+// +------------+-----+----------------------- ~~~ -+
+// ^            ^     ^
+// start        base  allocatable base
+//
+// <------------>     <---------------------------->
+//    reserved            allocatable region
+// <----------------------------------------------->
+//                    code region
+//
+// The start of the reservation may include a reserved page with read-write
+// access as required by some platforms (Win64). The cage's page allocator does
+// not control the optional reserved page at the beginning of the code region.
+//
+// The following conditions hold:
+// 1) |reservation()->region()| >= |optional RW pages| +
+// |reservation()->page_allocator()|
+// 2) |reservation()| is AllocatePageSize()-aligned
+// 3) |reservation()->page_allocator()| (i.e. allocatable base) is
+// MemoryChunk::kAlignment-aligned
+// 4) |base()| is CommitPageSize()-aligned
+class CodeRange final : public VirtualMemoryCage {
+ public:
+ V8_EXPORT_PRIVATE ~CodeRange();
+
+ uint8_t* embedded_blob_code_copy() const {
+ // remap_embedded_builtins_mutex_ is designed to protect write contention to
+ // embedded_blob_code_copy_. It is safe to be read without taking the
+ // mutex. It is read to check if short builtins ought to be enabled because
+ // a shared CodeRange has already remapped builtins and to find where the
+ // instruction stream for a builtin is.
+ //
+ // For the first, this racing with an Isolate calling RemapEmbeddedBuiltins
+ // may result in disabling short builtins, which is not a correctness issue.
+ //
+ // For the second, this racing with an Isolate calling RemapEmbeddedBuiltins
+ // may result in an already running Isolate that did not have short builtins
+ // enabled (due to max old generation size) to switch over to using remapped
+ // builtins, which is also not a correctness issue as the remapped builtins
+ // are byte-equivalent.
+ //
+ // Both these scenarios should be rare. The initial Isolate is usually
+ // created by itself, i.e. without contention. Additionally, the first
+ // Isolate usually remaps builtins on machines with enough memory, not
+ // subsequent Isolates in the same process.
+ return embedded_blob_code_copy_.load(std::memory_order_acquire);
+ }
+
+#ifdef V8_OS_WIN64
+ // 64-bit Windows needs to track how many Isolates are using the CodeRange for
+ // registering and unregistering of unwind info. Note that even though
+ // CodeRanges are used with std::shared_ptr, std::shared_ptr::use_count should
+ // not be used for synchronization as it's usually implemented with a relaxed
+ // read.
+ uint32_t AtomicIncrementUnwindInfoUseCount() {
+ return unwindinfo_use_count_.fetch_add(1, std::memory_order_acq_rel);
+ }
+
+ uint32_t AtomicDecrementUnwindInfoUseCount() {
+ return unwindinfo_use_count_.fetch_sub(1, std::memory_order_acq_rel);
+ }
+#endif // V8_OS_WIN64
+
+ bool InitReservation(v8::PageAllocator* page_allocator, size_t requested);
+
+ void Free();
+
+ // Remap and copy the embedded builtins into this CodeRange. This method is
+ // idempotent and only performs the copy once. This property is so that this
+ // method can be used uniformly regardless of having a per-Isolate or a shared
+ // pointer cage. Returns the address of the copy.
+ //
+ // The builtins code region will be freed with the code range at tear down.
+ //
+ // When ENABLE_SLOW_DCHECKS is on, the contents of the embedded_blob_code are
+ // compared against the already copied version.
+ uint8_t* RemapEmbeddedBuiltins(Isolate* isolate,
+ const uint8_t* embedded_blob_code,
+ size_t embedded_blob_code_size);
+
+ static void InitializeProcessWideCodeRangeOnce(
+ v8::PageAllocator* page_allocator, size_t requested_size);
+
+ // If InitializeProcessWideCodeRangeOnce has been called, returns the
+ // initialized CodeRange. Otherwise returns an empty std::shared_ptr.
+ V8_EXPORT_PRIVATE static std::shared_ptr<CodeRange> GetProcessWideCodeRange();
+
+ private:
+ // Used when short builtin calls are enabled, where embedded builtins are
+ // copied into the CodeRange so calls can be nearer.
+ std::atomic<uint8_t*> embedded_blob_code_copy_{nullptr};
+
+ // When sharing a CodeRange among Isolates, calls to RemapEmbeddedBuiltins may
+ // race during Isolate::Init.
+ base::Mutex remap_embedded_builtins_mutex_;
+
+#ifdef V8_OS_WIN64
+ std::atomic<uint32_t> unwindinfo_use_count_{0};
+#endif
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CODE_RANGE_H_
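Note: the long comment on embedded_blob_code_copy() describes a create-once pattern: readers take a lock-free acquire load and may race benignly, while RemapEmbeddedBuiltins() serializes writers on remap_embedded_builtins_mutex_ and publishes the copy with a release store. A generic, simplified sketch of that pattern (illustration only, not the CodeRange class itself):

#include <atomic>
#include <cstdint>
#include <mutex>

// Simplified create-once publication, mirroring the synchronization described
// in the comment above: acquire loads for readers, mutex + release store for
// the single writer that actually creates the copy.
class RemapOnce {
 public:
  uint8_t* get() const { return copy_.load(std::memory_order_acquire); }

  uint8_t* GetOrCreate() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (uint8_t* existing = copy_.load(std::memory_order_acquire)) {
      return existing;  // Another thread already remapped; reuse its copy.
    }
    uint8_t* fresh = CreateCopy();  // Stand-in for allocate + copy + set RX.
    copy_.store(fresh, std::memory_order_release);
    return fresh;
  }

 private:
  static uint8_t* CreateCopy() { return new uint8_t[16](); }  // Placeholder.

  std::atomic<uint8_t*> copy_{nullptr};
  std::mutex mutex_;
};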
diff --git a/chromium/v8/src/heap/collection-barrier.cc b/chromium/v8/src/heap/collection-barrier.cc
index 92007690aa7..feeb23d0008 100644
--- a/chromium/v8/src/heap/collection-barrier.cc
+++ b/chromium/v8/src/heap/collection-barrier.cc
@@ -22,8 +22,7 @@ bool CollectionBarrier::CollectionRequested() {
}
LocalHeap::ThreadState CollectionBarrier::main_thread_state_relaxed() {
- LocalHeap* main_thread_local_heap =
- heap_->isolate()->main_thread_local_heap();
+ LocalHeap* main_thread_local_heap = heap_->main_thread_local_heap();
return main_thread_local_heap->state_relaxed();
}
diff --git a/chromium/v8/src/heap/combined-heap.h b/chromium/v8/src/heap/combined-heap.h
index 55664114d39..9c9ed9039fc 100644
--- a/chromium/v8/src/heap/combined-heap.h
+++ b/chromium/v8/src/heap/combined-heap.h
@@ -36,7 +36,8 @@ V8_WARN_UNUSED_RESULT inline bool IsValidHeapObject(Heap* heap,
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
return third_party_heap::Heap::IsValidHeapObject(object);
else
- return ReadOnlyHeap::Contains(object) || heap->Contains(object);
+ return ReadOnlyHeap::Contains(object) || heap->Contains(object) ||
+ heap->SharedHeapContains(object);
}
} // namespace internal
diff --git a/chromium/v8/src/heap/concurrent-allocator-inl.h b/chromium/v8/src/heap/concurrent-allocator-inl.h
index c92b91ca476..07d669b17cb 100644
--- a/chromium/v8/src/heap/concurrent-allocator-inl.h
+++ b/chromium/v8/src/heap/concurrent-allocator-inl.h
@@ -20,6 +20,7 @@ namespace internal {
AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
AllocationAlignment alignment,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
// TODO(dinfuehr): Add support for allocation observers
#ifdef DEBUG
local_heap_->VerifyCurrent();
diff --git a/chromium/v8/src/heap/concurrent-marking.cc b/chromium/v8/src/heap/concurrent-marking.cc
index eb1511f71d9..6f0aa89ebdd 100644
--- a/chromium/v8/src/heap/concurrent-marking.cc
+++ b/chromium/v8/src/heap/concurrent-marking.cc
@@ -8,6 +8,7 @@
#include <unordered_map>
#include "include/v8config.h"
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
@@ -136,13 +137,13 @@ class ConcurrentMarkingVisitor final
int VisitSeqOneByteString(Map map, SeqOneByteString object) {
if (!ShouldVisit(object)) return 0;
VisitMapPointer(object);
- return SeqOneByteString::SizeFor(object.synchronized_length());
+ return SeqOneByteString::SizeFor(object.length(kAcquireLoad));
}
int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
if (!ShouldVisit(object)) return 0;
VisitMapPointer(object);
- return SeqTwoByteString::SizeFor(object.synchronized_length());
+ return SeqTwoByteString::SizeFor(object.length(kAcquireLoad));
}
// Implements ephemeron semantics: Marks value if key is already reachable.
@@ -232,9 +233,9 @@ class ConcurrentMarkingVisitor final
template <typename T>
int VisitLeftTrimmableArray(Map map, T object) {
- // The synchronized_length() function checks that the length is a Smi.
+ // The length() function checks that the length is a Smi.
// This is not necessarily the case if the array is being left-trimmed.
- Object length = object.unchecked_synchronized_length();
+ Object length = object.unchecked_length(kAcquireLoad);
if (!ShouldVisit(object)) return 0;
// The cached length must be the actual length as the array is not black.
// Left trimming marks the array black before over-writing the length.
@@ -460,16 +461,28 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
break;
}
objects_processed++;
- // The order of the two loads is important.
- Address new_space_top = heap_->new_space()->original_top_acquire();
- Address new_space_limit = heap_->new_space()->original_limit_relaxed();
- Address new_large_object = heap_->new_lo_space()->pending_object();
+
+ Address new_space_top = kNullAddress;
+ Address new_space_limit = kNullAddress;
+ Address new_large_object = kNullAddress;
+
+ if (heap_->new_space()) {
+ // The order of the two loads is important.
+ new_space_top = heap_->new_space()->original_top_acquire();
+ new_space_limit = heap_->new_space()->original_limit_relaxed();
+ }
+
+ if (heap_->new_lo_space()) {
+ new_large_object = heap_->new_lo_space()->pending_object();
+ }
+
Address addr = object.address();
+
if ((new_space_top <= addr && addr < new_space_limit) ||
addr == new_large_object) {
local_marking_worklists.PushOnHold(object);
} else {
- Map map = object.synchronized_map(isolate);
+ Map map = object.map(isolate, kAcquireLoad);
if (is_per_context_mode) {
Address context;
if (native_context_inferrer.Infer(isolate, map, object, &context)) {
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-heap.cc b/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
index 636f666521b..006c35808f3 100644
--- a/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -5,6 +5,8 @@
#include "src/heap/cppgc-js/cpp-heap.h"
#include <cstdint>
+#include <memory>
+#include <numeric>
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
@@ -29,6 +31,7 @@
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
+#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
#include "src/heap/embedder-tracing.h"
@@ -65,6 +68,13 @@ cppgc::HeapStatistics CppHeap::CollectStatistics(
detail_level);
}
+void CppHeap::CollectCustomSpaceStatisticsAtLastGC(
+ std::vector<cppgc::CustomSpaceIndex> custom_spaces,
+ std::unique_ptr<CustomSpaceStatisticsReceiver> receiver) {
+ return internal::CppHeap::From(this)->CollectCustomSpaceStatisticsAtLastGC(
+ std::move(custom_spaces), std::move(receiver));
+}
+
void CppHeap::EnableDetachedGarbageCollectionsForTesting() {
return internal::CppHeap::From(this)
->EnableDetachedGarbageCollectionsForTesting();
@@ -200,7 +210,7 @@ UnifiedHeapMarker::UnifiedHeapMarker(Key key, Heap* v8_heap,
void UnifiedHeapMarker::AddObject(void* object) {
mutator_marking_state_.MarkAndPush(
- cppgc::internal::HeapObjectHeader::FromPayload(object));
+ cppgc::internal::HeapObjectHeader::FromObject(object));
}
} // namespace
@@ -318,10 +328,6 @@ void CppHeap::TracePrologue(TraceFlags flags) {
}
bool CppHeap::AdvanceTracing(double deadline_in_ms) {
- // TODO(chromium:1154636): The kAtomicMark/kIncrementalMark scope below is
- // needed for recording all cpp marking time. Note that it can lead to double
- // accounting since this scope is also accounted under an outer v8 scope.
- // Make sure to only account this scope once.
cppgc::internal::StatsCollector::EnabledScope stats_scope(
stats_collector(),
in_atomic_pause_ ? cppgc::internal::StatsCollector::kAtomicMark
@@ -342,8 +348,6 @@ bool CppHeap::IsTracingDone() { return marking_done_; }
void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
CHECK(!in_disallow_gc_scope());
- cppgc::internal::StatsCollector::EnabledScope stats_scope(
- stats_collector(), cppgc::internal::StatsCollector::kAtomicMark);
in_atomic_pause_ = true;
if (override_stack_state_) {
stack_state = *override_stack_state_;
@@ -359,8 +363,6 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
CHECK(in_atomic_pause_);
CHECK(marking_done_);
{
- cppgc::internal::StatsCollector::EnabledScope stats_scope(
- stats_collector(), cppgc::internal::StatsCollector::kAtomicMark);
cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(*this);
marker_->LeaveAtomicPause();
}
@@ -376,7 +378,8 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
UnifiedHeapMarkingVerifier verifier(*this);
- verifier.Run(stack_state_of_prev_gc_);
+ verifier.Run(stack_state_of_prev_gc(), stack_end_of_current_gc(),
+ stats_collector()->marked_bytes());
#endif
{
@@ -422,12 +425,17 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() {
return;
}
- if (buffered_allocated_bytes_ < 0) {
- DecreaseAllocatedSize(static_cast<size_t>(-buffered_allocated_bytes_));
+ // The calls below may trigger full GCs that are synchronous and also execute
+ // epilogue callbacks. Since such callbacks may allocate, the counter must
+ // already be zeroed by that time.
+ const int64_t bytes_to_report = buffered_allocated_bytes_;
+ buffered_allocated_bytes_ = 0;
+
+ if (bytes_to_report < 0) {
+ DecreaseAllocatedSize(static_cast<size_t>(-bytes_to_report));
} else {
- IncreaseAllocatedSize(static_cast<size_t>(buffered_allocated_bytes_));
+ IncreaseAllocatedSize(static_cast<size_t>(bytes_to_report));
}
- buffered_allocated_bytes_ = 0;
}
void CppHeap::CollectGarbageForTesting(
@@ -437,6 +445,8 @@ void CppHeap::CollectGarbageForTesting(
// Finish sweeping in case it is still running.
sweeper().FinishIfRunning();
+ SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
+
if (isolate_) {
// Go through EmbedderHeapTracer API and perform a unified heap collection.
GarbageCollectionForTesting(stack_state);
@@ -481,5 +491,82 @@ void CppHeap::FinalizeIncrementalGarbageCollectionForTesting(
sweeper_.FinishIfRunning();
}
+namespace {
+
+void ReportCustomSpaceStatistics(
+ cppgc::internal::RawHeap& raw_heap,
+ std::vector<cppgc::CustomSpaceIndex> custom_spaces,
+ std::unique_ptr<CustomSpaceStatisticsReceiver> receiver) {
+ for (auto custom_space_index : custom_spaces) {
+ const cppgc::internal::BaseSpace* space =
+ raw_heap.CustomSpace(custom_space_index);
+ size_t allocated_bytes = std::accumulate(
+ space->begin(), space->end(), 0, [](size_t sum, auto* page) {
+ return sum + page->AllocatedBytesAtLastGC();
+ });
+ receiver->AllocatedBytes(custom_space_index, allocated_bytes);
+ }
+}
+
+class CollectCustomSpaceStatisticsAtLastGCTask final : public v8::Task {
+ public:
+ static constexpr v8::base::TimeDelta kTaskDelayMs =
+ v8::base::TimeDelta::FromMilliseconds(10);
+
+ CollectCustomSpaceStatisticsAtLastGCTask(
+ cppgc::internal::HeapBase& heap,
+ std::vector<cppgc::CustomSpaceIndex> custom_spaces,
+ std::unique_ptr<CustomSpaceStatisticsReceiver> receiver)
+ : heap_(heap),
+ custom_spaces_(std::move(custom_spaces)),
+ receiver_(std::move(receiver)) {}
+
+ void Run() final {
+ cppgc::internal::Sweeper& sweeper = heap_.sweeper();
+ if (sweeper.PerformSweepOnMutatorThread(
+ heap_.platform()->MonotonicallyIncreasingTime() +
+ kStepSizeMs.InSecondsF())) {
+ // Sweeping is done.
+ DCHECK(!sweeper.IsSweepingInProgress());
+ ReportCustomSpaceStatistics(heap_.raw_heap(), std::move(custom_spaces_),
+ std::move(receiver_));
+ } else {
+ heap_.platform()->GetForegroundTaskRunner()->PostDelayedTask(
+ std::make_unique<CollectCustomSpaceStatisticsAtLastGCTask>(
+ heap_, std::move(custom_spaces_), std::move(receiver_)),
+ kTaskDelayMs.InSecondsF());
+ }
+ }
+
+ private:
+ static constexpr v8::base::TimeDelta kStepSizeMs =
+ v8::base::TimeDelta::FromMilliseconds(5);
+
+ cppgc::internal::HeapBase& heap_;
+ std::vector<cppgc::CustomSpaceIndex> custom_spaces_;
+ std::unique_ptr<CustomSpaceStatisticsReceiver> receiver_;
+};
+
+constexpr v8::base::TimeDelta
+ CollectCustomSpaceStatisticsAtLastGCTask::kTaskDelayMs;
+constexpr v8::base::TimeDelta
+ CollectCustomSpaceStatisticsAtLastGCTask::kStepSizeMs;
+
+} // namespace
+
+void CppHeap::CollectCustomSpaceStatisticsAtLastGC(
+ std::vector<cppgc::CustomSpaceIndex> custom_spaces,
+ std::unique_ptr<CustomSpaceStatisticsReceiver> receiver) {
+ if (sweeper().IsSweepingInProgress()) {
+ platform()->GetForegroundTaskRunner()->PostDelayedTask(
+ std::make_unique<CollectCustomSpaceStatisticsAtLastGCTask>(
+ AsBase(), std::move(custom_spaces), std::move(receiver)),
+ CollectCustomSpaceStatisticsAtLastGCTask::kTaskDelayMs.InSecondsF());
+ return;
+ }
+ ReportCustomSpaceStatistics(raw_heap(), std::move(custom_spaces),
+ std::move(receiver));
+}
+
} // namespace internal
} // namespace v8
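Note: CollectCustomSpaceStatisticsAtLastGC() reports allocated bytes per custom space once sweeping has finished, re-posting a delayed task until then. An embedder-side usage sketch; it assumes the public v8::CustomSpaceStatisticsReceiver interface from include/v8-cppgc.h whose AllocatedBytes() callback is invoked by the code above:

#include <cstdio>
#include <memory>

#include "include/v8-cppgc.h"

// Hypothetical receiver that simply prints the reported sizes.
class PrintingReceiver final : public v8::CustomSpaceStatisticsReceiver {
 public:
  void AllocatedBytes(cppgc::CustomSpaceIndex index, size_t bytes) final {
    std::printf("custom space %zu: %zu bytes live after the last GC\n",
                index.value, bytes);
  }
};

// Ask the CppHeap for last-GC statistics of a single custom space. The result
// arrives asynchronously once sweeping is done.
void ReportCustomSpace(v8::CppHeap& cpp_heap,
                       cppgc::CustomSpaceIndex space_index) {
  cpp_heap.CollectCustomSpaceStatisticsAtLastGC(
      {space_index}, std::make_unique<PrintingReceiver>());
}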
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-heap.h b/chromium/v8/src/heap/cppgc-js/cpp-heap.h
index b13fd25a323..cf99b8fe602 100644
--- a/chromium/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/chromium/v8/src/heap/cppgc-js/cpp-heap.h
@@ -5,6 +5,11 @@
#ifndef V8_HEAP_CPPGC_JS_CPP_HEAP_H_
#define V8_HEAP_CPPGC_JS_CPP_HEAP_H_
+#if CPPGC_IS_STANDALONE
+static_assert(
+ false, "V8 targets can not be built with cppgc_is_standalone set to true.");
+#endif
+
#include "include/v8-cppgc.h"
#include "include/v8.h"
#include "src/base/macros.h"
@@ -55,6 +60,10 @@ class V8_EXPORT_PRIVATE CppHeap final
void CollectGarbageForTesting(
cppgc::internal::GarbageCollector::Config::StackState);
+ void CollectCustomSpaceStatisticsAtLastGC(
+ std::vector<cppgc::CustomSpaceIndex>,
+ std::unique_ptr<CustomSpaceStatisticsReceiver>);
+
// v8::EmbedderHeapTracer interface.
void RegisterV8References(
const std::vector<std::pair<void*, void*> >& embedder_fields) final;
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc b/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc
index 79a863e3026..17929247ee4 100644
--- a/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -382,7 +382,7 @@ class CppGraphBuilderImpl final {
EmbedderNode* AddNode(const HeapObjectHeader& header) {
return static_cast<EmbedderNode*>(
graph_.AddNode(std::unique_ptr<v8::EmbedderGraph::Node>{
- new EmbedderNode(header.GetName().value, header.GetSize())}));
+ new EmbedderNode(header.GetName().value, header.AllocatedSize())}));
}
void AddEdge(State& parent, const HeapObjectHeader& header) {
@@ -418,7 +418,7 @@ class CppGraphBuilderImpl final {
if (HasEmbedderDataBackref(
reinterpret_cast<v8::internal::Isolate*>(cpp_heap_.isolate()),
- v8_value, parent.header()->Payload())) {
+ v8_value, parent.header()->ObjectStart())) {
parent.get_node()->SetWrapperNode(v8_node);
auto* profiler =
@@ -512,7 +512,7 @@ class VisiblityVisitor final : public JSVisitor {
void Visit(const void*, cppgc::TraceDescriptor desc) final {
graph_builder_.VisitForVisibility(
&parent_scope_.ParentAsRegularState(),
- HeapObjectHeader::FromPayload(desc.base_object_payload));
+ HeapObjectHeader::FromObject(desc.base_object_payload));
}
void VisitRoot(const void*, cppgc::TraceDescriptor,
const cppgc::SourceLocation&) final {}
@@ -556,13 +556,13 @@ class GraphBuildingVisitor final : public JSVisitor {
void Visit(const void*, cppgc::TraceDescriptor desc) final {
graph_builder_.AddEdge(
parent_scope_.ParentAsRegularState(),
- HeapObjectHeader::FromPayload(desc.base_object_payload));
+ HeapObjectHeader::FromObject(desc.base_object_payload));
}
void VisitRoot(const void*, cppgc::TraceDescriptor desc,
const cppgc::SourceLocation& loc) final {
graph_builder_.VisitRootForGraphBuilding(
parent_scope_.ParentAsRootState(),
- HeapObjectHeader::FromPayload(desc.base_object_payload), loc);
+ HeapObjectHeader::FromObject(desc.base_object_payload), loc);
}
void VisitWeakRoot(const void*, cppgc::TraceDescriptor, cppgc::WeakCallback,
const void*, const cppgc::SourceLocation&) final {}
diff --git a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
index ea14b520480..b0f8595ec74 100644
--- a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
+++ b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
@@ -35,7 +35,7 @@ class UnifiedHeapVerificationVisitor final : public JSVisitor {
void VisitWeakContainer(const void* object, cppgc::TraceDescriptor,
cppgc::TraceDescriptor weak_desc, cppgc::WeakCallback,
- const void*) {
+ const void*) final {
if (!object) return;
// Contents of weak containers are found themselves through page iteration
@@ -58,13 +58,8 @@ class UnifiedHeapVerificationVisitor final : public JSVisitor {
UnifiedHeapMarkingVerifier::UnifiedHeapMarkingVerifier(
cppgc::internal::HeapBase& heap_base)
: MarkingVerifierBase(
- heap_base, std::make_unique<UnifiedHeapVerificationVisitor>(state_)) {
-}
-
-void UnifiedHeapMarkingVerifier::SetCurrentParent(
- const cppgc::internal::HeapObjectHeader* parent) {
- state_.SetCurrentParent(parent);
-}
+ heap_base, state_,
+ std::make_unique<UnifiedHeapVerificationVisitor>(state_)) {}
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
index 3a54b4dd323..bb2ac09e385 100644
--- a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
+++ b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
@@ -16,8 +16,6 @@ class V8_EXPORT_PRIVATE UnifiedHeapMarkingVerifier final
explicit UnifiedHeapMarkingVerifier(cppgc::internal::HeapBase&);
~UnifiedHeapMarkingVerifier() final = default;
- void SetCurrentParent(const cppgc::internal::HeapObjectHeader*) final;
-
private:
// TODO(chromium:1056170): Use a verification state that can handle JS
// references.
diff --git a/chromium/v8/src/heap/cppgc/caged-heap.cc b/chromium/v8/src/heap/cppgc/caged-heap.cc
index 951fb0e853c..3a8304f4481 100644
--- a/chromium/v8/src/heap/cppgc/caged-heap.cc
+++ b/chromium/v8/src/heap/cppgc/caged-heap.cc
@@ -45,6 +45,27 @@ VirtualMemory ReserveCagedHeap(PageAllocator* platform_allocator) {
UNREACHABLE();
}
+class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
+ public:
+ CppgcBoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
+ size_t size, size_t allocate_page_size)
+ : BoundedPageAllocator(page_allocator, start, size, allocate_page_size) {}
+
+ bool FreePages(void* address, size_t size) final {
+ // BoundedPageAllocator is not guaranteed to allocate zeroed pages.
+ // Specifically, it is possible that BPA frees a page and then tries to
+ // reallocate the same page before the OS has had a chance to asynchronously
+ // reclaim that page. In such cases, the contents of the page would not have
+ // been cleared by the OS and the reallocated page will keep its previous
+ // contents. To mitigate this problem, CppgcBoundedPageAllocator clears all
+ // pages before they are freed. This also includes protected guard pages, so
+ // CppgcBoundedPageAllocator needs to update permissions before clearing.
+ SetPermissions(address, size, Permission::kReadWrite);
+ memset(address, 0, size);
+ return v8::base::BoundedPageAllocator::FreePages(address, size);
+ }
+};
+
} // namespace
CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
@@ -73,7 +94,7 @@ CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
caged_heap_start -
reinterpret_cast<CagedAddress>(reserved_area_.address());
- bounded_allocator_ = std::make_unique<CagedHeap::AllocatorType>(
+ bounded_allocator_ = std::make_unique<CppgcBoundedPageAllocator>(
platform_allocator, caged_heap_start,
reserved_area_.size() - local_data_size_with_padding, kPageSize);
}
diff --git a/chromium/v8/src/heap/cppgc/compactor.cc b/chromium/v8/src/heap/cppgc/compactor.cc
index f4498e7fbc3..5f687050144 100644
--- a/chromium/v8/src/heap/cppgc/compactor.cc
+++ b/chromium/v8/src/heap/cppgc/compactor.cc
@@ -15,6 +15,7 @@
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-poisoner.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -129,7 +130,7 @@ void MovableReferences::AddOrFilter(MovableReference* slot) {
interior_movable_references_.find(slot));
interior_movable_references_.emplace(slot, nullptr);
#if DEBUG
- interior_slot_to_object_.emplace(slot, slot_header.Payload());
+ interior_slot_to_object_.emplace(slot, slot_header.ObjectStart());
#endif // DEBUG
}
@@ -144,8 +145,8 @@ void MovableReferences::Relocate(Address from, Address to) {
// find the corresponding slot A.x. Object A may be moved already and the
// memory may have been freed, which would result in a crash.
if (!interior_movable_references_.empty()) {
- const HeapObjectHeader& header = HeapObjectHeader::FromPayload(to);
- const size_t size = header.GetSize() - sizeof(HeapObjectHeader);
+ const HeapObjectHeader& header = HeapObjectHeader::FromObject(to);
+ const size_t size = header.ObjectSize();
RelocateInteriorReferences(from, to, size);
}
@@ -275,14 +276,14 @@ class CompactionState final {
// Return remaining available pages to the free page pool, decommitting
// them from the pagefile.
for (NormalPage* page : available_pages_) {
- SET_MEMORY_INACCESSIBLE(page->PayloadStart(), page->PayloadSize());
+ SetMemoryInaccessible(page->PayloadStart(), page->PayloadSize());
NormalPage::Destroy(page);
}
}
void FinishCompactingPage(NormalPage* page) {
-#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
- defined(MEMORY_SANITIZER)
+#if DEBUG || defined(V8_USE_MEMORY_SANITIZER) || \
+ defined(V8_USE_ADDRESS_SANITIZER)
// Zap the unused portion, until it is either compacted into or freed.
if (current_page_ != page) {
ZapMemory(page->PayloadStart(), page->PayloadSize());
@@ -303,7 +304,7 @@ class CompactionState final {
current_page_->PayloadSize() - used_bytes_in_current_page_;
Address payload = current_page_->PayloadStart();
Address free_start = payload + used_bytes_in_current_page_;
- SET_MEMORY_INACCESSIBLE(free_start, freed_size);
+ SetMemoryInaccessible(free_start, freed_size);
space_->free_list().Add({free_start, freed_size});
current_page_->object_start_bitmap().SetBit(free_start);
}
@@ -329,7 +330,7 @@ void CompactPage(NormalPage* page, CompactionState& compaction_state) {
header_address < page->PayloadEnd();) {
HeapObjectHeader* header =
reinterpret_cast<HeapObjectHeader*>(header_address);
- size_t size = header->GetSize();
+ size_t size = header->AllocatedSize();
DCHECK_GT(size, 0u);
DCHECK_LT(size, kPageSize);
@@ -349,8 +350,8 @@ void CompactPage(NormalPage* page, CompactionState& compaction_state) {
// As compaction is under way, leave the freed memory accessible
// while compacting the rest of the page. We just zap the payload
// to catch out other finalizers trying to access it.
-#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
- defined(MEMORY_SANITIZER)
+#if DEBUG || defined(V8_USE_MEMORY_SANITIZER) || \
+ defined(V8_USE_ADDRESS_SANITIZER)
ZapMemory(header, size);
#endif
header_address += size;
diff --git a/chromium/v8/src/heap/cppgc/concurrent-marker.cc b/chromium/v8/src/heap/cppgc/concurrent-marker.cc
index 34953b9ec3b..6763515f280 100644
--- a/chromium/v8/src/heap/cppgc/concurrent-marker.cc
+++ b/chromium/v8/src/heap/cppgc/concurrent-marker.cc
@@ -125,7 +125,7 @@ void ConcurrentMarkingTask::ProcessWorklists(
BasePage::FromPayload(item.base_object_payload)
->SynchronizedLoad();
const HeapObjectHeader& header =
- HeapObjectHeader::FromPayload(item.base_object_payload);
+ HeapObjectHeader::FromObject(item.base_object_payload);
DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
DCHECK(header.IsMarked<AccessMode::kAtomic>());
concurrent_marking_state.AccountMarkedBytes(header);
diff --git a/chromium/v8/src/heap/cppgc/explicit-management.cc b/chromium/v8/src/heap/cppgc/explicit-management.cc
index b3ab5f5b515..6e327339aa1 100644
--- a/chromium/v8/src/heap/cppgc/explicit-management.cc
+++ b/chromium/v8/src/heap/cppgc/explicit-management.cc
@@ -9,50 +9,46 @@
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
-#include "src/heap/cppgc/sanitizers.h"
+#include "src/heap/cppgc/memory.h"
namespace cppgc {
namespace internal {
namespace {
-std::pair<bool, BasePage*> CanModifyObject(void* object) {
- // object is guaranteed to be of type GarbageCollected, so getting the
- // BasePage is okay for regular and large objects.
- auto* base_page = BasePage::FromPayload(object);
- auto* heap = base_page->heap();
+bool InGC(HeapHandle& heap_handle) {
+ const auto& heap = HeapBase::From(heap_handle);
// Whenever the GC is active, avoid modifying the object as it may mess with
// state that the GC needs.
- const bool in_gc = heap->in_atomic_pause() || heap->marker() ||
- heap->sweeper().IsSweepingInProgress();
- return {!in_gc, base_page};
+ return heap.in_atomic_pause() || heap.marker() ||
+ heap.sweeper().IsSweepingInProgress();
}
} // namespace
-void FreeUnreferencedObject(void* object) {
- bool can_free;
- BasePage* base_page;
- std::tie(can_free, base_page) = CanModifyObject(object);
- if (!can_free) {
+void FreeUnreferencedObject(HeapHandle& heap_handle, void* object) {
+ if (InGC(heap_handle)) {
return;
}
- auto& header = HeapObjectHeader::FromPayload(object);
+ auto& header = HeapObjectHeader::FromObject(object);
header.Finalize();
+ // `object` is guaranteed to be of type GarbageCollected, so getting the
+ // BasePage is okay for regular and large objects.
+ BasePage* base_page = BasePage::FromPayload(object);
if (base_page->is_large()) { // Large object.
base_page->space()->RemovePage(base_page);
base_page->heap()->stats_collector()->NotifyExplicitFree(
LargePage::From(base_page)->PayloadSize());
LargePage::Destroy(LargePage::From(base_page));
} else { // Regular object.
- const size_t header_size = header.GetSize();
+ const size_t header_size = header.AllocatedSize();
auto* normal_page = NormalPage::From(base_page);
auto& normal_space = *static_cast<NormalPageSpace*>(base_page->space());
auto& lab = normal_space.linear_allocation_buffer();
- ConstAddress payload_end = header.PayloadEnd();
- SET_MEMORY_INACCESSIBLE(&header, header_size);
+ ConstAddress payload_end = header.ObjectEnd();
+ SetMemoryInaccessible(&header, header_size);
if (payload_end == lab.start()) { // Returning to LAB.
lab.Set(reinterpret_cast<Address>(&header), lab.size() + header_size);
normal_page->object_start_bitmap().ClearBit(lab.start());
@@ -69,18 +65,18 @@ namespace {
bool Grow(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
size_t size_delta) {
- DCHECK_GE(new_size, header.GetSize() + kAllocationGranularity);
+ DCHECK_GE(new_size, header.AllocatedSize() + kAllocationGranularity);
DCHECK_GE(size_delta, kAllocationGranularity);
DCHECK(!base_page.is_large());
auto& normal_space = *static_cast<NormalPageSpace*>(base_page.space());
auto& lab = normal_space.linear_allocation_buffer();
- if (lab.start() == header.PayloadEnd() && lab.size() >= size_delta) {
+ if (lab.start() == header.ObjectEnd() && lab.size() >= size_delta) {
// LABs are considered used memory which means that no allocated size
// adjustments are needed.
Address delta_start = lab.Allocate(size_delta);
- SET_MEMORY_ACCESSIBLE(delta_start, size_delta);
- header.SetSize(new_size);
+ SetMemoryAccessible(delta_start, size_delta);
+ header.SetAllocatedSize(new_size);
return true;
}
return false;
@@ -88,30 +84,30 @@ bool Grow(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
size_t size_delta) {
- DCHECK_GE(header.GetSize(), new_size + kAllocationGranularity);
+ DCHECK_GE(header.AllocatedSize(), new_size + kAllocationGranularity);
DCHECK_GE(size_delta, kAllocationGranularity);
DCHECK(!base_page.is_large());
auto& normal_space = *static_cast<NormalPageSpace*>(base_page.space());
auto& lab = normal_space.linear_allocation_buffer();
- Address free_start = header.PayloadEnd() - size_delta;
- if (lab.start() == header.PayloadEnd()) {
+ Address free_start = header.ObjectEnd() - size_delta;
+ if (lab.start() == header.ObjectEnd()) {
DCHECK_EQ(free_start, lab.start() - size_delta);
// LABs are considered used memory which means that no allocated size
// adjustments are needed.
lab.Set(free_start, lab.size() + size_delta);
- SET_MEMORY_INACCESSIBLE(lab.start(), size_delta);
- header.SetSize(new_size);
+ SetMemoryInaccessible(lab.start(), size_delta);
+ header.SetAllocatedSize(new_size);
return true;
}
// Heuristic: Only return memory to the free list if the block is larger than
// the smallest size class.
if (size_delta >= ObjectAllocator::kSmallestSpaceSize) {
- SET_MEMORY_INACCESSIBLE(free_start, size_delta);
+ SetMemoryInaccessible(free_start, size_delta);
base_page.heap()->stats_collector()->NotifyExplicitFree(size_delta);
normal_space.free_list().Add({free_start, size_delta});
NormalPage::From(&base_page)->object_start_bitmap().SetBit(free_start);
- header.SetSize(new_size);
+ header.SetAllocatedSize(new_size);
}
// Return success in any case, as we want to avoid that embedders start
// copying memory because of small deltas.
@@ -121,10 +117,11 @@ bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
} // namespace
bool Resize(void* object, size_t new_object_size) {
- bool can_resize;
- BasePage* base_page;
- std::tie(can_resize, base_page) = CanModifyObject(object);
- if (!can_resize) {
+ // `object` is guaranteed to be of type GarbageCollected, so getting the
+ // BasePage is okay for regular and large objects.
+ BasePage* base_page = BasePage::FromPayload(object);
+
+ if (InGC(*base_page->heap())) {
return false;
}
@@ -136,8 +133,8 @@ bool Resize(void* object, size_t new_object_size) {
const size_t new_size = RoundUp<kAllocationGranularity>(
sizeof(HeapObjectHeader) + new_object_size);
- auto& header = HeapObjectHeader::FromPayload(object);
- const size_t old_size = header.GetSize();
+ auto& header = HeapObjectHeader::FromObject(object);
+ const size_t old_size = header.AllocatedSize();
if (new_size > old_size) {
return Grow(header, *base_page, new_size, new_size - old_size);
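
Resize() above first rounds the requested object size, header included, up to the allocation granularity before deciding between Grow() and Shrink(). A hedged sketch of that computation; the concrete values of kHeaderSize and kAllocationGranularity below are illustrative stand-ins:

#include <cstddef>

constexpr std::size_t kHeaderSize = 8;             // stand-in for sizeof(HeapObjectHeader)
constexpr std::size_t kAllocationGranularity = 8;  // stand-in for cppgc's granularity

// Granularity must be a power of two for the bit trick below.
constexpr std::size_t RoundUp(std::size_t value, std::size_t granularity) {
  return (value + granularity - 1) & ~(granularity - 1);
}

// Mirrors: RoundUp<kAllocationGranularity>(sizeof(HeapObjectHeader) + new_object_size).
constexpr std::size_t NewAllocatedSize(std::size_t new_object_size) {
  return RoundUp(kHeaderSize + new_object_size, kAllocationGranularity);
}

static_assert(NewAllocatedSize(1) == 16, "header plus padding to the next granule");
static_assert(NewAllocatedSize(9) == 24, "spilling past one granule rounds to the next");
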
diff --git a/chromium/v8/src/heap/cppgc/free-list.cc b/chromium/v8/src/heap/cppgc/free-list.cc
index 705d31725ad..600e15312cd 100644
--- a/chromium/v8/src/heap/cppgc/free-list.cc
+++ b/chromium/v8/src/heap/cppgc/free-list.cc
@@ -8,9 +8,9 @@
#include "include/cppgc/internal/logging.h"
#include "src/base/bits.h"
+#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace cppgc {
namespace internal {
@@ -132,7 +132,7 @@ FreeList::Block FreeList::Allocate(size_t allocation_size) {
// Final bucket candidate; check initial entry if it is able
// to service this allocation. Do not perform a linear scan,
// as it is considered too costly.
- if (!entry || entry->GetSize() < allocation_size) break;
+ if (!entry || entry->AllocatedSize() < allocation_size) break;
}
if (entry) {
if (!entry->Next()) {
@@ -141,7 +141,7 @@ FreeList::Block FreeList::Allocate(size_t allocation_size) {
}
entry->Unlink(&free_list_heads_[index]);
biggest_free_list_index_ = index;
- return {entry, entry->GetSize()};
+ return {entry, entry->AllocatedSize()};
}
}
biggest_free_list_index_ = index;
@@ -158,7 +158,7 @@ size_t FreeList::Size() const {
size_t size = 0;
for (auto* entry : free_list_heads_) {
while (entry) {
- size += entry->GetSize();
+ size += entry->AllocatedSize();
entry = entry->Next();
}
}
@@ -175,7 +175,7 @@ bool FreeList::ContainsForTesting(Block block) const {
for (Entry* entry = list; entry; entry = entry->Next()) {
if (entry <= block.address &&
(reinterpret_cast<Address>(block.address) + block.size <=
- reinterpret_cast<Address>(entry) + entry->GetSize()))
+ reinterpret_cast<Address>(entry) + entry->AllocatedSize()))
return true;
}
}
@@ -204,7 +204,7 @@ void FreeList::CollectStatistics(
size_t entry_size = 0;
for (Entry* entry = free_list_heads_[i]; entry; entry = entry->Next()) {
++entry_count;
- entry_size += entry->GetSize();
+ entry_size += entry->AllocatedSize();
}
bucket_size.push_back(static_cast<size_t>(1) << i);
free_count.push_back(entry_count);
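
The free list touched above is bucketed by entry size; the statistics hunk records bucket_size as 1 << i, i.e. buckets are indexed by the power of two of the entry's AllocatedSize(), which is what lets Allocate() skip whole buckets instead of scanning linearly. An illustrative, self-contained version of that size-to-bucket mapping, written as a plain loop rather than the bit-scan helper the real code presumably uses:

#include <cstddef>

// floor(log2(size)): bucket i would hold free entries with sizes in [2^i, 2^(i+1)).
std::size_t BucketIndexForSize(std::size_t size) {
  std::size_t index = 0;
  while (size >>= 1) ++index;
  return index;
}

int main() {
  // A 48-byte free entry lands in bucket 5, which covers [32, 64).
  return BucketIndexForSize(48) == 5 ? 0 : 1;
}
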
diff --git a/chromium/v8/src/heap/cppgc/gc-info-table.cc b/chromium/v8/src/heap/cppgc/gc-info-table.cc
index 6b177848cbb..7462ba8a21d 100644
--- a/chromium/v8/src/heap/cppgc/gc-info-table.cc
+++ b/chromium/v8/src/heap/cppgc/gc-info-table.cc
@@ -35,8 +35,9 @@ PageAllocator* GetAllocator(PageAllocator* page_allocator) {
default_page_allocator;
page_allocator = default_page_allocator.get();
}
- // TODO(chromium:1056170): Wrap page_allocator into LsanPageAllocator when
- // running with LEAK_SANITIZER.
+ // No need to introduce LSAN support for PageAllocator, as `GCInfoTable` is
+ // already a leaky object and the table payload (`GCInfoTable::table_`) should
+ // not refer to dynamically allocated objects.
return page_allocator;
}
diff --git a/chromium/v8/src/heap/cppgc/heap-base.cc b/chromium/v8/src/heap/cppgc/heap-base.cc
index f89c4c9f112..c7664f09c69 100644
--- a/chromium/v8/src/heap/cppgc/heap-base.cc
+++ b/chromium/v8/src/heap/cppgc/heap-base.cc
@@ -5,8 +5,8 @@
#include "src/heap/cppgc/heap-base.h"
#include "include/cppgc/heap-consistency.h"
-#include "src/base/bounded-page-allocator.h"
#include "src/base/platform/platform.h"
+#include "src/base/sanitizer/lsan-page-allocator.h"
#include "src/heap/base/stack.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -15,6 +15,7 @@
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/marking-verifier.h"
+#include "src/heap/cppgc/object-view.h"
#include "src/heap/cppgc/page-memory.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -35,13 +36,7 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
private:
static size_t ObjectSize(const HeapObjectHeader* header) {
- const size_t size =
- header->IsLargeObject()
- ? static_cast<const LargePage*>(BasePage::FromPayload(header))
- ->PayloadSize()
- : header->GetSize();
- DCHECK_GE(size, sizeof(HeapObjectHeader));
- return size - sizeof(HeapObjectHeader);
+ return ObjectView(*header).Size();
}
bool VisitHeapObjectHeader(HeapObjectHeader* header) {
@@ -62,13 +57,16 @@ HeapBase::HeapBase(
std::unique_ptr<MetricRecorder> histogram_recorder)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
+#if defined(LEAK_SANITIZER)
+ lsan_page_allocator_(std::make_unique<v8::base::LsanPageAllocator>(
+ platform_->GetPageAllocator())),
+#endif // LEAK_SANITIZER
#if defined(CPPGC_CAGED_HEAP)
- caged_heap_(this, platform_->GetPageAllocator()),
+ caged_heap_(this, page_allocator()),
page_backend_(std::make_unique<PageBackend>(&caged_heap_.allocator())),
-#else
- page_backend_(
- std::make_unique<PageBackend>(platform_->GetPageAllocator())),
-#endif
+#else // !CPPGC_CAGED_HEAP
+ page_backend_(std::make_unique<PageBackend>(page_allocator())),
+#endif // !CPPGC_CAGED_HEAP
stats_collector_(std::make_unique<StatsCollector>(
std::move(histogram_recorder), platform_.get())),
stack_(std::make_unique<heap::base::Stack>(
@@ -85,6 +83,14 @@ HeapBase::HeapBase(
HeapBase::~HeapBase() = default;
+PageAllocator* HeapBase::page_allocator() const {
+#if defined(LEAK_SANITIZER)
+ return lsan_page_allocator_.get();
+#else // !LEAK_SANITIZER
+ return platform_->GetPageAllocator();
+#endif // !LEAK_SANITIZER
+}
+
size_t HeapBase::ObjectPayloadSize() const {
return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
}
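
With the change above, every page allocation in HeapBase goes through page_allocator(), which hands out an LSAN-wrapping allocator only in LEAK_SANITIZER builds and the plain platform allocator otherwise. A reduced sketch of the same compile-time selection; PageAllocatorStandIn and LsanPageAllocatorStandIn are placeholders, not the V8 classes:

#include <memory>

struct PageAllocatorStandIn {};

// Stand-in for v8::base::LsanPageAllocator: wraps another allocator so that
// leak detection can see the pages it hands out.
struct LsanPageAllocatorStandIn : PageAllocatorStandIn {
  explicit LsanPageAllocatorStandIn(PageAllocatorStandIn*) {}
};

class HeapBaseSketch {
 public:
  PageAllocatorStandIn* page_allocator() {
#if defined(LEAK_SANITIZER)
    return lsan_page_allocator_.get();  // wrapped allocator in LSAN builds
#else
    return &platform_page_allocator_;   // plain platform allocator otherwise
#endif
  }

 private:
  PageAllocatorStandIn platform_page_allocator_;
#if defined(LEAK_SANITIZER)
  std::unique_ptr<LsanPageAllocatorStandIn> lsan_page_allocator_ =
      std::make_unique<LsanPageAllocatorStandIn>(&platform_page_allocator_);
#endif
};
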
diff --git a/chromium/v8/src/heap/cppgc/heap-base.h b/chromium/v8/src/heap/cppgc/heap-base.h
index f9bdb95c04a..81365417180 100644
--- a/chromium/v8/src/heap/cppgc/heap-base.h
+++ b/chromium/v8/src/heap/cppgc/heap-base.h
@@ -19,6 +19,7 @@
#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/process-heap-statistics.h"
+#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sweeper.h"
#include "v8config.h" // NOLINT(build/include_directory)
@@ -27,6 +28,12 @@
#include "src/heap/cppgc/caged-heap.h"
#endif
+namespace v8 {
+namespace base {
+class LsanPageAllocator;
+} // namespace base
+} // namespace v8
+
namespace heap {
namespace base {
class Stack;
@@ -152,6 +159,9 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
size_t ObjectPayloadSize() const;
StackSupport stack_support() const { return stack_support_; }
+ const EmbedderStackState* override_stack_state() const {
+ return override_stack_state_.get();
+ }
void AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
@@ -172,6 +182,11 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
stack_state_of_prev_gc_ = stack_state;
}
+ uintptr_t stack_end_of_current_gc() const { return stack_end_of_current_gc_; }
+ void SetStackEndOfCurrentGC(uintptr_t stack_end) {
+ stack_end_of_current_gc_ = stack_end;
+ }
+
void SetInAtomicPauseForTesting(bool value) { in_atomic_pause_ = value; }
virtual void StartIncrementalGarbageCollectionForTesting() = 0;
@@ -189,11 +204,20 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
void ExecutePreFinalizers();
+ PageAllocator* page_allocator() const;
+
RawHeap raw_heap_;
std::shared_ptr<cppgc::Platform> platform_;
+
+#if defined(LEAK_SANITIZER)
+ std::unique_ptr<v8::base::LsanPageAllocator> lsan_page_allocator_;
+#endif // LEAK_SANITIZER
+
+ HeapRegistry::Subscription heap_registry_subscription_{*this};
+
#if defined(CPPGC_CAGED_HEAP)
CagedHeap caged_heap_;
-#endif
+#endif // CPPGC_CAGED_HEAP
std::unique_ptr<PageBackend> page_backend_;
std::unique_ptr<StatsCollector> stats_collector_;
@@ -224,6 +248,10 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
EmbedderStackState::kNoHeapPointers;
std::unique_ptr<EmbedderStackState> override_stack_state_;
+ // Marker that signals end of the interesting stack region in which on-heap
+ // pointers can be found.
+ uintptr_t stack_end_of_current_gc_ = 0;
+
bool in_atomic_pause_ = false;
friend class MarkerBase::IncrementalMarkingTask;
diff --git a/chromium/v8/src/heap/cppgc/heap-object-header.cc b/chromium/v8/src/heap/cppgc/heap-object-header.cc
index 0f5530114cb..5ff0e230e7f 100644
--- a/chromium/v8/src/heap/cppgc/heap-object-header.cc
+++ b/chromium/v8/src/heap/cppgc/heap-object-header.cc
@@ -6,9 +6,9 @@
#include "include/cppgc/internal/api-constants.h"
#include "src/base/macros.h"
+#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/heap-page.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace cppgc {
namespace internal {
@@ -28,17 +28,17 @@ void HeapObjectHeader::Finalize() {
IsLargeObject()
? LargePage::From(BasePage::FromPayload(this))->ObjectSize()
: ObjectSize();
- ASAN_UNPOISON_MEMORY_REGION(Payload(), size);
+ ASAN_UNPOISON_MEMORY_REGION(ObjectStart(), size);
#endif // V8_USE_ADDRESS_SANITIZER
const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
if (gc_info.finalize) {
- gc_info.finalize(Payload());
+ gc_info.finalize(ObjectStart());
}
}
HeapObjectName HeapObjectHeader::GetName() const {
const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
- return gc_info.name(Payload());
+ return gc_info.name(ObjectStart());
}
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/heap-object-header.h b/chromium/v8/src/heap/cppgc/heap-object-header.h
index 7e940ca3477..a50d115e52b 100644
--- a/chromium/v8/src/heap/cppgc/heap-object-header.h
+++ b/chromium/v8/src/heap/cppgc/heap-object-header.h
@@ -57,22 +57,22 @@ class HeapObjectHeader {
static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
static constexpr uint16_t kLargeObjectSizeInHeader = 0;
- inline static HeapObjectHeader& FromPayload(void* address);
- inline static const HeapObjectHeader& FromPayload(const void* address);
+ inline static HeapObjectHeader& FromObject(void* address);
+ inline static const HeapObjectHeader& FromObject(const void* address);
inline HeapObjectHeader(size_t size, GCInfoIndex gc_info_index);
- // The payload starts directly after the HeapObjectHeader.
- inline Address Payload() const;
+ // The object starts directly after the HeapObjectHeader.
+ inline Address ObjectStart() const;
template <AccessMode mode = AccessMode::kNonAtomic>
- inline Address PayloadEnd() const;
+ inline Address ObjectEnd() const;
template <AccessMode mode = AccessMode::kNonAtomic>
inline GCInfoIndex GetGCInfoIndex() const;
template <AccessMode mode = AccessMode::kNonAtomic>
- inline size_t GetSize() const;
- inline void SetSize(size_t size);
+ inline size_t AllocatedSize() const;
+ inline void SetAllocatedSize(size_t size);
template <AccessMode mode = AccessMode::kNonAtomic>
inline size_t ObjectSize() const;
@@ -149,15 +149,15 @@ static_assert(kAllocationGranularity == sizeof(HeapObjectHeader),
"guarantee alignment");
// static
-HeapObjectHeader& HeapObjectHeader::FromPayload(void* payload) {
- return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(payload) -
+HeapObjectHeader& HeapObjectHeader::FromObject(void* object) {
+ return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(object) -
sizeof(HeapObjectHeader));
}
// static
-const HeapObjectHeader& HeapObjectHeader::FromPayload(const void* payload) {
+const HeapObjectHeader& HeapObjectHeader::FromObject(const void* object) {
return *reinterpret_cast<const HeapObjectHeader*>(
- static_cast<ConstAddress>(payload) - sizeof(HeapObjectHeader));
+ static_cast<ConstAddress>(object) - sizeof(HeapObjectHeader));
}
HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
@@ -183,16 +183,16 @@ HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
#endif // DEBUG
}
-Address HeapObjectHeader::Payload() const {
+Address HeapObjectHeader::ObjectStart() const {
return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
sizeof(HeapObjectHeader);
}
template <AccessMode mode>
-Address HeapObjectHeader::PayloadEnd() const {
+Address HeapObjectHeader::ObjectEnd() const {
DCHECK(!IsLargeObject());
return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
- GetSize<mode>();
+ AllocatedSize<mode>();
}
template <AccessMode mode>
@@ -203,7 +203,7 @@ GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
}
template <AccessMode mode>
-size_t HeapObjectHeader::GetSize() const {
+size_t HeapObjectHeader::AllocatedSize() const {
// Size is immutable after construction while either marking or sweeping
// is running so relaxed load (if mode == kAtomic) is enough.
uint16_t encoded_low_value =
@@ -212,19 +212,21 @@ size_t HeapObjectHeader::GetSize() const {
return size;
}
-void HeapObjectHeader::SetSize(size_t size) {
+void HeapObjectHeader::SetAllocatedSize(size_t size) {
DCHECK(!IsMarked());
encoded_low_ = EncodeSize(size);
}
template <AccessMode mode>
size_t HeapObjectHeader::ObjectSize() const {
- return GetSize<mode>() - sizeof(HeapObjectHeader);
+ // The following DCHECK also fails for large objects.
+ DCHECK_GT(AllocatedSize<mode>(), sizeof(HeapObjectHeader));
+ return AllocatedSize<mode>() - sizeof(HeapObjectHeader);
}
template <AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
- return GetSize<mode>() == kLargeObjectSizeInHeader;
+ return AllocatedSize<mode>() == kLargeObjectSizeInHeader;
}
template <AccessMode mode>
@@ -235,7 +237,8 @@ bool HeapObjectHeader::IsInConstruction() const {
}
void HeapObjectHeader::MarkAsFullyConstructed() {
- MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(Payload());
+ MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(
+ ObjectStart());
}
template <AccessMode mode>
@@ -282,7 +285,7 @@ template <AccessMode mode>
void HeapObjectHeader::Trace(Visitor* visitor) const {
const GCInfo& gc_info =
GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex<mode>());
- return gc_info.trace(visitor, Payload());
+ return gc_info.trace(visitor, ObjectStart());
}
template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
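
The renames above (Payload → ObjectStart, PayloadEnd → ObjectEnd, GetSize → AllocatedSize) make the layout explicit: the header sits immediately before the object, AllocatedSize() covers header plus object, and ObjectSize() excludes the header. A standalone sketch of that arithmetic; ToyHeader stores the size as a plain field rather than the real encoded 16-bit value:

#include <cstddef>
#include <cstdint>

struct ToyHeader {
  std::size_t allocated_size;  // header + object, like AllocatedSize()

  std::uint8_t* ObjectStart() {
    return reinterpret_cast<std::uint8_t*>(this) + sizeof(ToyHeader);
  }
  std::uint8_t* ObjectEnd() {
    return reinterpret_cast<std::uint8_t*>(this) + allocated_size;
  }
  std::size_t ObjectSize() const { return allocated_size - sizeof(ToyHeader); }

  // Inverse of ObjectStart(): step back over the header, as FromObject() does.
  static ToyHeader& FromObject(void* object) {
    return *reinterpret_cast<ToyHeader*>(static_cast<std::uint8_t*>(object) -
                                         sizeof(ToyHeader));
  }
};
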
diff --git a/chromium/v8/src/heap/cppgc/heap-page.cc b/chromium/v8/src/heap/cppgc/heap-page.cc
index d573d675ee4..f65b3fed9b8 100644
--- a/chromium/v8/src/heap/cppgc/heap-page.cc
+++ b/chromium/v8/src/heap/cppgc/heap-page.cc
@@ -69,6 +69,11 @@ ConstAddress BasePage::PayloadEnd() const {
return const_cast<BasePage*>(this)->PayloadEnd();
}
+size_t BasePage::AllocatedBytesAtLastGC() const {
+ return is_large() ? LargePage::From(this)->AllocatedBytesAtLastGC()
+ : NormalPage::From(this)->AllocatedBytesAtLastGC();
+}
+
HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
void* address) const {
return const_cast<HeapObjectHeader*>(
diff --git a/chromium/v8/src/heap/cppgc/heap-page.h b/chromium/v8/src/heap/cppgc/heap-page.h
index 1a66b8593e6..39d5e644ee3 100644
--- a/chromium/v8/src/heap/cppgc/heap-page.h
+++ b/chromium/v8/src/heap/cppgc/heap-page.h
@@ -46,6 +46,10 @@ class V8_EXPORT_PRIVATE BasePage {
Address PayloadEnd();
ConstAddress PayloadEnd() const;
+ // Returns the size of live objects on the page at the last GC.
+  // The counter is updated after sweeping.
+ size_t AllocatedBytesAtLastGC() const;
+
// |address| must refer to real object.
template <AccessMode = AccessMode::kNonAtomic>
HeapObjectHeader& ObjectHeaderFromInnerAddress(void* address) const;
@@ -107,7 +111,7 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
bool operator!=(IteratorImpl other) const { return !(*this == other); }
IteratorImpl& operator++() {
- const size_t size = p_->GetSize();
+ const size_t size = p_->AllocatedSize();
DCHECK_EQ(0, (size & (sizeof(T) - 1)));
p_ += (size / sizeof(T));
if (reinterpret_cast<ConstAddress>(p_) == lab_start_) {
@@ -169,6 +173,12 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
return (PayloadStart() <= address) && (address < PayloadEnd());
}
+ size_t AllocatedBytesAtLastGC() const { return allocated_bytes_at_last_gc_; }
+
+ void SetAllocatedBytesAtLastGC(size_t bytes) {
+ allocated_bytes_at_last_gc_ = bytes;
+ }
+
PlatformAwareObjectStartBitmap& object_start_bitmap() {
return object_start_bitmap_;
}
@@ -180,6 +190,7 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
NormalPage(HeapBase* heap, BaseSpace* space);
~NormalPage();
+ size_t allocated_bytes_at_last_gc_ = 0;
PlatformAwareObjectStartBitmap object_start_bitmap_;
};
@@ -210,7 +221,12 @@ class V8_EXPORT_PRIVATE LargePage final : public BasePage {
ConstAddress PayloadEnd() const;
size_t PayloadSize() const { return payload_size_; }
- size_t ObjectSize() const { return payload_size_ - sizeof(HeapObjectHeader); }
+ size_t ObjectSize() const {
+ DCHECK_GT(payload_size_, sizeof(HeapObjectHeader));
+ return payload_size_ - sizeof(HeapObjectHeader);
+ }
+
+ size_t AllocatedBytesAtLastGC() const { return ObjectSize(); }
bool PayloadContains(ConstAddress address) const {
return (PayloadStart() <= address) && (address < PayloadEnd());
@@ -248,7 +264,7 @@ const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
const HeapObjectHeader* header =
bitmap.FindHeader<mode>(static_cast<ConstAddress>(address));
DCHECK_LT(address, reinterpret_cast<ConstAddress>(header) +
- header->GetSize<AccessMode::kAtomic>());
+ header->AllocatedSize<AccessMode::kAtomic>());
return header;
}
diff --git a/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc b/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc
index 961148babd8..ef283e856ad 100644
--- a/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc
+++ b/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc
@@ -148,7 +148,7 @@ bool HeapStatisticsCollector::VisitHeapObjectHeader(HeapObjectHeader* header) {
DCHECK_NOT_NULL(current_space_stats_);
DCHECK_NOT_NULL(current_page_stats_);
if (header->IsFree()) return true;
- size_t object_size = header->GetSize();
+ size_t object_size = header->AllocatedSize();
RecordObjectType(current_space_stats_, header, object_size);
current_page_stats_->used_size_bytes += object_size;
return true;
diff --git a/chromium/v8/src/heap/cppgc/heap.cc b/chromium/v8/src/heap/cppgc/heap.cc
index c0c9cec9292..dc127f8e51c 100644
--- a/chromium/v8/src/heap/cppgc/heap.cc
+++ b/chromium/v8/src/heap/cppgc/heap.cc
@@ -176,6 +176,7 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
if (override_stack_state_) {
config_.stack_state = *override_stack_state_;
}
+ SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
in_atomic_pause_ = true;
{
// This guards atomic pause marking, meaning that no internal method or
@@ -188,7 +189,8 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
MarkingVerifier verifier(*this);
- verifier.Run(config_.stack_state);
+ verifier.Run(config_.stack_state, stack_end_of_current_gc(),
+ stats_collector()->marked_bytes());
#endif
subtle::NoGarbageCollectionScope no_gc(*this);
diff --git a/chromium/v8/src/heap/cppgc/liveness-broker.cc b/chromium/v8/src/heap/cppgc/liveness-broker.cc
index 8c61268ba81..d3dd457e056 100644
--- a/chromium/v8/src/heap/cppgc/liveness-broker.cc
+++ b/chromium/v8/src/heap/cppgc/liveness-broker.cc
@@ -9,7 +9,7 @@
namespace cppgc {
bool LivenessBroker::IsHeapObjectAliveImpl(const void* payload) const {
- return internal::HeapObjectHeader::FromPayload(payload).IsMarked();
+ return internal::HeapObjectHeader::FromObject(payload).IsMarked();
}
namespace internal {
diff --git a/chromium/v8/src/heap/cppgc/marker.cc b/chromium/v8/src/heap/cppgc/marker.cc
index d30bb0a8ec2..d26fd580df9 100644
--- a/chromium/v8/src/heap/cppgc/marker.cc
+++ b/chromium/v8/src/heap/cppgc/marker.cc
@@ -191,7 +191,7 @@ MarkerBase::~MarkerBase() {
MarkingWorklists::EphemeronPairItem item;
while (mutator_marking_state_.discovered_ephemeron_pairs_worklist().Pop(
&item)) {
- DCHECK(!HeapObjectHeader::FromPayload(item.key).IsMarked());
+ DCHECK(!HeapObjectHeader::FromObject(item.key).IsMarked());
}
#else
marking_worklists_.discovered_ephemeron_pairs_worklist()->Clear();
@@ -231,6 +231,8 @@ void MarkerBase::StartMarking() {
}
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
+ StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
+ StatsCollector::kAtomicMark);
StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
StatsCollector::kMarkAtomicPrologue);
@@ -261,30 +263,38 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
}
void MarkerBase::LeaveAtomicPause() {
- StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
- StatsCollector::kMarkAtomicEpilogue);
- DCHECK(!incremental_marking_handle_);
- ResetRememberedSet(heap());
- heap().stats_collector()->NotifyMarkingCompleted(
- // GetOverallMarkedBytes also includes concurrently marked bytes.
- schedule_.GetOverallMarkedBytes());
- is_marking_ = false;
+ {
+ StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
+ StatsCollector::kAtomicMark);
+ StatsCollector::EnabledScope stats_scope(
+ heap().stats_collector(), StatsCollector::kMarkAtomicEpilogue);
+ DCHECK(!incremental_marking_handle_);
+ ResetRememberedSet(heap());
+ heap().stats_collector()->NotifyMarkingCompleted(
+ // GetOverallMarkedBytes also includes concurrently marked bytes.
+ schedule_.GetOverallMarkedBytes());
+ is_marking_ = false;
+ }
{
// Weakness callbacks are forbidden from allocating objects.
cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(heap_);
ProcessWeakness();
}
+ // TODO(chromium:1056170): It would be better if the call to Unlock was
+ // covered by some cppgc scope.
g_process_mutex.Pointer()->Unlock();
heap().SetStackStateOfPrevGC(config_.stack_state);
}
void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
DCHECK(is_marking_);
- StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
- StatsCollector::kAtomicMark);
EnterAtomicPause(stack_state);
- CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
- mutator_marking_state_.Publish();
+ {
+ StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
+ StatsCollector::kAtomicMark);
+ CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
+ mutator_marking_state_.Publish();
+ }
LeaveAtomicPause();
}
@@ -367,6 +377,10 @@ bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
}
void MarkerBase::AdvanceMarkingOnAllocation() {
+ StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
+ StatsCollector::kIncrementalMark);
+ StatsCollector::EnabledScope nested_scope(heap().stats_collector(),
+ StatsCollector::kMarkOnAllocation);
if (AdvanceMarkingWithLimits()) {
// Schedule another incremental task for finalizing without a stack.
ScheduleIncrementalMarkingTask();
@@ -458,7 +472,7 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
mutator_marking_state_.marking_worklist(),
[this](const MarkingWorklists::MarkingItem& item) {
const HeapObjectHeader& header =
- HeapObjectHeader::FromPayload(item.base_object_payload);
+ HeapObjectHeader::FromObject(item.base_object_payload);
DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
DCHECK(header.IsMarked<AccessMode::kNonAtomic>());
mutator_marking_state_.AccountMarkedBytes(header);
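
Most of the marker.cc churn above re-nests StatsCollector::EnabledScope objects so the atomic-pause epilogue is attributed to kAtomicMark as well as kMarkAtomicEpilogue, while weakness processing and the unlock stay outside both. A minimal RAII sketch of such nested timing scopes; ScopedTimer and the labels are illustrative, not the StatsCollector API:

#include <chrono>
#include <cstdio>

class ScopedTimer {
 public:
  explicit ScopedTimer(const char* label)
      : label_(label), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    const auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                        std::chrono::steady_clock::now() - start_)
                        .count();
    std::printf("%s: %lld us\n", label_, static_cast<long long>(us));
  }

 private:
  const char* label_;
  std::chrono::steady_clock::time_point start_;
};

void LeaveAtomicPauseSketch() {
  {
    ScopedTimer atomic_mark("AtomicMark");       // outer bucket
    ScopedTimer epilogue("MarkAtomicEpilogue");  // nested bucket for the same work
    // ... reset remembered set, publish marked bytes ...
  }
  // Weakness callbacks and unlocking run outside both scopes, as in the hunk.
}
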
diff --git a/chromium/v8/src/heap/cppgc/marking-state.h b/chromium/v8/src/heap/cppgc/marking-state.h
index 6e08fc3e10e..c4627463338 100644
--- a/chromium/v8/src/heap/cppgc/marking-state.h
+++ b/chromium/v8/src/heap/cppgc/marking-state.h
@@ -174,9 +174,9 @@ MarkingStateBase::MarkingStateBase(HeapBase& heap,
void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
DCHECK_NOT_NULL(object);
- MarkAndPush(HeapObjectHeader::FromPayload(
- const_cast<void*>(desc.base_object_payload)),
- desc);
+ MarkAndPush(
+ HeapObjectHeader::FromObject(const_cast<void*>(desc.base_object_payload)),
+ desc);
}
void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
@@ -202,7 +202,7 @@ bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
void MarkingStateBase::MarkAndPush(HeapObjectHeader& header) {
MarkAndPush(
header,
- {header.Payload(),
+ {header.ObjectStart(),
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
@@ -222,7 +222,7 @@ void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
// Filter out already marked values. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
- if (HeapObjectHeader::FromPayload(desc.base_object_payload)
+ if (HeapObjectHeader::FromObject(desc.base_object_payload)
.IsMarked<AccessMode::kAtomic>())
return;
RegisterWeakCallback(weak_callback, parameter);
@@ -245,7 +245,7 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
DCHECK_NOT_NULL(object);
HeapObjectHeader& header =
- HeapObjectHeader::FromPayload(const_cast<void*>(object));
+ HeapObjectHeader::FromObject(const_cast<void*>(object));
if (header.IsInConstruction<AccessMode::kAtomic>()) {
not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
@@ -255,6 +255,7 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
// Only mark the container initially. Its buckets will be processed after
// marking.
if (!MarkNoPush(header)) return;
+
RegisterWeakContainer(header);
// Register final weak processing of the backing store.
@@ -264,7 +265,13 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
// the TraceDescriptor will be nullptr. For ephemerons the callback will be
// non-nullptr so that the container is traced and the ephemeron pairs are
// processed.
- if (desc.callback) PushMarked(header, desc);
+ if (desc.callback) {
+ PushMarked(header, desc);
+ } else {
+ // For weak containers, there's no trace callback and no processing loop to
+ // update the marked bytes, hence inline that here.
+ AccountMarkedBytes(header);
+ }
}
void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
@@ -273,7 +280,7 @@ void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
// Filter out already marked keys. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
- if (HeapObjectHeader::FromPayload(key).IsMarked<AccessMode::kAtomic>()) {
+ if (HeapObjectHeader::FromObject(key).IsMarked<AccessMode::kAtomic>()) {
if (value_desc.base_object_payload) {
MarkAndPush(value_desc.base_object_payload, value_desc);
} else {
@@ -291,7 +298,7 @@ void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
header.IsLargeObject<AccessMode::kAtomic>()
? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
->PayloadSize()
- : header.GetSize<AccessMode::kAtomic>());
+ : header.AllocatedSize<AccessMode::kAtomic>());
}
void MarkingStateBase::AccountMarkedBytes(size_t marked_bytes) {
@@ -308,7 +315,7 @@ class MutatorMarkingState : public MarkingStateBase {
return MutatorMarkingState::MarkingStateBase::MarkNoPush(header);
}
- inline void PushMarkedWeakContainer(HeapObjectHeader&);
+ inline void ReTraceMarkedWeakContainer(cppgc::Visitor&, HeapObjectHeader&);
inline void DynamicallyMarkAddress(ConstAddress);
@@ -343,13 +350,13 @@ class MutatorMarkingState : public MarkingStateBase {
} recently_retraced_weak_containers_;
};
-void MutatorMarkingState::PushMarkedWeakContainer(HeapObjectHeader& header) {
+void MutatorMarkingState::ReTraceMarkedWeakContainer(cppgc::Visitor& visitor,
+ HeapObjectHeader& header) {
DCHECK(weak_containers_worklist_.Contains(&header));
recently_retraced_weak_containers_.Insert(&header);
- PushMarked(
- header,
- {header.Payload(),
- GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+ // Don't push to the marking worklist to avoid double accounting of marked
+ // bytes as the container is already accounted for.
+ header.Trace(&visitor);
}
void MutatorMarkingState::DynamicallyMarkAddress(ConstAddress address) {
@@ -359,7 +366,7 @@ void MutatorMarkingState::DynamicallyMarkAddress(ConstAddress address) {
DCHECK(!header.IsInConstruction());
if (MarkNoPush(header)) {
marking_worklist_.Push(
- {reinterpret_cast<void*>(header.Payload()),
+ {reinterpret_cast<void*>(header.ObjectStart()),
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
}
@@ -371,7 +378,7 @@ void MutatorMarkingState::InvokeWeakRootsCallbackIfNeeded(
// the callback instead of registering it.
#if DEBUG
const HeapObjectHeader& header =
- HeapObjectHeader::FromPayload(desc.base_object_payload);
+ HeapObjectHeader::FromObject(desc.base_object_payload);
DCHECK_IMPLIES(header.IsInConstruction(), header.IsMarked());
#endif // DEBUG
weak_callback(LivenessBrokerFactory::Create(), parameter);
diff --git a/chromium/v8/src/heap/cppgc/marking-verifier.cc b/chromium/v8/src/heap/cppgc/marking-verifier.cc
index 42e3c4eb3e8..2bbf8878e42 100644
--- a/chromium/v8/src/heap/cppgc/marking-verifier.cc
+++ b/chromium/v8/src/heap/cppgc/marking-verifier.cc
@@ -9,50 +9,81 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/object-view.h"
namespace cppgc {
namespace internal {
MarkingVerifierBase::MarkingVerifierBase(
- HeapBase& heap, std::unique_ptr<cppgc::Visitor> visitor)
+ HeapBase& heap, VerificationState& verification_state,
+ std::unique_ptr<cppgc::Visitor> visitor)
: ConservativeTracingVisitor(heap, *heap.page_backend(), *visitor.get()),
+ verification_state_(verification_state),
visitor_(std::move(visitor)) {}
-void MarkingVerifierBase::Run(Heap::Config::StackState stack_state) {
+void MarkingVerifierBase::Run(Heap::Config::StackState stack_state,
+ uintptr_t stack_end,
+ size_t expected_marked_bytes) {
Traverse(&heap_.raw_heap());
if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
- heap_.stack()->IteratePointers(this);
- CHECK_EQ(in_construction_objects_stack_, in_construction_objects_heap_);
+ heap_.stack()->IteratePointersUnsafe(this, stack_end);
+ // The objects found through the unsafe iteration are only a subset of the
+ // regular iteration as they miss objects held alive only from callee-saved
+ // registers that are never pushed on the stack and SafeStack.
+ CHECK_LE(in_construction_objects_stack_.size(),
+ in_construction_objects_heap_.size());
+ for (auto* header : in_construction_objects_stack_) {
+ CHECK_NE(in_construction_objects_heap_.end(),
+ in_construction_objects_heap_.find(header));
+ }
}
+#ifdef CPPGC_VERIFY_LIVE_BYTES
+ CHECK_EQ(expected_marked_bytes, found_marked_bytes_);
+#endif // CPPGC_VERIFY_LIVE_BYTES
}
void VerificationState::VerifyMarked(const void* base_object_payload) const {
const HeapObjectHeader& child_header =
- HeapObjectHeader::FromPayload(base_object_payload);
+ HeapObjectHeader::FromObject(base_object_payload);
if (!child_header.IsMarked()) {
FATAL(
"MarkingVerifier: Encountered unmarked object.\n"
"#\n"
"# Hint:\n"
- "# %s\n"
- "# \\-> %s",
- parent_->GetName().value, child_header.GetName().value);
+ "# %s (%p)\n"
+ "# \\-> %s (%p)",
+ parent_ ? parent_->GetName().value : "Stack",
+ parent_ ? parent_->ObjectStart() : nullptr,
+ child_header.GetName().value, child_header.ObjectStart());
}
}
void MarkingVerifierBase::VisitInConstructionConservatively(
HeapObjectHeader& header, TraceConservativelyCallback callback) {
- CHECK(header.IsMarked());
if (in_construction_objects_->find(&header) !=
in_construction_objects_->end())
return;
in_construction_objects_->insert(&header);
+
+ // Stack case: Parent is stack and this is merely ensuring that the object
+ // itself is marked. If the object is marked, then it is being processed by
+ // the on-heap phase.
+ if (verification_state_.IsParentOnStack()) {
+ verification_state_.VerifyMarked(header.ObjectStart());
+ return;
+ }
+
+ // Heap case: Dispatching parent object that must be marked (pre-condition).
+ CHECK(header.IsMarked());
callback(this, header);
}
void MarkingVerifierBase::VisitPointer(const void* address) {
+ // Entry point for stack walk. The conservative visitor dispatches as follows:
+ // - Fully constructed objects: Visit()
+ // - Objects in construction: VisitInConstructionConservatively()
TraceConservativelyIfNeeded(address);
}
@@ -62,7 +93,7 @@ bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader* header) {
DCHECK(!header->IsFree());
- SetCurrentParent(header);
+ verification_state_.SetCurrentParent(header);
if (!header->IsInConstruction()) {
header->Trace(visitor_.get());
@@ -71,6 +102,10 @@ bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader* header) {
TraceConservativelyIfNeeded(*header);
}
+ found_marked_bytes_ += ObjectView(*header).Size() + sizeof(HeapObjectHeader);
+
+ verification_state_.SetCurrentParent(nullptr);
+
return true;
}
@@ -112,12 +147,8 @@ class VerificationVisitor final : public cppgc::Visitor {
} // namespace
MarkingVerifier::MarkingVerifier(HeapBase& heap_base)
- : MarkingVerifierBase(heap_base,
+ : MarkingVerifierBase(heap_base, state_,
std::make_unique<VerificationVisitor>(state_)) {}
-void MarkingVerifier::SetCurrentParent(const HeapObjectHeader* parent) {
- state_.SetCurrentParent(parent);
-}
-
} // namespace internal
} // namespace cppgc
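
The verifier above now re-derives the marked byte count during its heap traversal and, when built with CPPGC_VERIFY_LIVE_BYTES, checks it against the total the stats collector recorded. A reduced sketch of that cross-check; LiveObject and VerifyLiveBytes are hypothetical helpers, not the verifier's API:

#include <cassert>
#include <cstddef>
#include <vector>

struct LiveObject {
  std::size_t object_size;
  std::size_t header_size;
};

// Re-derives marked bytes from a walk over marked objects and compares it with
// the marker's own tally, mirroring found_marked_bytes_ vs. expected_marked_bytes.
void VerifyLiveBytes(const std::vector<LiveObject>& marked_objects,
                     std::size_t expected_marked_bytes) {
  std::size_t found = 0;
  for (const LiveObject& object : marked_objects) {
    found += object.object_size + object.header_size;
  }
  assert(found == expected_marked_bytes);
}
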
diff --git a/chromium/v8/src/heap/cppgc/marking-verifier.h b/chromium/v8/src/heap/cppgc/marking-verifier.h
index eeced684497..95475f5191a 100644
--- a/chromium/v8/src/heap/cppgc/marking-verifier.h
+++ b/chromium/v8/src/heap/cppgc/marking-verifier.h
@@ -21,6 +21,9 @@ class VerificationState {
void VerifyMarked(const void*) const;
void SetCurrentParent(const HeapObjectHeader* header) { parent_ = header; }
+ // No parent means parent was on stack.
+ bool IsParentOnStack() const { return !parent_; }
+
private:
const HeapObjectHeader* parent_ = nullptr;
};
@@ -37,12 +40,11 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
MarkingVerifierBase(const MarkingVerifierBase&) = delete;
MarkingVerifierBase& operator=(const MarkingVerifierBase&) = delete;
- void Run(Heap::Config::StackState);
+ void Run(Heap::Config::StackState, uintptr_t, size_t);
protected:
- MarkingVerifierBase(HeapBase&, std::unique_ptr<cppgc::Visitor>);
-
- virtual void SetCurrentParent(const HeapObjectHeader*) = 0;
+ MarkingVerifierBase(HeapBase&, VerificationState&,
+ std::unique_ptr<cppgc::Visitor>);
private:
void VisitInConstructionConservatively(HeapObjectHeader&,
@@ -51,12 +53,14 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
bool VisitHeapObjectHeader(HeapObjectHeader*);
+ VerificationState& verification_state_;
std::unique_ptr<cppgc::Visitor> visitor_;
std::unordered_set<const HeapObjectHeader*> in_construction_objects_heap_;
std::unordered_set<const HeapObjectHeader*> in_construction_objects_stack_;
std::unordered_set<const HeapObjectHeader*>* in_construction_objects_ =
&in_construction_objects_heap_;
+ size_t found_marked_bytes_ = 0;
};
class V8_EXPORT_PRIVATE MarkingVerifier final : public MarkingVerifierBase {
@@ -64,8 +68,6 @@ class V8_EXPORT_PRIVATE MarkingVerifier final : public MarkingVerifierBase {
explicit MarkingVerifier(HeapBase&);
~MarkingVerifier() final = default;
- void SetCurrentParent(const HeapObjectHeader*) final;
-
private:
VerificationState state_;
};
diff --git a/chromium/v8/src/heap/cppgc/marking-visitor.cc b/chromium/v8/src/heap/cppgc/marking-visitor.cc
index fb51ccc303e..a740d33a841 100644
--- a/chromium/v8/src/heap/cppgc/marking-visitor.cc
+++ b/chromium/v8/src/heap/cppgc/marking-visitor.cc
@@ -56,7 +56,7 @@ void ConservativeMarkingVisitor::VisitFullyConstructedConservatively(
HeapObjectHeader& header) {
if (header.IsMarked()) {
if (marking_state_.IsMarkedWeakContainer(header))
- marking_state_.PushMarkedWeakContainer(header);
+ marking_state_.ReTraceMarkedWeakContainer(visitor_, header);
return;
}
ConservativeTracingVisitor::VisitFullyConstructedConservatively(header);
diff --git a/chromium/v8/src/heap/cppgc/memory.cc b/chromium/v8/src/heap/cppgc/memory.cc
new file mode 100644
index 00000000000..aa3baeaa8a0
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/memory.cc
@@ -0,0 +1,22 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/memory.h"
+
+#include <cstddef>
+
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+void NoSanitizeMemset(void* address, char c, size_t bytes) {
+ volatile Address base = reinterpret_cast<Address>(address);
+ for (size_t i = 0; i < bytes; ++i) {
+ base[i] = c;
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/memory.h b/chromium/v8/src/heap/cppgc/memory.h
new file mode 100644
index 00000000000..d31af33ee3f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/memory.h
@@ -0,0 +1,76 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_MEMORY_H_
+#define V8_HEAP_CPPGC_MEMORY_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include "src/base/macros.h"
+#include "src/base/sanitizer/asan.h"
+#include "src/base/sanitizer/msan.h"
+
+namespace cppgc {
+namespace internal {
+
+V8_NOINLINE DISABLE_ASAN void NoSanitizeMemset(void* address, char c,
+ size_t bytes);
+
+inline void ZapMemory(void* address, size_t size) {
+  // The lowest bit of the zapped value should be 0 so that zapped objects are
+ // never viewed as fully constructed objects.
+ static constexpr uint8_t kZappedValue = 0xdc;
+ memset(address, kZappedValue, size);
+}
+
+// Together `SetMemoryAccessible()` and `SetMemoryInaccessible()` form the
+// memory access model for allocation and free.
+V8_INLINE void SetMemoryAccessible(void* address, size_t size) {
+#if defined(V8_USE_MEMORY_SANITIZER)
+
+ MSAN_MEMORY_IS_INITIALIZED(address, size);
+
+#elif defined(V8_USE_ADDRESS_SANITIZER)
+
+ ASAN_UNPOISON_MEMORY_REGION(address, size);
+
+#elif DEBUG
+
+ memset(address, 0, size);
+
+#else // Release builds.
+
+ // Nothing to be done for release builds.
+
+#endif // Release builds.
+}
+
+V8_INLINE void SetMemoryInaccessible(void* address, size_t size) {
+#if defined(V8_USE_MEMORY_SANITIZER)
+
+ memset(address, 0, size);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(address, size);
+
+#elif defined(V8_USE_ADDRESS_SANITIZER)
+
+ NoSanitizeMemset(address, 0, size);
+ ASAN_POISON_MEMORY_REGION(address, size);
+
+#elif DEBUG
+
+ ::cppgc::internal::ZapMemory(address, size);
+
+#else // Release builds.
+
+ memset(address, 0, size);
+
+#endif // Release builds.
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_MEMORY_H_
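
memory.h above centralizes the allocation/free access model: sanitizer builds poison or mark memory via the ASAN/MSAN helpers, debug builds zap it, and release builds simply clear it when it becomes inaccessible. The zap constant deliberately keeps its lowest bit clear; a tiny check of that property follows (the static_assert is illustrative and not part of the file):

#include <cstdint>

constexpr std::uint8_t kZappedValue = 0xdc;  // value used by ZapMemory() above

// Per the comment in memory.h, zapped memory must never read back as a
// "fully constructed" marker, which is keyed off the lowest bit.
static_assert((kZappedValue & 1u) == 0,
              "zapped memory must not look fully constructed");
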
diff --git a/chromium/v8/src/heap/cppgc/metric-recorder.h b/chromium/v8/src/heap/cppgc/metric-recorder.h
index 6e9d4d0787c..6118627d01d 100644
--- a/chromium/v8/src/heap/cppgc/metric-recorder.h
+++ b/chromium/v8/src/heap/cppgc/metric-recorder.h
@@ -14,7 +14,7 @@ class StatsCollector;
/**
* Base class used for reporting GC statistics histograms. Embedders interested
- * in collecting histgorams should implement the virtual AddMainThreadEvent
+ * in collecting histograms should implement the virtual AddMainThreadEvent
* methods below and pass an instance of the implementation during Heap
* creation.
*/
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.cc b/chromium/v8/src/heap/cppgc/object-allocator.cc
index 366900b0f92..1197356c29d 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.cc
+++ b/chromium/v8/src/heap/cppgc/object-allocator.cc
@@ -95,7 +95,7 @@ void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
stats_collector->NotifyAllocation(size);
MarkRangeAsYoung(page, page->PayloadStart(), page->PayloadEnd());
- return header->Payload();
+ return header->ObjectStart();
}
} // namespace
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.h b/chromium/v8/src/heap/cppgc/object-allocator.h
index 56faef1c833..dd99d83ba56 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.h
+++ b/chromium/v8/src/heap/cppgc/object-allocator.h
@@ -12,9 +12,9 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/raw-heap.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace cppgc {
@@ -111,10 +111,10 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace* space,
#if !defined(V8_USE_MEMORY_SANITIZER) && !defined(V8_USE_ADDRESS_SANITIZER) && \
DEBUG
// For debug builds, unzap only the payload.
- SET_MEMORY_ACCESSIBLE(static_cast<char*>(raw) + sizeof(HeapObjectHeader),
- size - sizeof(HeapObjectHeader));
+ SetMemoryAccessible(static_cast<char*>(raw) + sizeof(HeapObjectHeader),
+ size - sizeof(HeapObjectHeader));
#else
- SET_MEMORY_ACCESSIBLE(raw, size);
+ SetMemoryAccessible(raw, size);
#endif
auto* header = new (raw) HeapObjectHeader(size, gcinfo);
@@ -123,7 +123,7 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace* space,
->object_start_bitmap()
.SetBit<AccessMode::kAtomic>(reinterpret_cast<ConstAddress>(header));
- return header->Payload();
+ return header->ObjectStart();
}
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/object-poisoner.h b/chromium/v8/src/heap/cppgc/object-poisoner.h
index fd2462d6694..632dea9b9d7 100644
--- a/chromium/v8/src/heap/cppgc/object-poisoner.h
+++ b/chromium/v8/src/heap/cppgc/object-poisoner.h
@@ -5,10 +5,10 @@
#ifndef V8_HEAP_CPPGC_OBJECT_POISONER_H_
#define V8_HEAP_CPPGC_OBJECT_POISONER_H_
+#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace cppgc {
namespace internal {
@@ -27,7 +27,7 @@ class UnmarkedObjectsPoisoner : public HeapVisitor<UnmarkedObjectsPoisoner> {
header->IsLargeObject()
? LargePage::From(BasePage::FromPayload(header))->ObjectSize()
: header->ObjectSize();
- ASAN_POISON_MEMORY_REGION(header->Payload(), size);
+ ASAN_POISON_MEMORY_REGION(header->ObjectStart(), size);
return true;
}
};
diff --git a/chromium/v8/src/heap/cppgc/object-size-trait.cc b/chromium/v8/src/heap/cppgc/object-size-trait.cc
index 11c50b3c4d1..7b82239a610 100644
--- a/chromium/v8/src/heap/cppgc/object-size-trait.cc
+++ b/chromium/v8/src/heap/cppgc/object-size-trait.cc
@@ -6,6 +6,7 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/object-view.h"
namespace cppgc {
namespace internal {
@@ -13,11 +14,7 @@ namespace internal {
// static
size_t BaseObjectSizeTrait::GetObjectSizeForGarbageCollected(
const void* object) {
- const auto& header = HeapObjectHeader::FromPayload(object);
- return header.IsLargeObject()
- ? static_cast<const LargePage*>(BasePage::FromPayload(&header))
- ->ObjectSize()
- : header.ObjectSize();
+ return ObjectView(HeapObjectHeader::FromObject(object)).Size();
}
// static
diff --git a/chromium/v8/src/heap/cppgc/object-view.h b/chromium/v8/src/heap/cppgc/object-view.h
new file mode 100644
index 00000000000..e83145cc319
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/object-view.h
@@ -0,0 +1,54 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_OBJECT_VIEW_H_
+#define V8_HEAP_CPPGC_OBJECT_VIEW_H_
+
+#include "include/v8config.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+
+namespace cppgc {
+namespace internal {
+
+// ObjectView allows accessing a header within the bounds of the actual object.
+// It is not exposed externally and does not keep the underlying object alive.
+class ObjectView final {
+ public:
+ V8_INLINE explicit ObjectView(const HeapObjectHeader& header);
+
+ V8_INLINE Address Start() const;
+ V8_INLINE ConstAddress End() const;
+ V8_INLINE size_t Size() const;
+
+ private:
+ const HeapObjectHeader& header_;
+ const BasePage* base_page_;
+ const bool is_large_object_;
+};
+
+ObjectView::ObjectView(const HeapObjectHeader& header)
+ : header_(header),
+ base_page_(
+ BasePage::FromPayload(const_cast<HeapObjectHeader*>(&header_))),
+ is_large_object_(header_.IsLargeObject()) {
+ DCHECK_EQ(Start() + Size(), End());
+}
+
+Address ObjectView::Start() const { return header_.ObjectStart(); }
+
+ConstAddress ObjectView::End() const {
+ return is_large_object_ ? LargePage::From(base_page_)->PayloadEnd()
+ : header_.ObjectEnd();
+}
+
+size_t ObjectView::Size() const {
+ return is_large_object_ ? LargePage::From(base_page_)->ObjectSize()
+ : header_.ObjectSize();
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_OBJECT_VIEW_H_
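
object-view.h exists so that callers such as ObjectSizeCounter and GetObjectSizeForGarbageCollected (both simplified earlier in this patch) no longer spell out the large-vs-regular size dispatch themselves. A stand-in sketch of the shape of that helper; ToyObjectView and its fields are illustrative, not the cppgc types:

#include <cstddef>

constexpr std::size_t kHeaderSize = 8;  // stand-in for sizeof(HeapObjectHeader)

struct ToyHeader {
  bool is_large;
  std::size_t allocated_size;  // header + object; unused for large objects
};

struct ToyLargePage {
  std::size_t payload_size;  // header + object of the single large object
};

class ToyObjectView {
 public:
  ToyObjectView(const ToyHeader& header, const ToyLargePage* large_page)
      : header_(header), large_page_(large_page) {}

  // Object size excluding the header, regardless of which page kind backs it.
  std::size_t Size() const {
    return header_.is_large ? large_page_->payload_size - kHeaderSize
                            : header_.allocated_size - kHeaderSize;
  }

 private:
  const ToyHeader& header_;
  const ToyLargePage* large_page_;  // only consulted for large objects
};
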
diff --git a/chromium/v8/src/heap/cppgc/page-memory.cc b/chromium/v8/src/heap/cppgc/page-memory.cc
index 76b9458517e..49b44aff91c 100644
--- a/chromium/v8/src/heap/cppgc/page-memory.cc
+++ b/chromium/v8/src/heap/cppgc/page-memory.cc
@@ -5,7 +5,7 @@
#include "src/heap/cppgc/page-memory.h"
#include "src/base/macros.h"
-#include "src/heap/cppgc/sanitizers.h"
+#include "src/base/sanitizer/asan.h"
namespace cppgc {
namespace internal {
diff --git a/chromium/v8/src/heap/cppgc/pointer-policies.cc b/chromium/v8/src/heap/cppgc/pointer-policies.cc
index fdc435af17c..23ad552c7aa 100644
--- a/chromium/v8/src/heap/cppgc/pointer-policies.cc
+++ b/chromium/v8/src/heap/cppgc/pointer-policies.cc
@@ -4,21 +4,70 @@
#include "include/cppgc/internal/pointer-policies.h"
+#include "include/cppgc/internal/caged-heap-local-data.h"
#include "include/cppgc/internal/persistent-node.h"
+#include "src/base/logging.h"
#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/process-heap.h"
namespace cppgc {
namespace internal {
-EnabledCheckingPolicy::EnabledCheckingPolicy() {
- USE(impl_);
- // TODO(chromium:1056170): Save creating heap state.
+namespace {
+
+#if defined(DEBUG)
+bool IsOnStack(const void* address) {
+ return v8::base::Stack::GetCurrentStackPosition() <= address &&
+ address < v8::base::Stack::GetStackStart();
}
+#endif // defined(DEBUG)
+
+} // namespace
+
+void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
+ bool points_to_payload) {
+ // `ptr` must not reside on stack.
+ DCHECK(!IsOnStack(ptr));
+ auto* base_page = BasePage::FromPayload(ptr);
+ // Large objects do not support mixins. This also means that `base_page` is
+ // valid for large objects.
+ DCHECK_IMPLIES(base_page->is_large(), points_to_payload);
+
+ // References cannot change their heap association which means that state is
+ // immutable once it is set.
+ if (!heap_) {
+ heap_ = base_page->heap();
+ if (!heap_->page_backend()->Lookup(reinterpret_cast<Address>(this))) {
+ // If `this` is not contained within the heap of `ptr`, we must deal with
+ // an on-stack or off-heap reference. For both cases there should be no
+ // heap registered.
+ CHECK(!HeapRegistry::TryFromManagedPointer(this));
+ }
+ }
+
+ // Member references should never mix heaps.
+ DCHECK_EQ(heap_, base_page->heap());
+
+ // Header checks.
+ const HeapObjectHeader* header = nullptr;
+ if (points_to_payload) {
+ header = &HeapObjectHeader::FromObject(ptr);
+ } else if (!heap_->sweeper().IsSweepingInProgress()) {
+ // Mixin case.
+ header = &base_page->ObjectHeaderFromInnerAddress(ptr);
+ DCHECK_LE(header->ObjectStart(), ptr);
+ DCHECK_GT(header->ObjectEnd(), ptr);
+ }
+ if (header) {
+ DCHECK(!header->IsFree());
+ }
-void EnabledCheckingPolicy::CheckPointer(const void* ptr) {
- // TODO(chromium:1056170): Provide implementation.
+  // TODO(v8:11749): Check mark bits during the pre-finalizer phase.
}
PersistentRegion& StrongPersistentPolicy::GetPersistentRegion(
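
CheckPointerImpl above starts by asserting that the checked pointer does not live on the current thread's stack; with a downward-growing stack, an on-stack address lies between the current stack position and the stack start. A hedged, self-contained version of that predicate; GetApproximateStackPosition stands in for the v8::base::Stack helpers:

#include <cstdint>

// Stand-in for v8::base::Stack::GetCurrentStackPosition(): the address of a
// local variable bounds the live portion of a downward-growing stack from below.
std::uintptr_t GetApproximateStackPosition() {
  int marker = 0;
  return reinterpret_cast<std::uintptr_t>(&marker);
}

bool IsOnStackSketch(const void* address, std::uintptr_t stack_start) {
  const auto value = reinterpret_cast<std::uintptr_t>(address);
  return GetApproximateStackPosition() <= value && value < stack_start;
}
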
diff --git a/chromium/v8/src/heap/cppgc/process-heap-statistics.h b/chromium/v8/src/heap/cppgc/process-heap-statistics.h
index 2d7bfa117f8..da7683b616a 100644
--- a/chromium/v8/src/heap/cppgc/process-heap-statistics.h
+++ b/chromium/v8/src/heap/cppgc/process-heap-statistics.h
@@ -15,7 +15,7 @@ class ProcessHeapStatisticsUpdater {
public:
// Allocation observer implementation for heaps should register to contribute
// to ProcessHeapStatistics. The heap is responsible for allocating and
- // registering the obsrever impl with its stats collector.
+ // registering the observer impl with its stats collector.
class AllocationObserverImpl final
: public StatsCollector::AllocationObserver {
public:
diff --git a/chromium/v8/src/heap/cppgc/process-heap.cc b/chromium/v8/src/heap/cppgc/process-heap.cc
index e084ea1264b..6f8bb05c6cf 100644
--- a/chromium/v8/src/heap/cppgc/process-heap.cc
+++ b/chromium/v8/src/heap/cppgc/process-heap.cc
@@ -4,10 +4,66 @@
#include "src/heap/cppgc/process-heap.h"
+#include <algorithm>
+#include <vector>
+
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/page-memory.h"
+
namespace cppgc {
namespace internal {
v8::base::LazyMutex g_process_mutex = LAZY_MUTEX_INITIALIZER;
+namespace {
+
+v8::base::LazyMutex g_heap_registry_mutex = LAZY_MUTEX_INITIALIZER;
+
+HeapRegistry::Storage& GetHeapRegistryStorage() {
+ static v8::base::LazyInstance<HeapRegistry::Storage>::type heap_registry =
+ LAZY_INSTANCE_INITIALIZER;
+ return *heap_registry.Pointer();
+}
+
+} // namespace
+
+// static
+void HeapRegistry::RegisterHeap(HeapBase& heap) {
+ v8::base::MutexGuard guard(g_heap_registry_mutex.Pointer());
+
+ auto& storage = GetHeapRegistryStorage();
+ DCHECK_EQ(storage.end(), std::find(storage.begin(), storage.end(), &heap));
+ storage.push_back(&heap);
+}
+
+// static
+void HeapRegistry::UnregisterHeap(HeapBase& heap) {
+ v8::base::MutexGuard guard(g_heap_registry_mutex.Pointer());
+
+ auto& storage = GetHeapRegistryStorage();
+ const auto pos = std::find(storage.begin(), storage.end(), &heap);
+ DCHECK_NE(storage.end(), pos);
+ storage.erase(pos);
+}
+
+// static
+HeapBase* HeapRegistry::TryFromManagedPointer(const void* needle) {
+ v8::base::MutexGuard guard(g_heap_registry_mutex.Pointer());
+
+ for (auto* heap : GetHeapRegistryStorage()) {
+ const auto address =
+ heap->page_backend()->Lookup(reinterpret_cast<ConstAddress>(needle));
+ if (address) return heap;
+ }
+ return nullptr;
+}
+
+// static
+const HeapRegistry::Storage& HeapRegistry::GetRegisteredHeapsForTesting() {
+ return GetHeapRegistryStorage();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/process-heap.h b/chromium/v8/src/heap/cppgc/process-heap.h
index 8afc7c88eb5..c581bad29c5 100644
--- a/chromium/v8/src/heap/cppgc/process-heap.h
+++ b/chromium/v8/src/heap/cppgc/process-heap.h
@@ -5,13 +5,48 @@
#ifndef V8_HEAP_CPPGC_PROCESS_HEAP_H_
#define V8_HEAP_CPPGC_PROCESS_HEAP_H_
+#include <vector>
+
+#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
namespace cppgc {
namespace internal {
+class HeapBase;
+
extern v8::base::LazyMutex g_process_mutex;
+class V8_EXPORT_PRIVATE HeapRegistry final {
+ public:
+ using Storage = std::vector<HeapBase*>;
+
+ class Subscription final {
+ public:
+ inline explicit Subscription(HeapBase&);
+ inline ~Subscription();
+
+ private:
+ HeapBase& heap_;
+ };
+
+ static HeapBase* TryFromManagedPointer(const void* needle);
+
+ static const Storage& GetRegisteredHeapsForTesting();
+
+ private:
+ static void RegisterHeap(HeapBase&);
+ static void UnregisterHeap(HeapBase&);
+};
+
+HeapRegistry::Subscription::Subscription(HeapBase& heap) : heap_(heap) {
+ HeapRegistry::RegisterHeap(heap_);
+}
+
+HeapRegistry::Subscription::~Subscription() {
+ HeapRegistry::UnregisterHeap(heap_);
+}
+
} // namespace internal
} // namespace cppgc
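
The new HeapRegistry above is consumed through the RAII Subscription type: a heap registers itself on construction and unregisters on destruction, which is what lets TryFromManagedPointer map an arbitrary address back to its owning heap during conservative scanning. A minimal sketch of that ownership pattern follows; everything except HeapRegistry and Subscription themselves is an assumption for illustration, not taken from this patch.

#include "src/heap/cppgc/process-heap.h"  // HeapRegistry, Subscription

// Illustrative sketch only. A heap stays discoverable for exactly as long as
// it owns a Subscription.
void ExampleHeapLifetime(cppgc::internal::HeapBase& heap) {
  cppgc::internal::HeapRegistry::Subscription subscription(heap);
  // While |subscription| is alive, TryFromManagedPointer can resolve any
  // address inside one of |heap|'s pages back to |heap|:
  //   HeapBase* owner = HeapRegistry::TryFromManagedPointer(some_address);
  // On scope exit the Subscription destructor unregisters the heap again.
}
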
diff --git a/chromium/v8/src/heap/cppgc/sanitizers.h b/chromium/v8/src/heap/cppgc/sanitizers.h
deleted file mode 100644
index c3a8ff684d7..00000000000
--- a/chromium/v8/src/heap/cppgc/sanitizers.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_CPPGC_SANITIZERS_H_
-#define V8_HEAP_CPPGC_SANITIZERS_H_
-
-#include <stdint.h>
-#include <string.h>
-
-#include "src/base/macros.h"
-
-//
-// TODO(chromium:1056170): Find a place in base for sanitizer support.
-//
-
-#ifdef V8_USE_ADDRESS_SANITIZER
-
-#include <sanitizer/asan_interface.h>
-
-#define NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
-#if !defined(ASAN_POISON_MEMORY_REGION) || !defined(ASAN_UNPOISON_MEMORY_REGION)
-#error "ASAN_POISON_MEMORY_REGION must be defined"
-#endif
-
-#else // !V8_USE_ADDRESS_SANITIZER
-
-#define NO_SANITIZE_ADDRESS
-#define ASAN_POISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
-#define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
-
-#endif // V8_USE_ADDRESS_SANITIZER
-
-#ifdef V8_USE_MEMORY_SANITIZER
-
-#include <sanitizer/msan_interface.h>
-
-#define MSAN_POISON(addr, size) __msan_allocated_memory(addr, size)
-#define MSAN_UNPOISON(addr, size) __msan_unpoison(addr, size)
-
-#else // !V8_USE_MEMORY_SANITIZER
-
-#define MSAN_POISON(addr, size) ((void)(addr), (void)(size))
-#define MSAN_UNPOISON(addr, size) ((void)(addr), (void)(size))
-
-#endif // V8_USE_MEMORY_SANITIZER
-
-// API for newly allocated or reclaimed memory.
-#if defined(V8_USE_MEMORY_SANITIZER)
-#define SET_MEMORY_ACCESSIBLE(address, size) MSAN_UNPOISON(address, size);
-#define SET_MEMORY_INACCESSIBLE(address, size) \
- memset((address), 0, (size)); \
- MSAN_POISON((address), (size))
-#elif defined(V8_USE_ADDRESS_SANITIZER)
-#define SET_MEMORY_ACCESSIBLE(address, size) \
- ASAN_UNPOISON_MEMORY_REGION(address, size);
-#define SET_MEMORY_INACCESSIBLE(address, size) \
- memset((address), 0, (size)); \
- ASAN_POISON_MEMORY_REGION(address, size)
-#elif DEBUG
-#define SET_MEMORY_ACCESSIBLE(address, size) memset((address), 0, (size))
-#define SET_MEMORY_INACCESSIBLE(address, size) \
- ::cppgc::internal::ZapMemory((address), (size));
-#else
-#define SET_MEMORY_ACCESSIBLE(address, size) ((void)(address), (void)(size))
-#define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size))
-#endif
-
-namespace cppgc {
-namespace internal {
-
-inline void ZapMemory(void* address, size_t size) {
- // The lowest bit of the zapped value should be 0 so that zapped object
- // are never viewed as fully constructed objects.
- static constexpr uint8_t kZappedValue = 0xdc;
- memset(address, kZappedValue, size);
-}
-
-} // namespace internal
-} // namespace cppgc
-
-#endif // V8_HEAP_CPPGC_SANITIZERS_H_
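
The macros deleted above are superseded by function-style helpers: per the diffstat, this patch adds src/heap/cppgc/memory.h/.cc, and the sweeper hunks below call SetMemoryInaccessible() where they previously used SET_MEMORY_INACCESSIBLE. A rough sketch of such a helper, under the assumption that it mirrors the deleted macro's per-configuration behavior (the real helper lives in the new memory.h and may differ in detail; requires <string.h> and the sanitizer interface headers guarded as in the deleted file):

// Sketch only, not the actual implementation.
inline void SetMemoryInaccessible(void* address, size_t size) {
#if defined(V8_USE_MEMORY_SANITIZER)
  memset(address, 0, size);
  __msan_allocated_memory(address, size);    // re-poison for MSAN
#elif defined(V8_USE_ADDRESS_SANITIZER)
  memset(address, 0, size);
  ASAN_POISON_MEMORY_REGION(address, size);  // re-poison for ASAN
#elif DEBUG
  ZapMemory(address, size);                  // 0xdc zap pattern, as before
#else
  memset(address, 0, size);
#endif
}
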
diff --git a/chromium/v8/src/heap/cppgc/stats-collector.h b/chromium/v8/src/heap/cppgc/stats-collector.h
index 2a8583c7304..4709f227035 100644
--- a/chromium/v8/src/heap/cppgc/stats-collector.h
+++ b/chromium/v8/src/heap/cppgc/stats-collector.h
@@ -39,6 +39,7 @@ namespace internal {
V(MarkTransitiveClosure) \
V(MarkTransitiveClosureWithDeadline) \
V(MarkFlushEphemerons) \
+ V(MarkOnAllocation) \
V(MarkProcessBailOutObjects) \
V(MarkProcessMarkingWorklist) \
V(MarkProcessWriteBarrierWorklist) \
@@ -52,6 +53,7 @@ namespace internal {
V(MarkVisitRememberedSets) \
V(SweepInvokePreFinalizers) \
V(SweepIdleStep) \
+ V(SweepInTask) \
V(SweepOnAllocation) \
V(SweepFinalize)
@@ -256,7 +258,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
void NotifyAllocation(size_t);
void NotifyExplicitFree(size_t);
- // Safepoints should only be invoked when garabge collections are possible.
+ // Safepoints should only be invoked when garbage collections are possible.
// This is necessary as increments and decrements are reported as close to
// their actual allocation/reclamation as possible.
void NotifySafePointForConservativeCollection();
diff --git a/chromium/v8/src/heap/cppgc/sweeper.cc b/chromium/v8/src/heap/cppgc/sweeper.cc
index 937a52afc59..3e740f7924a 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.cc
+++ b/chromium/v8/src/heap/cppgc/sweeper.cc
@@ -18,10 +18,10 @@
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-poisoner.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/raw-heap.h"
-#include "src/heap/cppgc/sanitizers.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/task-handle.h"
@@ -133,7 +133,7 @@ class InlinedFinalizationBuilder final {
void AddFinalizer(HeapObjectHeader* header, size_t size) {
header->Finalize();
- SET_MEMORY_INACCESSIBLE(header, size);
+ SetMemoryInaccessible(header, size);
}
void AddFreeListEntry(Address start, size_t size) {
@@ -161,10 +161,7 @@ class DeferredFinalizationBuilder final {
result_.unfinalized_objects.push_back({header});
found_finalizer_ = true;
} else {
- // Unmarked memory may have been poisoned. In the non-concurrent case this
- // is taken care of by finalizing a header.
- ASAN_UNPOISON_MEMORY_REGION(header, size);
- SET_MEMORY_INACCESSIBLE(header, size);
+ SetMemoryInaccessible(header, size);
}
}
@@ -197,15 +194,16 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
bitmap.Clear();
size_t largest_new_free_list_entry = 0;
+ size_t live_bytes = 0;
Address start_of_gap = page->PayloadStart();
for (Address begin = page->PayloadStart(), end = page->PayloadEnd();
begin != end;) {
HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(begin);
- const size_t size = header->GetSize();
+ const size_t size = header->AllocatedSize();
// Check if this is a free list entry.
if (header->IsFree<kAtomicAccess>()) {
- SET_MEMORY_INACCESSIBLE(header, std::min(kFreeListEntrySize, size));
+ SetMemoryInaccessible(header, std::min(kFreeListEntrySize, size));
begin += size;
continue;
}
@@ -229,6 +227,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
bitmap.SetBit(begin);
begin += size;
start_of_gap = begin;
+ live_bytes += size;
}
if (start_of_gap != page->PayloadStart() &&
@@ -237,6 +236,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
bitmap.SetBit(start_of_gap);
}
+ page->SetAllocatedBytesAtLastGC(live_bytes);
const bool is_empty = (start_of_gap == page->PayloadStart());
return builder.GetResult(is_empty, largest_new_free_list_entry);
@@ -290,9 +290,9 @@ class SweepFinalizer final {
// Call finalizers.
for (HeapObjectHeader* object : page_state->unfinalized_objects) {
- const size_t size = object->GetSize();
+ const size_t size = object->AllocatedSize();
object->Finalize();
- SET_MEMORY_INACCESSIBLE(object, size);
+ SetMemoryInaccessible(object, size);
}
// Unmap page if empty.
@@ -665,6 +665,33 @@ class Sweeper::SweeperImpl final {
bool IsSweepingInProgress() const { return is_in_progress_; }
+ bool PerformSweepOnMutatorThread(double deadline_in_seconds,
+ StatsCollector::ScopeId internal_scope_id) {
+ if (!is_in_progress_) return true;
+
+ MutatorThreadSweepingScope sweeping_in_progress(*this);
+
+ bool sweep_complete;
+ {
+ StatsCollector::EnabledScope stats_scope(
+ stats_collector_, StatsCollector::kIncrementalSweep);
+
+ MutatorThreadSweeper sweeper(&space_states_, platform_);
+ {
+ StatsCollector::EnabledScope stats_scope(
+ stats_collector_, internal_scope_id, "deltaInSeconds",
+ deadline_in_seconds - platform_->MonotonicallyIncreasingTime());
+
+ sweep_complete = sweeper.SweepWithDeadline(deadline_in_seconds);
+ }
+ if (sweep_complete) {
+ FinalizeSweep();
+ }
+ }
+ if (sweep_complete) NotifyDone();
+ return sweep_complete;
+ }
+
private:
class MutatorThreadSweepingScope final {
public:
@@ -701,33 +728,12 @@ class Sweeper::SweeperImpl final {
private:
void Run(double deadline_in_seconds) override {
- if (handle_.IsCanceled() || !sweeper_->is_in_progress_) return;
+ if (handle_.IsCanceled()) return;
- MutatorThreadSweepingScope sweeping_in_progresss(*sweeper_);
-
- bool sweep_complete;
- {
- StatsCollector::EnabledScope stats_scope(
- sweeper_->stats_collector_, StatsCollector::kIncrementalSweep);
-
- MutatorThreadSweeper sweeper(&sweeper_->space_states_,
- sweeper_->platform_);
- {
- StatsCollector::EnabledScope stats_scope(
- sweeper_->stats_collector_, StatsCollector::kSweepIdleStep,
- "idleDeltaInSeconds",
- (deadline_in_seconds -
- sweeper_->platform_->MonotonicallyIncreasingTime()));
-
- sweep_complete = sweeper.SweepWithDeadline(deadline_in_seconds);
- }
- if (sweep_complete) {
- sweeper_->FinalizeSweep();
- } else {
- sweeper_->ScheduleIncrementalSweeping();
- }
+ if (!sweeper_->PerformSweepOnMutatorThread(
+ deadline_in_seconds, StatsCollector::kSweepIdleStep)) {
+ sweeper_->ScheduleIncrementalSweeping();
}
- if (sweep_complete) sweeper_->NotifyDone();
}
Handle GetHandle() const { return handle_; }
@@ -807,5 +813,10 @@ bool Sweeper::IsSweepingInProgress() const {
return impl_->IsSweepingInProgress();
}
+bool Sweeper::PerformSweepOnMutatorThread(double deadline_in_seconds) {
+ return impl_->PerformSweepOnMutatorThread(deadline_in_seconds,
+ StatsCollector::kSweepInTask);
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/sweeper.h b/chromium/v8/src/heap/cppgc/sweeper.h
index 4c77ec69173..a13962aa914 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.h
+++ b/chromium/v8/src/heap/cppgc/sweeper.h
@@ -9,6 +9,7 @@
#include "include/cppgc/heap.h"
#include "src/base/macros.h"
+#include "src/base/platform/time.h"
namespace cppgc {
@@ -49,6 +50,9 @@ class V8_EXPORT_PRIVATE Sweeper final {
bool IsSweepingOnMutatorThread() const;
bool IsSweepingInProgress() const;
+ // Assist with sweeping. Returns true if sweeping is done.
+ bool PerformSweepOnMutatorThread(double deadline_in_seconds);
+
private:
void WaitForConcurrentSweepingForTesting();
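
The new PerformSweepOnMutatorThread() entry point lets callers outside the idle task assist sweeping against a wall-clock deadline (seconds on the platform's monotonic clock) and returns true once sweeping has fully finished. A hedged usage sketch; the surrounding wiring of |platform| and |sweeper| is assumed for illustration.

// Illustrative caller only.
void AssistSweeping(cppgc::Platform* platform,
                    cppgc::internal::Sweeper& sweeper) {
  if (!sweeper.IsSweepingInProgress()) return;
  // Give sweeping a 1ms budget on the mutator thread.
  const double deadline_in_seconds =
      platform->MonotonicallyIncreasingTime() + 0.001;
  const bool done = sweeper.PerformSweepOnMutatorThread(deadline_in_seconds);
  (void)done;  // false means sweeping continues incrementally/concurrently.
}
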
diff --git a/chromium/v8/src/heap/cppgc/trace-trait.cc b/chromium/v8/src/heap/cppgc/trace-trait.cc
index bf3759881b4..df14e3698be 100644
--- a/chromium/v8/src/heap/cppgc/trace-trait.cc
+++ b/chromium/v8/src/heap/cppgc/trace-trait.cc
@@ -18,9 +18,10 @@ TraceDescriptor TraceTraitFromInnerAddressImpl::GetTraceDescriptor(
page->SynchronizedLoad();
const HeapObjectHeader& header =
page->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(address);
- return {header.Payload(), GlobalGCInfoTable::GCInfoFromIndex(
- header.GetGCInfoIndex<AccessMode::kAtomic>())
- .trace};
+ return {header.ObjectStart(),
+ GlobalGCInfoTable::GCInfoFromIndex(
+ header.GetGCInfoIndex<AccessMode::kAtomic>())
+ .trace};
}
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/visitor.cc b/chromium/v8/src/heap/cppgc/visitor.cc
index 33786f6fce3..9d90e4ab3e3 100644
--- a/chromium/v8/src/heap/cppgc/visitor.cc
+++ b/chromium/v8/src/heap/cppgc/visitor.cc
@@ -4,11 +4,12 @@
#include "src/heap/cppgc/visitor.h"
+#include "src/base/sanitizer/msan.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/object-view.h"
#include "src/heap/cppgc/page-memory.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace cppgc {
@@ -29,15 +30,15 @@ namespace {
void TraceConservatively(ConservativeTracingVisitor* conservative_visitor,
const HeapObjectHeader& header) {
- Address* payload = reinterpret_cast<Address*>(header.Payload());
- const size_t payload_size = header.GetSize();
- for (size_t i = 0; i < (payload_size / sizeof(Address)); ++i) {
- Address maybe_ptr = payload[i];
+ const auto object_view = ObjectView(header);
+ Address* object = reinterpret_cast<Address*>(object_view.Start());
+ for (size_t i = 0; i < (object_view.Size() / sizeof(Address)); ++i) {
+ Address maybe_ptr = object[i];
#if defined(MEMORY_SANITIZER)
- // |payload| may be uninitialized by design or just contain padding bytes.
+ // |object| may be uninitialized by design or just contain padding bytes.
// Copy into a local variable that is not poisoned for conservative marking.
// Copy into a temporary variable to maintain the original MSAN state.
- MSAN_UNPOISON(&maybe_ptr, sizeof(maybe_ptr));
+ MSAN_MEMORY_IS_INITIALIZED(&maybe_ptr, sizeof(maybe_ptr));
#endif
if (maybe_ptr) {
conservative_visitor->TraceConservativelyIfNeeded(maybe_ptr);
@@ -49,8 +50,6 @@ void TraceConservatively(ConservativeTracingVisitor* conservative_visitor,
void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
const void* address) {
- // TODO(chromium:1056170): Add page bloom filter
-
const BasePage* page = reinterpret_cast<const BasePage*>(
page_backend_.Lookup(static_cast<ConstAddress>(address)));
@@ -78,8 +77,8 @@ void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
void ConservativeTracingVisitor::VisitFullyConstructedConservatively(
HeapObjectHeader& header) {
visitor_.Visit(
- header.Payload(),
- {header.Payload(),
+ header.ObjectStart(),
+ {header.ObjectStart(),
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
diff --git a/chromium/v8/src/heap/embedder-tracing.cc b/chromium/v8/src/heap/embedder-tracing.cc
index 4fd747a964e..72bdde571a6 100644
--- a/chromium/v8/src/heap/embedder-tracing.cc
+++ b/chromium/v8/src/heap/embedder-tracing.cc
@@ -17,6 +17,7 @@ void LocalEmbedderHeapTracer::SetRemoteTracer(EmbedderHeapTracer* tracer) {
if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
remote_tracer_ = tracer;
+ default_embedder_roots_handler_.SetTracer(tracer);
if (remote_tracer_)
remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
}
@@ -75,9 +76,8 @@ void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
if (!InUse()) return;
embedder_stack_state_ = stack_state;
- if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state) {
- remote_tracer()->NotifyEmptyEmbedderStack();
- }
+ if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
+ NotifyEmptyEmbedderStack();
}
namespace {
@@ -164,5 +164,33 @@ void LocalEmbedderHeapTracer::StartIncrementalMarkingIfNeeded() {
}
}
+void LocalEmbedderHeapTracer::NotifyEmptyEmbedderStack() {
+ auto* overriden_stack_state = isolate_->heap()->overriden_stack_state();
+ if (overriden_stack_state &&
+ (*overriden_stack_state ==
+ cppgc::EmbedderStackState::kMayContainHeapPointers))
+ return;
+
+ isolate_->global_handles()->NotifyEmptyEmbedderStack();
+}
+
+bool DefaultEmbedderRootsHandler::IsRoot(
+ const v8::TracedReference<v8::Value>& handle) {
+ return !tracer_ || tracer_->IsRootForNonTracingGC(handle);
+}
+
+bool DefaultEmbedderRootsHandler::IsRoot(
+ const v8::TracedGlobal<v8::Value>& handle) {
+ return !tracer_ || tracer_->IsRootForNonTracingGC(handle);
+}
+
+void DefaultEmbedderRootsHandler::ResetRoot(
+ const v8::TracedReference<v8::Value>& handle) {
+ // Resetting is only called when IsRoot() returns false which
+ // can only happen the EmbedderHeapTracer is set on API level.
+ DCHECK(tracer_);
+ tracer_->ResetHandleInNonTracingGC(handle);
+}
+
} // namespace internal
} // namespace v8
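
DefaultEmbedderRootsHandler above simply forwards root queries to the registered EmbedderHeapTracer; with no tracer set, every traced handle is reported as a root, matching the inline IsRootForNonTracingGC behavior that the header change below removes. For reference, an embedder-side handler built on the same public EmbedderRootsHandler interface looks roughly like this; installation via SetEmbedderRootsHandler is assumed from the public API and is not part of this patch.

// Sketch of a custom embedder handler; the method set mirrors the overrides
// in DefaultEmbedderRootsHandler above.
class DroppingRootsHandler final : public v8::EmbedderRootsHandler {
 public:
  bool IsRoot(const v8::TracedReference<v8::Value>&) final { return false; }
  bool IsRoot(const v8::TracedGlobal<v8::Value>&) final { return false; }
  void ResetRoot(const v8::TracedReference<v8::Value>& handle) final {
    // Invoked only for handles that IsRoot() declined; this is where the
    // embedder resets/drops whatever wrapper owns |handle|.
  }
};
// Typically installed with: isolate->SetEmbedderRootsHandler(&handler);
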
diff --git a/chromium/v8/src/heap/embedder-tracing.h b/chromium/v8/src/heap/embedder-tracing.h
index 8a1b14a32b4..befb1a7e7ac 100644
--- a/chromium/v8/src/heap/embedder-tracing.h
+++ b/chromium/v8/src/heap/embedder-tracing.h
@@ -16,6 +16,19 @@ namespace internal {
class Heap;
class JSObject;
+class V8_EXPORT_PRIVATE DefaultEmbedderRootsHandler final
+ : public EmbedderRootsHandler {
+ public:
+ bool IsRoot(const v8::TracedReference<v8::Value>& handle) final;
+ bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) final;
+ void ResetRoot(const v8::TracedReference<v8::Value>& handle) final;
+
+ void SetTracer(EmbedderHeapTracer* tracer) { tracer_ = tracer; }
+
+ private:
+ EmbedderHeapTracer* tracer_ = nullptr;
+};
+
class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
public:
using WrapperInfo = std::pair<void*, void*>;
@@ -74,21 +87,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
bool Trace(double deadline);
bool IsRemoteTracingDone();
- bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) {
- return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle);
- }
-
- bool IsRootForNonTracingGC(const v8::TracedReference<v8::Value>& handle) {
- return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle);
- }
-
- void ResetHandleInNonTracingGC(const v8::TracedReference<v8::Value>& handle) {
- // Resetting is only called when IsRootForNonTracingGC returns false which
- // can only happen the EmbedderHeapTracer is set on API level.
- DCHECK(InUse());
- remote_tracer_->ResetHandleInNonTracingGC(handle);
- }
-
bool ShouldFinalizeIncrementalMarking() {
return !FLAG_incremental_marking_wrappers || !InUse() ||
(IsRemoteTracingDone() && embedder_worklist_empty_);
@@ -130,6 +128,12 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
void UpdateRemoteStats(size_t, double);
+ DefaultEmbedderRootsHandler& default_embedder_roots_handler() {
+ return default_embedder_roots_handler_;
+ }
+
+ void NotifyEmptyEmbedderStack();
+
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
@@ -147,6 +151,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
Isolate* const isolate_;
EmbedderHeapTracer* remote_tracer_ = nullptr;
+ DefaultEmbedderRootsHandler default_embedder_roots_handler_;
EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ =
EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
@@ -183,11 +188,8 @@ class V8_EXPORT_PRIVATE V8_NODISCARD EmbedderStackStateScope final {
: local_tracer_(local_tracer),
old_stack_state_(local_tracer_->embedder_stack_state_) {
local_tracer_->embedder_stack_state_ = stack_state;
- if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers ==
- stack_state) {
- if (local_tracer->remote_tracer())
- local_tracer->remote_tracer()->NotifyEmptyEmbedderStack();
- }
+ if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
+ local_tracer_->NotifyEmptyEmbedderStack();
}
~EmbedderStackStateScope() {
diff --git a/chromium/v8/src/heap/factory-base-inl.h b/chromium/v8/src/heap/factory-base-inl.h
index 6f218b8248c..6c1cede212c 100644
--- a/chromium/v8/src/heap/factory-base-inl.h
+++ b/chromium/v8/src/heap/factory-base-inl.h
@@ -6,9 +6,10 @@
#define V8_HEAP_FACTORY_BASE_INL_H_
#include "src/heap/factory-base.h"
-
#include "src/numbers/conversions.h"
#include "src/objects/heap-number.h"
+#include "src/objects/map.h"
+#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
#include "src/roots/roots.h"
@@ -93,6 +94,29 @@ Handle<HeapNumber> FactoryBase<Impl>::NewHeapNumberWithHoleNaN() {
return NewHeapNumberFromBits<allocation>(kHoleNanInt64);
}
+template <typename Impl>
+template <typename StructType>
+StructType FactoryBase<Impl>::NewStructInternal(InstanceType type,
+ AllocationType allocation) {
+ ReadOnlyRoots roots = read_only_roots();
+ Map map = Map::GetInstanceTypeMap(roots, type);
+ int size = StructType::kSize;
+ return StructType::cast(NewStructInternal(roots, map, size, allocation));
+}
+
+template <typename Impl>
+Struct FactoryBase<Impl>::NewStructInternal(ReadOnlyRoots roots, Map map,
+ int size,
+ AllocationType allocation) {
+ DCHECK_EQ(size, map.instance_size());
+ HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
+ Struct str = Struct::cast(result);
+ Object value = roots.undefined_value();
+ int length = (size >> kTaggedSizeLog2) - 1;
+ MemsetTagged(str.RawField(Struct::kHeaderSize), value, length);
+ return str;
+}
+
} // namespace internal
} // namespace v8
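
The templated NewStructInternal<StructType>() above removes the Struct::cast boilerplate at every call site and pre-fills all tagged fields with undefined via MemsetTagged. The resulting call pattern, as used throughout the factory.cc hunks below (the wrapper function name here is illustrative; the body mirrors the patch's NewTuple2):

template <typename Impl>
Handle<Tuple2> FactoryBase<Impl>::NewTuple2Example(Handle<Object> value1,
                                                   Handle<Object> value2,
                                                   AllocationType allocation) {
  auto result = NewStructInternal<Tuple2>(TUPLE2_TYPE, allocation);
  DisallowGarbageCollection no_gc;  // raw object is held across the setters
  result.set_value1(*value1);
  result.set_value2(*value2);
  return handle(result, isolate());
}
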
diff --git a/chromium/v8/src/heap/factory-base.cc b/chromium/v8/src/heap/factory-base.cc
index 45577f7bf91..df78716a8de 100644
--- a/chromium/v8/src/heap/factory-base.cc
+++ b/chromium/v8/src/heap/factory-base.cc
@@ -53,36 +53,30 @@ FactoryBase<LocalFactory>::NewHeapNumber<AllocationType::kOld>();
template <typename Impl>
Handle<Struct> FactoryBase<Impl>::NewStruct(InstanceType type,
AllocationType allocation) {
- return handle(NewStructInternal(type, allocation), isolate());
-}
-
-template <typename Impl>
-Struct FactoryBase<Impl>::NewStructInternal(InstanceType type,
- AllocationType allocation) {
- Map map = Map::GetInstanceTypeMap(read_only_roots(), type);
+ ReadOnlyRoots roots = read_only_roots();
+ Map map = Map::GetInstanceTypeMap(roots, type);
int size = map.instance_size();
- HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
- Struct str = Struct::cast(result);
- str.InitializeBody(size);
- return str;
+ return handle(NewStructInternal(roots, map, size, allocation), isolate());
}
template <typename Impl>
Handle<AccessorPair> FactoryBase<Impl>::NewAccessorPair() {
- Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(
- NewStruct(ACCESSOR_PAIR_TYPE, AllocationType::kOld));
- AccessorPair raw = *accessors;
+ auto accessors =
+ NewStructInternal<AccessorPair>(ACCESSOR_PAIR_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
- raw.set_getter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
- raw.set_setter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
- return accessors;
+ accessors.set_getter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
+ accessors.set_setter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
+ return handle(accessors, isolate());
}
template <typename Impl>
Handle<FixedArray> FactoryBase<Impl>::NewFixedArray(int length,
AllocationType allocation) {
- DCHECK_LE(0, length);
if (length == 0) return impl()->empty_fixed_array();
+ if (length < 0 || length > FixedArray::kMaxLength) {
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
+ }
return NewFixedArrayWithFiller(
read_only_roots().fixed_array_map_handle(), length,
read_only_roots().undefined_value_handle(), allocation);
@@ -128,7 +122,8 @@ Handle<FixedArrayBase> FactoryBase<Impl>::NewFixedDoubleArray(
int length, AllocationType allocation) {
if (length == 0) return impl()->empty_fixed_array();
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
}
int size = FixedDoubleArray::SizeFor(length);
Map map = read_only_roots().fixed_double_array_map();
@@ -172,7 +167,8 @@ template <typename Impl>
Handle<ByteArray> FactoryBase<Impl>::NewByteArray(int length,
AllocationType allocation) {
if (length < 0 || length > ByteArray::kMaxLength) {
- isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
}
int size = ByteArray::SizeFor(length);
HeapObject result = AllocateRawWithImmortalMap(
@@ -189,7 +185,8 @@ Handle<BytecodeArray> FactoryBase<Impl>::NewBytecodeArray(
int length, const byte* raw_bytecodes, int frame_size, int parameter_count,
Handle<FixedArray> constant_pool) {
if (length < 0 || length > BytecodeArray::kMaxLength) {
- isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
}
// Bytecode array is AllocationType::kOld, so constant pool array should be
// too.
@@ -230,8 +227,8 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
DCHECK(source->IsString() || source->IsUndefined());
// Create and initialize script object.
ReadOnlyRoots roots = read_only_roots();
- Handle<Script> script =
- Handle<Script>::cast(NewStruct(SCRIPT_TYPE, AllocationType::kOld));
+ Handle<Script> script = handle(
+ NewStructInternal<Script>(SCRIPT_TYPE, AllocationType::kOld), isolate());
{
DisallowGarbageCollection no_gc;
Script raw = *script;
@@ -243,8 +240,8 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
raw.set_context_data(roots.undefined_value(), SKIP_WRITE_BARRIER);
raw.set_type(Script::TYPE_NORMAL);
raw.set_line_ends(roots.undefined_value(), SKIP_WRITE_BARRIER);
- raw.set_eval_from_shared_or_wrapped_arguments(roots.undefined_value(),
- SKIP_WRITE_BARRIER);
+ raw.set_eval_from_shared_or_wrapped_arguments_or_sfi_table(
+ roots.undefined_value(), SKIP_WRITE_BARRIER);
raw.set_eval_from_position(0);
raw.set_shared_function_infos(roots.empty_weak_fixed_array(),
SKIP_WRITE_BARRIER);
@@ -394,14 +391,12 @@ template <typename Impl>
Handle<ArrayBoilerplateDescription>
FactoryBase<Impl>::NewArrayBoilerplateDescription(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
- Handle<ArrayBoilerplateDescription> result =
- Handle<ArrayBoilerplateDescription>::cast(
- NewStruct(ARRAY_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld));
+ auto result = NewStructInternal<ArrayBoilerplateDescription>(
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
- ArrayBoilerplateDescription raw = *result;
- raw.set_elements_kind(elements_kind);
- raw.set_constant_elements(*constant_values);
- return result;
+ result.set_elements_kind(elements_kind);
+ result.set_constant_elements(*constant_values);
+ return handle(result, isolate());
}
template <typename Impl>
@@ -409,15 +404,13 @@ Handle<RegExpBoilerplateDescription>
FactoryBase<Impl>::NewRegExpBoilerplateDescription(Handle<FixedArray> data,
Handle<String> source,
Smi flags) {
- Handle<RegExpBoilerplateDescription> result =
- Handle<RegExpBoilerplateDescription>::cast(NewStruct(
- REG_EXP_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld));
+ auto result = NewStructInternal<RegExpBoilerplateDescription>(
+ REG_EXP_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
- RegExpBoilerplateDescription raw = *result;
- raw.set_data(*data);
- raw.set_source(*source);
- raw.set_flags(flags.value());
- return result;
+ result.set_data(*data);
+ result.set_source(*source);
+ result.set_flags(flags.value());
+ return handle(result, isolate());
}
template <typename Impl>
@@ -426,14 +419,12 @@ FactoryBase<Impl>::NewTemplateObjectDescription(
Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings) {
DCHECK_EQ(raw_strings->length(), cooked_strings->length());
DCHECK_LT(0, raw_strings->length());
- Handle<TemplateObjectDescription> result =
- Handle<TemplateObjectDescription>::cast(
- NewStruct(TEMPLATE_OBJECT_DESCRIPTION_TYPE, AllocationType::kOld));
+ auto result = NewStructInternal<TemplateObjectDescription>(
+ TEMPLATE_OBJECT_DESCRIPTION_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
- TemplateObjectDescription raw = *result;
- raw.set_raw_strings(*raw_strings);
- raw.set_cooked_strings(*cooked_strings);
- return result;
+ result.set_raw_strings(*raw_strings);
+ result.set_cooked_strings(*cooked_strings);
+ return handle(result, isolate());
}
template <typename Impl>
@@ -691,7 +682,8 @@ template <typename Impl>
Handle<FreshlyAllocatedBigInt> FactoryBase<Impl>::NewBigInt(
int length, AllocationType allocation) {
if (length < 0 || length > BigInt::kMaxLength) {
- isolate()->FatalProcessOutOfHeapMemory("invalid BigInt length");
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
}
HeapObject result = AllocateRawWithImmortalMap(
BigInt::SizeFor(length), allocation, read_only_roots().bigint_map());
@@ -760,11 +752,11 @@ Handle<DescriptorArray> FactoryBase<Impl>::NewDescriptorArray(
template <typename Impl>
Handle<ClassPositions> FactoryBase<Impl>::NewClassPositions(int start,
int end) {
- Handle<ClassPositions> class_positions = Handle<ClassPositions>::cast(
- NewStruct(CLASS_POSITIONS_TYPE, AllocationType::kOld));
- class_positions->set_start(start);
- class_positions->set_end(end);
- return class_positions;
+ auto result = NewStructInternal<ClassPositions>(CLASS_POSITIONS_TYPE,
+ AllocationType::kOld);
+ result.set_start(start);
+ result.set_end(end);
+ return handle(result, isolate());
}
template <typename Impl>
@@ -825,7 +817,8 @@ template <typename Impl>
HeapObject FactoryBase<Impl>::AllocateRawFixedArray(int length,
AllocationType allocation) {
if (length < 0 || length > FixedArray::kMaxLength) {
- isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
}
return AllocateRawArray(FixedArray::SizeFor(length), allocation);
}
@@ -834,7 +827,8 @@ template <typename Impl>
HeapObject FactoryBase<Impl>::AllocateRawWeakArrayList(
int capacity, AllocationType allocation) {
if (capacity < 0 || capacity > WeakArrayList::kMaxCapacity) {
- isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ FATAL("Fatal JavaScript invalid size error %d", capacity);
+ UNREACHABLE();
}
return AllocateRawArray(WeakArrayList::SizeForCapacity(capacity), allocation);
}
@@ -878,8 +872,9 @@ FactoryBase<Impl>::NewSwissNameDictionaryWithCapacity(
return read_only_roots().empty_swiss_property_dictionary_handle();
}
- if (capacity > SwissNameDictionary::MaxCapacity()) {
- isolate()->FatalProcessOutOfHeapMemory("invalid table size");
+ if (capacity < 0 || capacity > SwissNameDictionary::MaxCapacity()) {
+ FATAL("Fatal JavaScript invalid size error %d", capacity);
+ UNREACHABLE();
}
int meta_table_length = SwissNameDictionary::MetaTableSizeFor(capacity);
@@ -902,6 +897,18 @@ Handle<SwissNameDictionary> FactoryBase<Impl>::NewSwissNameDictionary(
SwissNameDictionary::CapacityFor(at_least_space_for), allocation);
}
+template <typename Impl>
+Handle<FunctionTemplateRareData>
+FactoryBase<Impl>::NewFunctionTemplateRareData() {
+ auto function_template_rare_data =
+ NewStructInternal<FunctionTemplateRareData>(
+ FUNCTION_TEMPLATE_RARE_DATA_TYPE, AllocationType::kOld);
+ DisallowGarbageCollection no_gc;
+ function_template_rare_data.set_c_function_overloads(
+ *impl()->empty_fixed_array(), SKIP_WRITE_BARRIER);
+ return handle(function_template_rare_data, isolate());
+}
+
// Instantiate FactoryBase for the two variants we want.
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) FactoryBase<Factory>;
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
diff --git a/chromium/v8/src/heap/factory-base.h b/chromium/v8/src/heap/factory-base.h
index b964f6b2346..4e3d5efbe40 100644
--- a/chromium/v8/src/heap/factory-base.h
+++ b/chromium/v8/src/heap/factory-base.h
@@ -228,14 +228,19 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<SwissNameDictionary> NewSwissNameDictionaryWithCapacity(
int capacity, AllocationType allocation);
+ Handle<FunctionTemplateRareData> NewFunctionTemplateRareData();
+
protected:
// Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
HeapObject AllocateRawArray(int size, AllocationType allocation);
HeapObject AllocateRawFixedArray(int length, AllocationType allocation);
HeapObject AllocateRawWeakArrayList(int length, AllocationType allocation);
- Struct NewStructInternal(InstanceType type,
- AllocationType allocation = AllocationType::kYoung);
+ template <typename StructType>
+ inline StructType NewStructInternal(InstanceType type,
+ AllocationType allocation);
+ Struct NewStructInternal(ReadOnlyRoots roots, Map map, int size,
+ AllocationType allocation);
HeapObject AllocateRawWithImmortalMap(
int size, AllocationType allocation, Map map,
diff --git a/chromium/v8/src/heap/factory.cc b/chromium/v8/src/heap/factory.cc
index 0c89a3fa9cc..6f753d23b42 100644
--- a/chromium/v8/src/heap/factory.cc
+++ b/chromium/v8/src/heap/factory.cc
@@ -53,6 +53,7 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/megadom-handler-inl.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
@@ -153,9 +154,8 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
if (is_executable_) {
DCHECK(IsAligned(code->address(), kCodeAlignment));
DCHECK_IMPLIES(
- !V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
- !heap->memory_allocator()->code_range().is_empty(),
- heap->memory_allocator()->code_range().contains(code->address()));
+ !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
+ heap->code_region().contains(code->address()));
}
constexpr bool kIsNotOffHeapTrampoline = false;
@@ -314,8 +314,8 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
}
Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
- PrototypeInfo result = PrototypeInfo::cast(
- NewStructInternal(PROTOTYPE_INFO_TYPE, AllocationType::kOld));
+ auto result = NewStructInternal<PrototypeInfo>(PROTOTYPE_INFO_TYPE,
+ AllocationType::kOld);
DisallowGarbageCollection no_gc;
result.set_prototype_users(Smi::zero());
result.set_registry_slot(PrototypeInfo::UNREGISTERED);
@@ -326,8 +326,8 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<EnumCache> Factory::NewEnumCache(Handle<FixedArray> keys,
Handle<FixedArray> indices) {
- EnumCache result =
- EnumCache::cast(NewStructInternal(ENUM_CACHE_TYPE, AllocationType::kOld));
+ auto result =
+ NewStructInternal<EnumCache>(ENUM_CACHE_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
result.set_keys(*keys);
result.set_indices(*indices);
@@ -336,7 +336,7 @@ Handle<EnumCache> Factory::NewEnumCache(Handle<FixedArray> keys,
Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
AllocationType allocation) {
- Tuple2 result = Tuple2::cast(NewStructInternal(TUPLE2_TYPE, allocation));
+ auto result = NewStructInternal<Tuple2>(TUPLE2_TYPE, allocation);
DisallowGarbageCollection no_gc;
result.set_value1(*value1);
result.set_value2(*value2);
@@ -345,8 +345,8 @@ Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
Handle<BaselineData> Factory::NewBaselineData(
Handle<Code> code, Handle<HeapObject> function_data) {
- BaselineData baseline_data = BaselineData::cast(
- NewStructInternal(BASELINE_DATA_TYPE, AllocationType::kOld));
+ auto baseline_data =
+ NewStructInternal<BaselineData>(BASELINE_DATA_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
baseline_data.set_baseline_code(*code);
baseline_data.set_data(*function_data);
@@ -410,20 +410,6 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
return handle(array, isolate());
}
-Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
- if (length == 0) return empty_fixed_array();
- if (length < 0 || length > FixedArray::kMaxLength) {
- isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
- }
-
- // TODO(ulan): As an experiment this temporarily returns an initialized fixed
- // array. After getting canary/performance coverage, either remove the
- // function or revert to returning uninitilized array.
- return NewFixedArrayWithFiller(read_only_roots().fixed_array_map_handle(),
- length, undefined_value(),
- AllocationType::kYoung);
-}
-
Handle<ClosureFeedbackCellArray> Factory::NewClosureFeedbackCellArray(
int length) {
if (length == 0) return empty_closure_feedback_cell_array();
@@ -557,9 +543,8 @@ Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) {
}
Handle<PropertyDescriptorObject> Factory::NewPropertyDescriptorObject() {
- PropertyDescriptorObject object =
- PropertyDescriptorObject::cast(NewStructInternal(
- PROPERTY_DESCRIPTOR_OBJECT_TYPE, AllocationType::kYoung));
+ auto object = NewStructInternal<PropertyDescriptorObject>(
+ PROPERTY_DESCRIPTOR_OBJECT_TYPE, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
object.set_flags(0);
Oddball the_hole = read_only_roots().the_hole_value();
@@ -1095,7 +1080,7 @@ Handle<NativeContext> Factory::NewNativeContext() {
// The ExternalPointerTable is a C++ object.
context.AllocateExternalPointerEntries(isolate());
context.set_scope_info(*native_scope_info());
- context.set_previous(Context::unchecked_cast(Smi::zero()));
+ context.set_previous(Context());
context.set_extension(*undefined_value());
context.set_errors_thrown(Smi::zero());
context.set_math_random_index(Smi::zero());
@@ -1179,7 +1164,7 @@ Handle<Context> Factory::NewCatchContext(Handle<Context> previous,
isolate()->catch_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
- DCHECK(Heap::InYoungGeneration(context));
+ DCHECK_IMPLIES(!FLAG_single_generation, Heap::InYoungGeneration(context));
context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
context.set_previous(*previous, SKIP_WRITE_BARRIER);
context.set(Context::THROWN_OBJECT_INDEX, *thrown_object, SKIP_WRITE_BARRIER);
@@ -1204,7 +1189,7 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
- DCHECK(Heap::InYoungGeneration(context));
+ DCHECK_IMPLIES(!FLAG_single_generation, Heap::InYoungGeneration(context));
context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
context.set_previous(*previous, SKIP_WRITE_BARRIER);
context.set_extension(*ext, SKIP_WRITE_BARRIER);
@@ -1227,7 +1212,7 @@ Handle<Context> Factory::NewWithContext(Handle<Context> previous,
isolate()->with_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
- DCHECK(Heap::InYoungGeneration(context));
+ DCHECK_IMPLIES(!FLAG_single_generation, Heap::InYoungGeneration(context));
context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
context.set_previous(*previous, SKIP_WRITE_BARRIER);
context.set_extension(*extension, SKIP_WRITE_BARRIER);
@@ -1243,7 +1228,7 @@ Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
isolate()->block_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
- DCHECK(Heap::InYoungGeneration(context));
+ DCHECK_IMPLIES(!FLAG_single_generation, Heap::InYoungGeneration(context));
context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
context.set_previous(*previous, SKIP_WRITE_BARRIER);
return handle(context, isolate());
@@ -1256,7 +1241,7 @@ Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
isolate()->function_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
- DCHECK(Heap::InYoungGeneration(context));
+ DCHECK_IMPLIES(!FLAG_single_generation, Heap::InYoungGeneration(context));
context.set_scope_info(read_only_roots().empty_scope_info(),
SKIP_WRITE_BARRIER);
context.set_previous(*native_context, SKIP_WRITE_BARRIER);
@@ -1265,15 +1250,15 @@ Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
int aliased_context_slot) {
- AliasedArgumentsEntry entry = AliasedArgumentsEntry::cast(
- NewStructInternal(ALIASED_ARGUMENTS_ENTRY_TYPE, AllocationType::kYoung));
+ auto entry = NewStructInternal<AliasedArgumentsEntry>(
+ ALIASED_ARGUMENTS_ENTRY_TYPE, AllocationType::kYoung);
entry.set_aliased_context_slot(aliased_context_slot);
return handle(entry, isolate());
}
Handle<AccessorInfo> Factory::NewAccessorInfo() {
- AccessorInfo info = AccessorInfo::cast(
- NewStructInternal(ACCESSOR_INFO_TYPE, AllocationType::kOld));
+ auto info =
+ NewStructInternal<AccessorInfo>(ACCESSOR_INFO_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
info.set_name(*empty_string(), SKIP_WRITE_BARRIER);
info.set_flags(0); // Must clear the flags, it was initialized as undefined.
@@ -1311,8 +1296,8 @@ Handle<Script> Factory::CloneScript(Handle<Script> script) {
new_script.set_context_data(old_script.context_data());
new_script.set_type(old_script.type());
new_script.set_line_ends(*undefined_value(), SKIP_WRITE_BARRIER);
- new_script.set_eval_from_shared_or_wrapped_arguments(
- script->eval_from_shared_or_wrapped_arguments());
+ new_script.set_eval_from_shared_or_wrapped_arguments_or_sfi_table(
+ script->eval_from_shared_or_wrapped_arguments_or_sfi_table());
new_script.set_shared_function_infos(*empty_weak_fixed_array(),
SKIP_WRITE_BARRIER);
new_script.set_eval_from_position(old_script.eval_from_position());
@@ -1330,8 +1315,8 @@ Handle<Script> Factory::CloneScript(Handle<Script> script) {
Handle<CallableTask> Factory::NewCallableTask(Handle<JSReceiver> callable,
Handle<Context> context) {
DCHECK(callable->IsCallable());
- CallableTask microtask = CallableTask::cast(
- NewStructInternal(CALLABLE_TASK_TYPE, AllocationType::kYoung));
+ auto microtask = NewStructInternal<CallableTask>(CALLABLE_TASK_TYPE,
+ AllocationType::kYoung);
DisallowGarbageCollection no_gc;
microtask.set_callable(*callable, SKIP_WRITE_BARRIER);
microtask.set_context(*context, SKIP_WRITE_BARRIER);
@@ -1340,8 +1325,8 @@ Handle<CallableTask> Factory::NewCallableTask(Handle<JSReceiver> callable,
Handle<CallbackTask> Factory::NewCallbackTask(Handle<Foreign> callback,
Handle<Foreign> data) {
- CallbackTask microtask = CallbackTask::cast(
- NewStructInternal(CALLBACK_TASK_TYPE, AllocationType::kYoung));
+ auto microtask = NewStructInternal<CallbackTask>(CALLBACK_TASK_TYPE,
+ AllocationType::kYoung);
DisallowGarbageCollection no_gc;
microtask.set_callback(*callback, SKIP_WRITE_BARRIER);
microtask.set_data(*data, SKIP_WRITE_BARRIER);
@@ -1352,9 +1337,8 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> thenable,
Handle<JSReceiver> then, Handle<Context> context) {
DCHECK(then->IsCallable());
- PromiseResolveThenableJobTask microtask =
- PromiseResolveThenableJobTask::cast(NewStructInternal(
- PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, AllocationType::kYoung));
+ auto microtask = NewStructInternal<PromiseResolveThenableJobTask>(
+ PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
microtask.set_promise_to_resolve(*promise_to_resolve, SKIP_WRITE_BARRIER);
microtask.set_thenable(*thenable, SKIP_WRITE_BARRIER);
@@ -1377,24 +1361,78 @@ Handle<Foreign> Factory::NewForeign(Address addr) {
#if V8_ENABLE_WEBASSEMBLY
Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(Address type_address,
- Handle<Map> opt_parent) {
+ Handle<Map> opt_parent,
+ int instance_size_bytes) {
+ // We pretenure WasmTypeInfo objects because they are referenced by Maps,
+ // which are assumed to be long-lived. The supertypes list is constant
+ // after initialization, so we pretenure that too.
+ // The subtypes list, however, is expected to grow (and hence be replaced),
+ // so we don't pretenure it.
Handle<ArrayList> subtypes = ArrayList::New(isolate(), 0);
Handle<FixedArray> supertypes;
if (opt_parent.is_null()) {
- supertypes = NewUninitializedFixedArray(0);
+ supertypes = NewFixedArray(0);
} else {
- supertypes = CopyFixedArrayAndGrow(
- handle(opt_parent->wasm_type_info().supertypes(), isolate()), 1);
+ supertypes = CopyArrayAndGrow(
+ handle(opt_parent->wasm_type_info().supertypes(), isolate()), 1,
+ AllocationType::kOld);
supertypes->set(supertypes->length() - 1, *opt_parent);
}
Map map = *wasm_type_info_map();
WasmTypeInfo result = WasmTypeInfo::cast(AllocateRawWithImmortalMap(
- map.instance_size(), AllocationType::kYoung, map));
+ map.instance_size(), AllocationType::kOld, map));
DisallowGarbageCollection no_gc;
result.AllocateExternalPointerEntries(isolate());
result.set_foreign_address(isolate(), type_address);
result.set_supertypes(*supertypes, SKIP_WRITE_BARRIER);
- result.set_subtypes(*subtypes, SKIP_WRITE_BARRIER);
+ result.set_subtypes(*subtypes);
+ result.set_instance_size(instance_size_bytes);
+ return handle(result, isolate());
+}
+
+Handle<WasmJSFunctionData> Factory::NewWasmJSFunctionData(
+ Address opt_call_target, Handle<JSReceiver> callable, int return_count,
+ int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
+ Handle<Code> wrapper_code) {
+ Handle<Tuple2> pair = NewTuple2(null_value(), callable, AllocationType::kOld);
+ Map map = *wasm_js_function_data_map();
+ WasmJSFunctionData result =
+ WasmJSFunctionData::cast(AllocateRawWithImmortalMap(
+ map.instance_size(), AllocationType::kOld, map));
+ DisallowGarbageCollection no_gc;
+ result.AllocateExternalPointerEntries(isolate());
+ result.set_foreign_address(isolate(), opt_call_target);
+ result.set_ref(*pair);
+ result.set_serialized_return_count(return_count);
+ result.set_serialized_parameter_count(parameter_count);
+ result.set_serialized_signature(*serialized_sig);
+ result.set_wrapper_code(*wrapper_code);
+ // Default value, will be overwritten by the caller.
+ result.set_wasm_to_js_wrapper_code(
+ isolate()->heap()->builtin(Builtins::kAbort));
+ return handle(result, isolate());
+}
+
+Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
+ Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
+ Address call_target, Handle<Object> ref, int func_index,
+ Address sig_address, int wrapper_budget) {
+ Handle<Foreign> sig_foreign = NewForeign(sig_address);
+ Map map = *wasm_exported_function_data_map();
+ WasmExportedFunctionData result =
+ WasmExportedFunctionData::cast(AllocateRawWithImmortalMap(
+ map.instance_size(), AllocationType::kOld, map));
+ DisallowGarbageCollection no_gc;
+ result.AllocateExternalPointerEntries(isolate());
+ result.set_foreign_address(isolate(), call_target);
+ result.set_ref(*ref);
+ result.set_wrapper_code(*export_wrapper);
+ result.set_instance(*instance);
+ result.set_function_index(func_index);
+ result.set_signature(*sig_foreign);
+ result.set_wrapper_budget(wrapper_budget);
+ result.set_c_wrapper_code(Smi::zero(), SKIP_WRITE_BARRIER);
+ result.set_packed_args_size(0);
return handle(result, isolate());
}
@@ -1566,9 +1604,7 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map.SetInstanceDescriptors(isolate(), *empty_descriptor_array(), 0);
// Must be called only after |instance_type| and |instance_size| are set.
map.set_visitor_id(Map::GetVisitorId(map));
- // TODO(solanes, v8:7790, v8:11353): set_relaxed_bit_field could be an atomic
- // set if TSAN could see the transitions happening in StoreIC.
- map.set_relaxed_bit_field(0);
+ map.set_bit_field(0);
map.set_bit_field2(Map::Bits2::NewTargetIsBaseBit::encode(true));
int bit_field3 =
Map::Bits3::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
@@ -1892,7 +1928,7 @@ Handle<JSObject> Factory::NewError(Handle<JSFunction> constructor,
Handle<Object> no_caller;
return ErrorUtils::Construct(isolate(), constructor, constructor, message,
- SKIP_NONE, no_caller,
+ undefined_value(), SKIP_NONE, no_caller,
ErrorUtils::StackTraceCollection::kDetailed)
.ToHandleChecked();
}
@@ -2075,9 +2111,8 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
#endif
DCHECK(IsAligned(new_code->address(), kCodeAlignment));
DCHECK_IMPLIES(
- !V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
- !heap->memory_allocator()->code_range().is_empty(),
- heap->memory_allocator()->code_range().contains(new_code->address()));
+ !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
+ heap->code_region().contains(new_code->address()));
return new_code;
}
@@ -2175,7 +2210,7 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// Set up the global object as a normalized object.
global->set_global_dictionary(*dictionary, kReleaseStore);
- global->synchronized_set_map(raw_map);
+ global->set_map(raw_map, kReleaseStore);
// Make sure result is a global object with properties in dictionary.
DCHECK(global->IsJSGlobalObject() && !global->HasFastProperties());
@@ -2212,13 +2247,9 @@ void Factory::InitializeJSObjectBody(JSObject obj, Map map, int start_offset) {
// In case of Array subclassing the |map| could already be transitioned
// to different elements kind from the initial map on which we track slack.
bool in_progress = map.IsInobjectSlackTrackingInProgress();
- Object filler;
- if (in_progress) {
- filler = *one_pointer_filler_map();
- } else {
- filler = *undefined_value();
- }
- obj.InitializeBody(map, start_offset, *undefined_value(), filler);
+ obj.InitializeBody(map, start_offset, in_progress,
+ ReadOnlyRoots(isolate()).one_pointer_filler_map_word(),
+ *undefined_value());
if (in_progress) {
map.FindRootMap(isolate()).InobjectSlackTrackingStep(isolate());
}
@@ -2369,7 +2400,7 @@ Handle<FixedArrayBase> Factory::NewJSArrayStorage(
} else {
DCHECK(IsSmiOrObjectElementsKind(elements_kind));
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
- elms = NewUninitializedFixedArray(capacity);
+ elms = NewFixedArray(capacity);
} else {
DCHECK(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
elms = NewFixedArrayWithHoles(capacity);
@@ -2444,7 +2475,8 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
module.set_status(Module::kUninstantiated);
module.set_exception(roots.the_hole_value(), SKIP_WRITE_BARRIER);
module.set_top_level_capability(roots.undefined_value(), SKIP_WRITE_BARRIER);
- module.set_import_meta(roots.the_hole_value(), SKIP_WRITE_BARRIER);
+ module.set_import_meta(roots.the_hole_value(), kReleaseStore,
+ SKIP_WRITE_BARRIER);
module.set_dfs_index(-1);
module.set_dfs_ancestor_index(-1);
module.set_flags(0);
@@ -2487,7 +2519,8 @@ Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(
isolate());
auto result =
Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
- result->Setup(SharedFlag::kNotShared, std::move(backing_store));
+ result->Setup(SharedFlag::kNotShared, ResizableFlag::kNotResizable,
+ std::move(backing_store));
return result;
}
@@ -2505,18 +2538,32 @@ MaybeHandle<JSArrayBuffer> Factory::NewJSArrayBufferAndBackingStore(
isolate());
auto array_buffer =
Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
- array_buffer->Setup(SharedFlag::kNotShared, std::move(backing_store));
+ array_buffer->Setup(SharedFlag::kNotShared, ResizableFlag::kNotResizable,
+ std::move(backing_store));
return array_buffer;
}
Handle<JSArrayBuffer> Factory::NewJSSharedArrayBuffer(
std::shared_ptr<BackingStore> backing_store) {
- Handle<Map> map(
- isolate()->native_context()->shared_array_buffer_fun().initial_map(),
- isolate());
+ Handle<Map> map;
+ if (backing_store->is_resizable()) {
+ DCHECK(FLAG_harmony_rab_gsab);
+ map = Handle<Map>(isolate()
+ ->native_context()
+ ->growable_shared_array_buffer_fun()
+ .initial_map(),
+ isolate());
+ } else {
+ map = Handle<Map>(
+ isolate()->native_context()->shared_array_buffer_fun().initial_map(),
+ isolate());
+ }
auto result = Handle<JSArrayBuffer>::cast(
NewJSObjectFromMap(map, AllocationType::kYoung));
- result->Setup(SharedFlag::kShared, std::move(backing_store));
+ ResizableFlag resizable = backing_store->is_resizable()
+ ? ResizableFlag::kResizable
+ : ResizableFlag::kNotResizable;
+ result->Setup(SharedFlag::kShared, resizable, std::move(backing_store));
return result;
}
@@ -2571,6 +2618,7 @@ void Factory::TypeAndSizeForElementsKind(ElementsKind kind,
*element_size = sizeof(ctype); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS_WITH_TYPED_ARRAY_TYPE(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
default:
@@ -2653,6 +2701,8 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
raw.AllocateExternalPointerEntries(isolate());
raw.set_length(length);
raw.SetOffHeapDataPtr(isolate(), buffer->backing_store(), byte_offset);
+ raw.set_is_length_tracking(false);
+ raw.set_is_backed_by_rab(!buffer->is_shared() && buffer->is_resizable());
return typed_array;
}
@@ -2791,7 +2841,7 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
// Reset the map for the object.
JSGlobalProxy raw = *object;
- raw.synchronized_set_map(*map);
+ raw.set_map(*map, kReleaseStore);
// Reinitialize the object from the constructor map.
InitializeJSObjectFromMap(raw, *raw_properties_or_hash, *map);
@@ -3016,11 +3066,11 @@ Handle<String> Factory::SizeToString(size_t value, bool check_cache) {
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
DCHECK(!shared->HasDebugInfo());
- DebugInfo debug_info =
- DebugInfo::cast(NewStructInternal(DEBUG_INFO_TYPE, AllocationType::kOld));
+ auto debug_info =
+ NewStructInternal<DebugInfo>(DEBUG_INFO_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
SharedFunctionInfo raw_shared = *shared;
- debug_info.set_flags(DebugInfo::kNone);
+ debug_info.set_flags(DebugInfo::kNone, kRelaxedStore);
debug_info.set_shared(raw_shared);
debug_info.set_debugger_hints(0);
DCHECK_EQ(DebugInfo::kNoDebuggingId, debug_info.debugging_id());
@@ -3039,8 +3089,8 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
}
Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
- BreakPointInfo new_break_point_info = BreakPointInfo::cast(
- NewStructInternal(BREAK_POINT_INFO_TYPE, AllocationType::kOld));
+ auto new_break_point_info = NewStructInternal<BreakPointInfo>(
+ BREAK_POINT_INFO_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
new_break_point_info.set_source_position(source_position);
new_break_point_info.set_break_points(*undefined_value(), SKIP_WRITE_BARRIER);
@@ -3048,8 +3098,8 @@ Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
}
Handle<BreakPoint> Factory::NewBreakPoint(int id, Handle<String> condition) {
- BreakPoint new_break_point = BreakPoint::cast(
- NewStructInternal(BREAK_POINT_TYPE, AllocationType::kOld));
+ auto new_break_point =
+ NewStructInternal<BreakPoint>(BREAK_POINT_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
new_break_point.set_id(id);
new_break_point.set_condition(*condition);
@@ -3060,8 +3110,8 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
Handle<Object> receiver_or_instance, Handle<Object> function,
Handle<HeapObject> code_object, int code_offset_or_source_position,
int flags, Handle<FixedArray> parameters) {
- StackFrameInfo info = StackFrameInfo::cast(
- NewStructInternal(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
+ auto info = NewStructInternal<StackFrameInfo>(STACK_FRAME_INFO_TYPE,
+ AllocationType::kYoung);
DisallowGarbageCollection no_gc;
info.set_receiver_or_instance(*receiver_or_instance, SKIP_WRITE_BARRIER);
info.set_function(*function, SKIP_WRITE_BARRIER);
@@ -3136,6 +3186,16 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
return map;
}
+Handle<MegaDomHandler> Factory::NewMegaDomHandler(MaybeObjectHandle accessor,
+ MaybeObjectHandle context) {
+ Handle<Map> map = read_only_roots().mega_dom_handler_map_handle();
+ MegaDomHandler handler = MegaDomHandler::cast(New(map, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ handler.set_accessor(*accessor);
+ handler.set_context(*context);
+ return handle(handler, isolate());
+}
+
Handle<LoadHandler> Factory::NewLoadHandler(int data_count,
AllocationType allocation) {
Handle<Map> map;
@@ -3496,7 +3556,8 @@ Handle<JSPromise> Factory::NewJSPromiseWithoutHook() {
Handle<JSPromise> Factory::NewJSPromise() {
Handle<JSPromise> promise = NewJSPromiseWithoutHook();
- isolate()->RunPromiseHook(PromiseHookType::kInit, promise, undefined_value());
+ isolate()->RunAllPromiseHooks(PromiseHookType::kInit, promise,
+ undefined_value());
return promise;
}
@@ -3538,20 +3599,12 @@ Handle<JSFunction> Factory::JSFunctionBuilder::Build() {
PrepareMap();
PrepareFeedbackCell();
- // Determine the associated Code object.
- Handle<Code> code;
- const bool have_cached_code =
- sfi_->TryGetCachedCode(isolate_).ToHandle(&code);
- if (!have_cached_code) code = handle(sfi_->GetCode(), isolate_);
-
+ Handle<Code> code = handle(sfi_->GetCode(), isolate_);
Handle<JSFunction> result = BuildRaw(code);
- if (have_cached_code || code->kind() == CodeKind::BASELINE) {
+ if (code->kind() == CodeKind::BASELINE) {
IsCompiledScope is_compiled_scope(sfi_->is_compiled_scope(isolate_));
JSFunction::EnsureFeedbackVector(result, &is_compiled_scope);
- if (FLAG_trace_turbo_nci && have_cached_code) {
- CompilationCacheCode::TraceHit(sfi_, code);
- }
}
Compiler::PostInstantiation(result);
@@ -3583,7 +3636,8 @@ Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<Code> code) {
function.set_code(*code, kReleaseStore, mode);
if (function.has_prototype_slot()) {
function.set_prototype_or_initial_map(
- ReadOnlyRoots(isolate).the_hole_value(), SKIP_WRITE_BARRIER);
+ ReadOnlyRoots(isolate).the_hole_value(), kReleaseStore,
+ SKIP_WRITE_BARRIER);
}
// Potentially body initialization.
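A short aside on the recurring change in factory.cc above: call sites of the form `BreakPointInfo::cast(NewStructInternal(...))` become `NewStructInternal<BreakPointInfo>(...)`, moving the downcast into one templated helper. A rough standalone illustration of that refactoring pattern (toy types, not V8's FactoryBase):

#include <cassert>

struct Struct { int tag = 0; };
struct BreakPointInfoLike : Struct {
  static BreakPointInfoLike cast(Struct s) { return BreakPointInfoLike{s}; }
};

// Untyped allocation-style helper: returns the generic base type.
Struct MakeStruct(int tag) { return Struct{tag}; }

// Typed wrapper: the only place that performs the downcast.
template <typename T>
T MakeStruct(int tag) { return T::cast(MakeStruct(tag)); }

int main() {
  auto info = MakeStruct<BreakPointInfoLike>(7);  // no cast at the call site
  assert(info.tag == 7);
  return 0;
}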
diff --git a/chromium/v8/src/heap/factory.h b/chromium/v8/src/heap/factory.h
index ebec483de47..7f99c557095 100644
--- a/chromium/v8/src/heap/factory.h
+++ b/chromium/v8/src/heap/factory.h
@@ -131,9 +131,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
MaybeHandle<FixedArray> TryNewFixedArray(
int length, AllocationType allocation = AllocationType::kYoung);
- // Allocates an uninitialized fixed array. It must be filled by the caller.
- Handle<FixedArray> NewUninitializedFixedArray(int length);
-
// Allocates a closure feedback cell array whose feedback cells are
// initialized with undefined values.
Handle<ClosureFeedbackCellArray> NewClosureFeedbackCellArray(int num_slots);
@@ -557,7 +554,18 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
#if V8_ENABLE_WEBASSEMBLY
Handle<WasmTypeInfo> NewWasmTypeInfo(Address type_address,
- Handle<Map> opt_parent);
+ Handle<Map> opt_parent,
+ int instance_size_bytes);
+ Handle<WasmExportedFunctionData> NewWasmExportedFunctionData(
+ Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
+ Address call_target, Handle<Object> ref, int func_index,
+ Address sig_address, int wrapper_budget);
+ // {opt_call_target} is kNullAddress for JavaScript functions, and
+ // non-null for exported Wasm functions.
+ Handle<WasmJSFunctionData> NewWasmJSFunctionData(
+ Address opt_call_target, Handle<JSReceiver> callable, int return_count,
+ int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
+ Handle<Code> wrapper_code);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmExportedFunction(
Handle<String> name, Handle<WasmExportedFunctionData> data);
@@ -732,7 +740,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<LoadHandler> NewLoadHandler(
int data_count, AllocationType allocation = AllocationType::kOld);
Handle<StoreHandler> NewStoreHandler(int data_count);
-
+ Handle<MegaDomHandler> NewMegaDomHandler(MaybeObjectHandle accessor,
+ MaybeObjectHandle context);
Handle<RegExpMatchInfo> NewRegExpMatchInfo();
// Creates a new FixedArray that holds the data associated with the
diff --git a/chromium/v8/src/heap/finalization-registry-cleanup-task.cc b/chromium/v8/src/heap/finalization-registry-cleanup-task.cc
index 2acfa31ffba..18222e783d1 100644
--- a/chromium/v8/src/heap/finalization-registry-cleanup-task.cc
+++ b/chromium/v8/src/heap/finalization-registry-cleanup-task.cc
@@ -59,8 +59,21 @@ void FinalizationRegistryCleanupTask::RunInternal() {
Context::cast(finalization_registry->native_context()), isolate);
Handle<Object> callback(finalization_registry->cleanup(), isolate);
v8::Context::Scope context_scope(v8::Utils::ToLocal(context));
- v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::TryCatch catcher(v8_isolate);
catcher.SetVerbose(true);
+ std::unique_ptr<MicrotasksScope> microtasks_scope;
+ MicrotaskQueue* microtask_queue =
+ finalization_registry->native_context().microtask_queue();
+ if (!microtask_queue) microtask_queue = isolate->default_microtask_queue();
+ if (microtask_queue &&
+ microtask_queue->microtasks_policy() == v8::MicrotasksPolicy::kScoped) {
+ // InvokeFinalizationRegistryCleanupFromTask will call into V8 API methods,
+ // so we need a valid microtasks scope on the stack to avoid running into
+ // the CallDepthScope check.
+ microtasks_scope.reset(new v8::MicrotasksScope(
+ v8_isolate, microtask_queue, v8::MicrotasksScope::kDoNotRunMicrotasks));
+ }
// Exceptions are reported via the message handler. This is ensured by the
// verbose TryCatch.
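The comment in the hunk above explains the guard: under MicrotasksPolicy::kScoped, V8 API entry points expect a MicrotasksScope on the stack, so the cleanup task opens one without draining the queue. A hedged sketch of the same pattern from the public-API side (the function name and arguments are placeholders; only the v8::MicrotasksScope/TryCatch usage mirrors the hunk):

#include <v8.h>

// Invoke a JS callback from a task while honouring a kScoped microtask policy.
void InvokeCallbackFromTask(v8::Isolate* isolate,
                            v8::Local<v8::Context> context,
                            v8::MicrotaskQueue* queue,
                            v8::Local<v8::Function> callback) {
  v8::Context::Scope context_scope(context);
  v8::TryCatch catcher(isolate);
  catcher.SetVerbose(true);  // exceptions go to the message handler
  // Satisfy the CallDepthScope check without running a checkpoint here.
  v8::MicrotasksScope microtasks(isolate, queue,
                                 v8::MicrotasksScope::kDoNotRunMicrotasks);
  v8::Local<v8::Value> recv = v8::Undefined(isolate);
  v8::MaybeLocal<v8::Value> result = callback->Call(context, recv, 0, nullptr);
  (void)result;  // failures were already reported via the verbose TryCatch
}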
diff --git a/chromium/v8/src/heap/free-list.cc b/chromium/v8/src/heap/free-list.cc
index 80b4a4f01f2..9f13247a2e6 100644
--- a/chromium/v8/src/heap/free-list.cc
+++ b/chromium/v8/src/heap/free-list.cc
@@ -48,7 +48,7 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
for (FreeSpace cur_node = top(); !cur_node.is_null();
cur_node = cur_node.next()) {
DCHECK(Page::FromHeapObject(cur_node)->CanAllocate());
- size_t size = cur_node.size();
+ size_t size = cur_node.size(kRelaxedLoad);
if (size >= minimum_size) {
DCHECK_GE(available_, size);
UpdateCountersAfterAllocation(size);
@@ -91,10 +91,10 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
FreeSpace n = top();
while (!n.is_null()) {
ObjectSlot map_slot = n.map_slot();
- if (map_slot.contains_value(kNullAddress)) {
- map_slot.store(free_space_map);
+ if (map_slot.contains_map_value(kNullAddress)) {
+ map_slot.store_map(free_space_map);
} else {
- DCHECK(map_slot.contains_value(free_space_map.ptr()));
+ DCHECK(map_slot.contains_map_value(free_space_map.ptr()));
}
n = n.next();
}
@@ -504,12 +504,13 @@ size_t FreeListCategory::SumFreeList() {
while (!cur.is_null()) {
// We can't use "cur->map()" here because both cur's map and the
// root can be null during bootstrapping.
- DCHECK(cur.map_slot().contains_value(Page::FromHeapObject(cur)
- ->heap()
- ->isolate()
- ->root(RootIndex::kFreeSpaceMap)
- .ptr()));
- sum += cur.relaxed_read_size();
+ DCHECK(
+ cur.map_slot().contains_map_value(Page::FromHeapObject(cur)
+ ->heap()
+ ->isolate()
+ ->root(RootIndex::kFreeSpaceMap)
+ .ptr()));
+ sum += cur.size(kRelaxedLoad);
cur = cur.next();
}
return sum;
diff --git a/chromium/v8/src/heap/gc-tracer.cc b/chromium/v8/src/heap/gc-tracer.cc
index b4f86cc2a15..70f8c276270 100644
--- a/chromium/v8/src/heap/gc-tracer.cc
+++ b/chromium/v8/src/heap/gc-tracer.cc
@@ -13,7 +13,9 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
-#include "src/logging/counters-inl.h"
+#include "src/logging/counters.h"
+#include "src/logging/tracing-flags.h"
+#include "src/tracing/tracing-category-observer.h"
namespace v8 {
namespace internal {
@@ -28,6 +30,8 @@ static size_t CountTotalHolesSize(Heap* heap) {
}
return holes_size;
}
+
+#ifdef V8_RUNTIME_CALL_STATS
WorkerThreadRuntimeCallStats* GCTracer::worker_thread_runtime_call_stats() {
return heap_->isolate()->counters()->worker_thread_runtime_call_stats();
}
@@ -38,6 +42,7 @@ RuntimeCallCounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
static_cast<int>(RuntimeCallCounterId::kGC_MC_INCREMENTAL) +
static_cast<int>(id));
}
+#endif // defined(V8_RUNTIME_CALL_STATS)
double GCTracer::MonotonicallyIncreasingTimeInMs() {
if (V8_UNLIKELY(FLAG_predictable)) {
@@ -61,6 +66,7 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind)
start_time_ = tracer_->MonotonicallyIncreasingTimeInMs();
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
+#ifdef V8_RUNTIME_CALL_STATS
if (thread_kind_ == ThreadKind::kMain) {
DCHECK_EQ(tracer_->heap_->isolate()->thread_id(), ThreadId::Current());
runtime_stats_ =
@@ -72,6 +78,7 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind)
runtime_stats_ = runtime_call_stats_scope_->Get();
runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
}
+#endif // defined(V8_RUNTIME_CALL_STATS)
}
GCTracer::Scope::~Scope() {
@@ -80,12 +87,23 @@ GCTracer::Scope::~Scope() {
if (thread_kind_ == ThreadKind::kMain) {
DCHECK_EQ(tracer_->heap_->isolate()->thread_id(), ThreadId::Current());
tracer_->AddScopeSample(scope_, duration_ms);
+ if (scope_ == ScopeId::MC_INCREMENTAL ||
+ scope_ == ScopeId::MC_INCREMENTAL_START ||
+ scope_ == MC_INCREMENTAL_FINALIZE) {
+ auto* long_task_stats =
+ tracer_->heap_->isolate()->GetCurrentLongTaskStats();
+ long_task_stats->gc_full_incremental_wall_clock_duration_us +=
+ static_cast<int64_t>(duration_ms *
+ base::Time::kMicrosecondsPerMillisecond);
+ }
} else {
tracer_->AddScopeSampleBackground(scope_, duration_ms);
}
+#ifdef V8_RUNTIME_CALL_STATS
if (V8_LIKELY(runtime_stats_ == nullptr)) return;
runtime_stats_->Leave(&timer_);
+#endif // defined(V8_RUNTIME_CALL_STATS)
}
const char* GCTracer::Scope::Name(ScopeId id) {
@@ -290,8 +308,10 @@ void GCTracer::StartInSafepoint() {
current_.start_object_size = heap_->SizeOfObjects();
current_.start_memory_size = heap_->memory_allocator()->Size();
current_.start_holes_size = CountTotalHolesSize(heap_);
- current_.young_object_size =
- heap_->new_space()->Size() + heap_->new_lo_space()->SizeOfObjects();
+ size_t new_space_size = (heap_->new_space() ? heap_->new_space()->Size() : 0);
+ size_t new_lo_space_size =
+ (heap_->new_lo_space() ? heap_->new_lo_space()->SizeOfObjects() : 0);
+ current_.young_object_size = new_space_size + new_lo_space_size;
}
void GCTracer::ResetIncrementalMarkingCounters() {
@@ -333,6 +353,9 @@ void GCTracer::Stop(GarbageCollector collector) {
AddAllocation(current_.end_time);
double duration = current_.end_time - current_.start_time;
+ int64_t duration_us =
+ static_cast<int64_t>(duration * base::Time::kMicrosecondsPerMillisecond);
+ auto* long_task_stats = heap_->isolate()->GetCurrentLongTaskStats();
switch (current_.type) {
case Event::SCAVENGER:
@@ -342,6 +365,7 @@ void GCTracer::Stop(GarbageCollector collector) {
recorded_minor_gcs_survived_.Push(
MakeBytesAndDuration(current_.survived_young_object_size, duration));
FetchBackgroundMinorGCCounters();
+ long_task_stats->gc_young_wall_clock_duration_us += duration_us;
break;
case Event::INCREMENTAL_MARK_COMPACTOR:
current_.incremental_marking_bytes = incremental_marking_bytes_;
@@ -361,6 +385,7 @@ void GCTracer::Stop(GarbageCollector collector) {
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
FetchBackgroundMarkCompactCounters();
+ long_task_stats->gc_full_atomic_wall_clock_duration_us += duration_us;
break;
case Event::MARK_COMPACTOR:
DCHECK_EQ(0u, current_.incremental_marking_bytes);
@@ -373,6 +398,7 @@ void GCTracer::Stop(GarbageCollector collector) {
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
FetchBackgroundMarkCompactCounters();
+ long_task_stats->gc_full_atomic_wall_clock_duration_us += duration_us;
break;
case Event::START:
UNREACHABLE();
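The new long-task accounting above folds each scope's duration_ms into a microsecond counter. A trivial standalone check of that conversion and accumulation (the constant matches base::Time::kMicrosecondsPerMillisecond; everything else is a placeholder):

#include <cassert>
#include <cstdint>

constexpr int64_t kMicrosecondsPerMillisecond = 1000;

int64_t ToMicroseconds(double duration_ms) {
  return static_cast<int64_t>(duration_ms * kMicrosecondsPerMillisecond);
}

int main() {
  int64_t gc_full_atomic_wall_clock_duration_us = 0;
  gc_full_atomic_wall_clock_duration_us += ToMicroseconds(12.5);  // one cycle
  gc_full_atomic_wall_clock_duration_us += ToMicroseconds(3.0);   // another
  assert(gc_full_atomic_wall_clock_duration_us == 15500);
  return 0;
}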
diff --git a/chromium/v8/src/heap/gc-tracer.h b/chromium/v8/src/heap/gc-tracer.h
index 011889ba66e..3a665726ca7 100644
--- a/chromium/v8/src/heap/gc-tracer.h
+++ b/chromium/v8/src/heap/gc-tracer.h
@@ -113,9 +113,11 @@ class V8_EXPORT_PRIVATE GCTracer {
ScopeId scope_;
ThreadKind thread_kind_;
double start_time_;
+#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallTimer timer_;
RuntimeCallStats* runtime_stats_ = nullptr;
base::Optional<WorkerThreadRuntimeCallStatsScope> runtime_call_stats_scope_;
+#endif // defined(V8_RUNTIME_CALL_STATS)
};
class Event {
@@ -195,7 +197,9 @@ class V8_EXPORT_PRIVATE GCTracer {
static double CombineSpeedsInBytesPerMillisecond(double default_speed,
double optional_speed);
+#ifdef V8_RUNTIME_CALL_STATS
static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);
+#endif // defined(V8_RUNTIME_CALL_STATS)
explicit GCTracer(Heap* heap);
@@ -335,7 +339,9 @@ class V8_EXPORT_PRIVATE GCTracer {
double AverageTimeToIncrementalMarkingTask() const;
void RecordTimeToIncrementalMarkingTask(double time_to_task);
+#ifdef V8_RUNTIME_CALL_STATS
WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
+#endif // defined(V8_RUNTIME_CALL_STATS)
CollectionEpoch CurrentEpoch(Scope::ScopeId id);
diff --git a/chromium/v8/src/heap/heap-inl.h b/chromium/v8/src/heap/heap-inl.h
index 8372dd518d7..8c2649e0ef8 100644
--- a/chromium/v8/src/heap/heap-inl.h
+++ b/chromium/v8/src/heap/heap-inl.h
@@ -8,22 +8,17 @@
#include <cmath>
// Clients of this interface shouldn't depend on lots of heap internals.
-// Do not include anything from src/heap other than src/heap/heap.h and its
-// write barrier here!
+// Avoid including anything but `heap.h` from `src/heap` where possible.
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/platform.h"
+#include "src/base/sanitizer/msan.h"
#include "src/common/assert-scope.h"
-#include "src/heap/heap-write-barrier.h"
-#include "src/heap/heap.h"
-#include "src/heap/third-party/heap-api.h"
-#include "src/objects/feedback-vector.h"
-
-// TODO(gc): There is one more include to remove in order to no longer
-// leak heap internals to users of this interface!
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
#include "src/heap/code-object-registry.h"
+#include "src/heap/heap-write-barrier.h"
+#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
@@ -31,11 +26,13 @@
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces-inl.h"
+#include "src/heap/third-party/heap-api.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
@@ -45,7 +42,6 @@
#include "src/objects/slots-inl.h"
#include "src/objects/struct-inl.h"
#include "src/profiler/heap-profiler.h"
-#include "src/sanitizer/msan.h"
#include "src/strings/string-hasher.h"
#include "src/zone/zone-list-inl.h"
@@ -162,19 +158,12 @@ Address* Heap::OldSpaceAllocationLimitAddress() {
return old_space_->allocation_limit_address();
}
-void Heap::UpdateNewSpaceAllocationCounter() {
- new_space_allocation_counter_ = NewSpaceAllocationCounter();
-}
-
-size_t Heap::NewSpaceAllocationCounter() {
- return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
-}
-
-inline const base::AddressRegion& Heap::code_range() {
+inline const base::AddressRegion& Heap::code_region() {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return tp_heap_->GetCodeRange();
#else
- return memory_allocator_->code_range();
+ static constexpr base::AddressRegion kEmptyRegion;
+ return code_range_ ? code_range_->reservation()->region() : kEmptyRegion;
#endif
}
@@ -189,7 +178,8 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
- return AllocationResult::Retry();
+ AllocationSpace space = FLAG_single_generation ? OLD_SPACE : NEW_SPACE;
+ return AllocationResult::Retry(space);
}
}
#endif
@@ -197,6 +187,10 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
IncrementObjectCounters();
#endif
+ if (CanSafepoint()) {
+ main_thread_local_heap()->Safepoint();
+ }
+
size_t large_object_threshold = MaxRegularHeapObjectSize(type);
bool large_object =
static_cast<size_t>(size_in_bytes) > large_object_threshold;
@@ -245,6 +239,12 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(CanAllocateInReadOnlySpace());
DCHECK_EQ(AllocationOrigin::kRuntime, origin);
allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
+ } else if (AllocationType::kSharedOld == type) {
+ allocation =
+ shared_old_allocator_->AllocateRaw(size_in_bytes, alignment, origin);
+ } else if (AllocationType::kSharedMap == type) {
+ allocation =
+ shared_map_allocator_->AllocateRaw(size_in_bytes, alignment, origin);
} else {
UNREACHABLE();
}
@@ -285,10 +285,9 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_EQ(gc_state(), NOT_IN_GC);
Heap* heap = isolate()->heap();
- if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
- allocation == AllocationType::kYoung &&
+ if (allocation == AllocationType::kYoung &&
alignment == AllocationAlignment::kWordAligned &&
- size <= MaxRegularHeapObjectSize(allocation)) {
+ size <= MaxRegularHeapObjectSize(allocation) && !FLAG_single_generation) {
Address* top = heap->NewSpaceAllocationTopAddress();
Address* limit = heap->NewSpaceAllocationLimitAddress();
if ((*limit - *top >= static_cast<unsigned>(size)) &&
@@ -383,17 +382,21 @@ void Heap::RegisterExternalString(String string) {
void Heap::FinalizeExternalString(String string) {
DCHECK(string.IsExternalString());
- Page* page = Page::FromHeapObject(string);
ExternalString ext_string = ExternalString::cast(string);
- page->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kExternalString,
- ext_string.ExternalPayloadSize());
+ if (!FLAG_enable_third_party_heap) {
+ Page* page = Page::FromHeapObject(string);
+ page->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString,
+ ext_string.ExternalPayloadSize());
+ }
ext_string.DisposeResource(isolate());
}
-Address Heap::NewSpaceTop() { return new_space_->top(); }
+Address Heap::NewSpaceTop() {
+ return new_space_ ? new_space_->top() : kNullAddress;
+}
bool Heap::InYoungGeneration(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
@@ -458,7 +461,12 @@ bool Heap::InToPage(HeapObject heap_object) {
return BasicMemoryChunk::FromHeapObject(heap_object)->IsToPage();
}
-bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
+bool Heap::InOldSpace(Object object) {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
+ return object.IsHeapObject() &&
+ third_party_heap::Heap::InOldSpace(object.ptr());
+ return old_space_->Contains(object);
+}
// static
Heap* Heap::FromWritableHeapObject(HeapObject obj) {
@@ -502,7 +510,7 @@ AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
- if (!candidate_map_slot.contains_value(
+ if (!candidate_map_slot.contains_map_value(
ReadOnlyRoots(this).allocation_memento_map().ptr())) {
return AllocationMemento();
}
@@ -577,18 +585,23 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
bool Heap::IsPendingAllocation(HeapObject object) {
// TODO(ulan): Optimize this function to perform 3 loads at most.
Address addr = object.address();
- Address top = new_space_->original_top_acquire();
- Address limit = new_space_->original_limit_relaxed();
- if (top <= addr && addr < limit) return true;
+ Address top, limit;
+
+ if (new_space_) {
+ top = new_space_->original_top_acquire();
+ limit = new_space_->original_limit_relaxed();
+ if (top && top <= addr && addr < limit) return true;
+ }
+
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
top = space->original_top_acquire();
limit = space->original_limit_relaxed();
- if (top <= addr && addr < limit) return true;
+ if (top && top <= addr && addr < limit) return true;
}
if (addr == lo_space_->pending_object()) return true;
- if (addr == new_lo_space_->pending_object()) return true;
+ if (new_lo_space_ && addr == new_lo_space_->pending_object()) return true;
if (addr == code_lo_space_->pending_object()) return true;
return false;
}
@@ -644,8 +657,8 @@ int Heap::NextDebuggingId() {
}
int Heap::GetNextTemplateSerialNumber() {
- int next_serial_number = next_template_serial_number().value() + 1;
- set_next_template_serial_number(Smi::FromInt(next_serial_number));
+ int next_serial_number = next_template_serial_number().value();
+ set_next_template_serial_number(Smi::FromInt(next_serial_number + 1));
return next_serial_number;
}
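Many hunks in heap-inl.h (and in heap.cc below) guard new-space accessors because --single-generation never creates a young generation, leaving new_space_ null. A minimal standalone sketch of that accessor shape (HeapLike/SpaceLike are invented stand-ins):

#include <cassert>
#include <cstddef>
#include <memory>

struct SpaceLike {
  size_t Size() const { return 1024; }
  size_t Capacity() const { return 4096; }
};

struct HeapLike {
  std::unique_ptr<SpaceLike> new_space_;  // null under single-generation
  size_t NewSpaceSize() const { return new_space_ ? new_space_->Size() : 0; }
  size_t NewSpaceCapacity() const {
    return new_space_ ? new_space_->Capacity() : 0;
  }
};

int main() {
  HeapLike single_gen;  // no young generation at all
  assert(single_gen.NewSpaceCapacity() == 0);
  HeapLike generational{std::make_unique<SpaceLike>()};
  assert(generational.NewSpaceCapacity() == 4096);
  return 0;
}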
diff --git a/chromium/v8/src/heap/heap-write-barrier.cc b/chromium/v8/src/heap/heap-write-barrier.cc
index 63949de2433..0030615bab4 100644
--- a/chromium/v8/src/heap/heap-write-barrier.cc
+++ b/chromium/v8/src/heap/heap-write-barrier.cc
@@ -68,7 +68,16 @@ void WriteBarrier::MarkingSlow(Heap* heap, DescriptorArray descriptor_array,
int WriteBarrier::MarkingFromCode(Address raw_host, Address raw_slot) {
HeapObject host = HeapObject::cast(Object(raw_host));
MaybeObjectSlot slot(raw_slot);
- WriteBarrier::Marking(host, slot, *slot);
+ Address value = (*slot).ptr();
+#ifdef V8_MAP_PACKING
+ if (slot.address() == host.address()) {
+ // Clear metadata bits and fix object tag.
+ value = (value & ~Internals::kMapWordMetadataMask &
+ ~Internals::kMapWordXorMask) |
+ (uint64_t)kHeapObjectTag;
+ }
+#endif
+ WriteBarrier::Marking(host, slot, MaybeObject(value));
// Called by RecordWriteCodeStubAssembler, which doesn't accept void type
return 0;
}
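With V8_MAP_PACKING, the slot holding the map word carries metadata and XOR-based packing, so the barrier above strips those bits and re-applies the heap-object tag before building a MaybeObject. A standalone illustration of that bit manipulation with made-up mask values (the real kMapWordMetadataMask/kMapWordXorMask differ; the sketch assumes a map pointer carries no payload bits under either mask, which is what makes clearing them sufficient):

#include <cassert>
#include <cstdint>

constexpr uint64_t kHeapObjectTag = 1;          // matches V8's tag bit
constexpr uint64_t kMapWordMetadataMask = 0x3;  // placeholder value
constexpr uint64_t kMapWordXorMask = 0xFF00;    // placeholder value

uint64_t StripPacking(uint64_t packed) {
  return (packed & ~kMapWordMetadataMask & ~kMapWordXorMask) | kHeapObjectTag;
}

int main() {
  // A "map pointer" with no payload bits under either mask, tag bit set.
  uint64_t tagged_map = 0x00AB0001;
  uint64_t packed = tagged_map ^ kMapWordXorMask;  // simplified packing
  assert(StripPacking(packed) == tagged_map);
  return 0;
}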
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index 8b8c30a1b0b..9da67b75344 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -14,6 +14,7 @@
#include "src/api/api-inl.h"
#include "src/base/bits.h"
#include "src/base/flags.h"
+#include "src/base/logging.h"
#include "src/base/once.h"
#include "src/base/platform/mutex.h"
#include "src/base/utils/random-number-generator.h"
@@ -35,6 +36,7 @@
#include "src/heap/barrier.h"
#include "src/heap/base/stack.h"
#include "src/heap/code-object-registry.h"
+#include "src/heap/code-range.h"
#include "src/heap/code-stats.h"
#include "src/heap/collection-barrier.h"
#include "src/heap/combined-heap.h"
@@ -323,7 +325,9 @@ size_t Heap::SemiSpaceSizeFromYoungGenerationSize(
size_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
- return new_space_->Capacity() + OldGenerationCapacity();
+ if (FLAG_enable_third_party_heap) return tp_heap_->Capacity();
+
+ return NewSpaceCapacity() + OldGenerationCapacity();
}
size_t Heap::OldGenerationCapacity() {
@@ -358,7 +362,10 @@ size_t Heap::CommittedMemoryOfUnmapper() {
size_t Heap::CommittedMemory() {
if (!HasBeenSetUp()) return 0;
- return new_space_->CommittedMemory() + new_lo_space_->Size() +
+ size_t new_space_committed = new_space_ ? new_space_->CommittedMemory() : 0;
+ size_t new_lo_space_committed = new_lo_space_ ? new_lo_space_->Size() : 0;
+
+ return new_space_committed + new_lo_space_committed +
CommittedOldGenerationMemory();
}
@@ -421,14 +428,17 @@ bool Heap::CanExpandOldGenerationBackground(LocalHeap* local_heap,
}
bool Heap::CanPromoteYoungAndExpandOldGeneration(size_t size) {
+ size_t new_space_capacity = NewSpaceCapacity();
+ size_t new_lo_space_capacity = new_lo_space_ ? new_lo_space_->Size() : 0;
+
// Over-estimate the new space size using capacity to allow some slack.
- return CanExpandOldGeneration(size + new_space_->Capacity() +
- new_lo_space_->Size());
+ return CanExpandOldGeneration(size + new_space_capacity +
+ new_lo_space_capacity);
}
bool Heap::HasBeenSetUp() const {
- // We will always have a new space when the heap is set up.
- return new_space_ != nullptr;
+ // We will always have an old space when the heap is set up.
+ return old_space_ != nullptr;
}
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
@@ -440,7 +450,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return MARK_COMPACTOR;
}
- if (FLAG_gc_global || ShouldStressCompaction()) {
+ if (FLAG_gc_global || ShouldStressCompaction() || FLAG_single_generation) {
*reason = "GC in old space forced by flags";
return MARK_COMPACTOR;
}
@@ -485,7 +495,7 @@ void Heap::PrintShortHeapStatistics() {
"New space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
- new_space_->Size() / KB, new_space_->Available() / KB,
+ NewSpaceSize() / KB, new_space_->Available() / KB,
new_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
"New large object space, used: %6zu KB"
@@ -788,6 +798,58 @@ void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
PrintF("-------------------------------------------------\n");
}
+void UpdateRetainersMapAfterScavenge(
+ std::unordered_map<HeapObject, HeapObject, Object::Hasher>* map) {
+ std::unordered_map<HeapObject, HeapObject, Object::Hasher> updated_map;
+
+ for (auto pair : *map) {
+ HeapObject object = pair.first;
+ HeapObject retainer = pair.second;
+
+ if (Heap::InFromPage(object)) {
+ MapWord map_word = object.map_word(kRelaxedLoad);
+ if (!map_word.IsForwardingAddress()) continue;
+ object = map_word.ToForwardingAddress();
+ }
+
+ if (Heap::InFromPage(retainer)) {
+ MapWord map_word = retainer.map_word(kRelaxedLoad);
+ if (!map_word.IsForwardingAddress()) continue;
+ retainer = map_word.ToForwardingAddress();
+ }
+
+ updated_map[object] = retainer;
+ }
+
+ *map = std::move(updated_map);
+}
+
+void Heap::UpdateRetainersAfterScavenge() {
+ if (!incremental_marking()->IsMarking()) return;
+
+ // This isn't supported for Minor MC.
+ DCHECK(!FLAG_minor_mc);
+
+ UpdateRetainersMapAfterScavenge(&retainer_);
+ UpdateRetainersMapAfterScavenge(&ephemeron_retainer_);
+
+ std::unordered_map<HeapObject, Root, Object::Hasher> updated_retaining_root;
+
+ for (auto pair : retaining_root_) {
+ HeapObject object = pair.first;
+
+ if (Heap::InFromPage(object)) {
+ MapWord map_word = object.map_word(kRelaxedLoad);
+ if (!map_word.IsForwardingAddress()) continue;
+ object = map_word.ToForwardingAddress();
+ }
+
+ updated_retaining_root[object] = pair.second;
+ }
+
+ retaining_root_ = std::move(updated_retaining_root);
+}
+
void Heap::AddRetainer(HeapObject retainer, HeapObject object) {
if (retainer_.count(object)) return;
retainer_[object] = retainer;
@@ -850,7 +912,7 @@ void Heap::GarbageCollectionPrologue() {
if (FLAG_gc_verbose) Print();
#endif // DEBUG
- if (new_space_->IsAtMaximumCapacity()) {
+ if (new_space_ && new_space_->IsAtMaximumCapacity()) {
maximum_size_scavenges_++;
} else {
maximum_size_scavenges_ = 0;
@@ -867,9 +929,20 @@ void Heap::GarbageCollectionPrologueInSafepoint() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE_SAFEPOINT);
gc_count_++;
- UpdateNewSpaceAllocationCounter();
- CheckNewSpaceExpansionCriteria();
- new_space_->ResetParkedAllocationBuffers();
+ if (new_space_) {
+ UpdateNewSpaceAllocationCounter();
+ CheckNewSpaceExpansionCriteria();
+ new_space_->ResetParkedAllocationBuffers();
+ }
+}
+
+void Heap::UpdateNewSpaceAllocationCounter() {
+ new_space_allocation_counter_ = NewSpaceAllocationCounter();
+}
+
+size_t Heap::NewSpaceAllocationCounter() {
+ return new_space_allocation_counter_ +
+ (new_space_ ? new_space()->AllocatedSinceLastGC() : 0);
}
size_t Heap::SizeOfObjects() {
@@ -894,7 +967,7 @@ void Heap::MergeAllocationSitePretenuringFeedback(
AllocationSite site;
for (auto& site_and_count : local_pretenuring_feedback) {
site = site_and_count.first;
- MapWord map_word = site_and_count.first.map_word();
+ MapWord map_word = site_and_count.first.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
site = AllocationSite::cast(map_word.ToForwardingAddress());
}
@@ -944,14 +1017,15 @@ void Heap::RemoveAllocationObserversFromAllSpaces(
}
void Heap::PublishPendingAllocations() {
- new_space_->MarkLabStartInitialized();
+ if (FLAG_enable_third_party_heap) return;
+ if (new_space_) new_space_->MarkLabStartInitialized();
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
space->MoveOriginalTopForward();
}
lo_space_->ResetPendingObject();
- new_lo_space_->ResetPendingObject();
+ if (new_lo_space_) new_lo_space_->ResetPendingObject();
code_lo_space_->ResetPendingObject();
}
@@ -981,6 +1055,12 @@ inline bool MakePretenureDecision(
return false;
}
+// Clear feedback calculation fields until the next gc.
+inline void ResetPretenuringFeedback(AllocationSite site) {
+ site.set_memento_found_count(0);
+ site.set_memento_create_count(0);
+}
+
inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
bool maximum_size_scavenge) {
bool deopt = false;
@@ -1008,11 +1088,34 @@ inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
site.PretenureDecisionName(site.pretenure_decision()));
}
- // Clear feedback calculation fields until the next gc.
- site.set_memento_found_count(0);
- site.set_memento_create_count(0);
+ ResetPretenuringFeedback(site);
return deopt;
}
+
+bool PretenureAllocationSiteManually(Isolate* isolate, AllocationSite site) {
+ AllocationSite::PretenureDecision current_decision =
+ site.pretenure_decision();
+ bool deopt = true;
+ if (current_decision == AllocationSite::kUndecided ||
+ current_decision == AllocationSite::kMaybeTenure) {
+ site.set_deopt_dependent_code(true);
+ site.set_pretenure_decision(AllocationSite::kTenure);
+ } else {
+ deopt = false;
+ }
+ if (FLAG_trace_pretenuring_statistics) {
+ PrintIsolate(isolate,
+ "pretenuring manually requested: AllocationSite(%p): "
+ "%s => %s\n",
+ reinterpret_cast<void*>(site.ptr()),
+ site.PretenureDecisionName(current_decision),
+ site.PretenureDecisionName(site.pretenure_decision()));
+ }
+
+ ResetPretenuringFeedback(site);
+ return deopt;
+}
+
} // namespace
void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
@@ -1020,7 +1123,8 @@ void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
}
bool Heap::DeoptMaybeTenuredAllocationSites() {
- return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+ return new_space_ && new_space_->IsAtMaximumCapacity() &&
+ maximum_size_scavenges_ == 0;
}
void Heap::ProcessPretenuringFeedback() {
@@ -1060,7 +1164,18 @@ void Heap::ProcessPretenuringFeedback() {
}
}
- // Step 2: Deopt maybe tenured allocation sites if necessary.
+ // Step 2: Pretenure allocation sites for manual requests.
+ if (allocation_sites_to_pretenure_) {
+ while (!allocation_sites_to_pretenure_->empty()) {
+ auto site = allocation_sites_to_pretenure_->Pop();
+ if (PretenureAllocationSiteManually(isolate_, site)) {
+ trigger_deoptimization = true;
+ }
+ }
+ allocation_sites_to_pretenure_.reset();
+ }
+
+ // Step 3: Deopt maybe tenured allocation sites if necessary.
bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
if (deopt_maybe_tenured) {
ForeachAllocationSite(
@@ -1096,6 +1211,14 @@ void Heap::ProcessPretenuringFeedback() {
}
}
+void Heap::PretenureAllocationSiteOnNextCollection(AllocationSite site) {
+ if (!allocation_sites_to_pretenure_) {
+ allocation_sites_to_pretenure_.reset(
+ new GlobalHandleVector<AllocationSite>(this));
+ }
+ allocation_sites_to_pretenure_->Push(site);
+}
+
void Heap::InvalidateCodeDeoptimizationData(Code code) {
CodePageMemoryModificationScope modification_scope(code);
code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
@@ -1145,7 +1268,10 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
UPDATE_COUNTERS_FOR_SPACE(space) \
UPDATE_FRAGMENTATION_FOR_SPACE(space)
- UPDATE_COUNTERS_FOR_SPACE(new_space)
+ if (new_space()) {
+ UPDATE_COUNTERS_FOR_SPACE(new_space)
+ }
+
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
@@ -1175,16 +1301,14 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
ZapFromSpace();
}
- {
+ if (new_space()) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
}
// Set main thread state back to Running from CollectionRequested.
- LocalHeap* main_thread_local_heap = isolate()->main_thread_local_heap();
-
LocalHeap::ThreadState old_state =
- main_thread_local_heap->state_.exchange(LocalHeap::kRunning);
+ main_thread_local_heap()->state_.exchange(LocalHeap::kRunning);
CHECK(old_state == LocalHeap::kRunning ||
old_state == LocalHeap::kCollectionRequested);
@@ -1404,8 +1528,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
if (gc_reason == GarbageCollectionReason::kLastResort) {
InvokeNearHeapLimitCallback();
}
- RuntimeCallTimerScope runtime_timer(
- isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
@@ -1512,6 +1635,7 @@ void Heap::EnsureFillerObjectAtTop() {
// evacuation of a non-full new space (or if we are on the last page) there
// may be uninitialized memory behind top. We fill the remainder of the page
// with a filler.
+ if (!new_space_) return;
Address to_top = new_space_->top();
Page* page = Page::FromAddress(to_top - kTaggedSize);
if (page->Contains(to_top)) {
@@ -1593,6 +1717,16 @@ bool Heap::CollectGarbage(AllocationSpace space,
if (collector == MARK_COMPACTOR) {
committed_memory_before = CommittedOldGenerationMemory();
+ if (cpp_heap()) {
+ // CppHeap needs a stack marker at the top of all entry points to allow
+ // deterministic passes over the stack. E.g., a verifier that should only
+ // find a subset of references of the marker.
+ //
+ // TODO(chromium:1056170): Consider adding a component that keeps track
+ // of relevant GC stack regions where interesting pointers can be found.
+ static_cast<v8::internal::CppHeap*>(cpp_heap())
+ ->SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
+ }
}
{
@@ -1800,7 +1934,7 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
} else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
StartIncrementalMarking(
gc_flags,
- OldGenerationSpaceAvailable() <= new_space_->Capacity()
+ OldGenerationSpaceAvailable() <= NewSpaceCapacity()
? GarbageCollectionReason::kAllocationLimit
: GarbageCollectionReason::kGlobalAllocationLimit,
gc_callback_flags);
@@ -1816,7 +1950,7 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
const size_t old_generation_space_available = OldGenerationSpaceAvailable();
- if (old_generation_space_available < new_space_->Capacity()) {
+ if (old_generation_space_available < NewSpaceCapacity()) {
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
}
}
@@ -1943,6 +2077,7 @@ static void VerifyStringTable(Isolate* isolate) {
#endif // VERIFY_HEAP
void Heap::EnsureFromSpaceIsCommitted() {
+ if (!new_space_) return;
if (new_space_->CommitFromSpaceIfNeeded()) return;
// Committing memory to from space failed.
@@ -2024,6 +2159,9 @@ size_t Heap::PerformGarbageCollection(
SafepointScope safepoint_scope(this);
+ // Shared isolates cannot have any clients when running GC at the moment.
+ DCHECK_IMPLIES(IsShared(), !isolate()->HasClientIsolates());
+
collection_barrier_->StopTimeToCollectionTimer();
#ifdef VERIFY_HEAP
@@ -2039,7 +2177,7 @@ size_t Heap::PerformGarbageCollection(
EnsureFromSpaceIsCommitted();
size_t start_young_generation_size =
- Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
+ NewSpaceSize() + (new_lo_space() ? new_lo_space()->SizeOfObjects() : 0);
switch (collector) {
case MARK_COMPACTOR:
@@ -2173,7 +2311,7 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
}
size_t old_gen_size = OldGenerationSizeOfObjects();
- size_t new_space_capacity = new_space()->Capacity();
+ size_t new_space_capacity = NewSpaceCapacity();
HeapGrowingMode mode = CurrentHeapGrowingMode();
if (collector == MARK_COMPACTOR) {
@@ -2219,8 +2357,7 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
}
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- RuntimeCallTimerScope runtime_timer(
- isolate(), RuntimeCallCounterId::kGCPrologueCallback);
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCPrologueCallback);
for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
if (gc_type & info.gc_type) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -2230,8 +2367,7 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
}
void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- RuntimeCallTimerScope runtime_timer(
- isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
if (gc_type & info.gc_type) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -2280,7 +2416,7 @@ void Heap::MarkCompact() {
void Heap::MinorMarkCompact() {
#ifdef ENABLE_MINOR_MC
- DCHECK(FLAG_minor_mc);
+ DCHECK(FLAG_minor_mc && !FLAG_single_generation);
PauseAllocationObserversScope pause_observers(this);
SetGCState(MINOR_MARK_COMPACT);
@@ -2387,6 +2523,8 @@ void Heap::EvacuateYoungGeneration() {
}
void Heap::Scavenge() {
+ DCHECK(!FLAG_single_generation);
+
if (fast_promotion_mode_ && CanPromoteYoungAndExpandOldGeneration(0)) {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kFastPromotionDuringScavenge);
@@ -2435,12 +2573,15 @@ void Heap::Scavenge() {
}
void Heap::ComputeFastPromotionMode() {
+ if (!new_space_) return;
+
const size_t survived_in_new_space =
- survived_last_scavenge_ * 100 / new_space_->Capacity();
+ survived_last_scavenge_ * 100 / NewSpaceCapacity();
fast_promotion_mode_ =
!FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
!ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
+
if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
PrintIsolate(isolate(), "Fast promotion mode: %s survival rate: %zu%%\n",
fast_promotion_mode_ ? "true" : "false",
@@ -2488,6 +2629,8 @@ bool Heap::ExternalStringTable::Contains(String string) {
void Heap::UpdateExternalString(String string, size_t old_payload,
size_t new_payload) {
DCHECK(string.IsExternalString());
+ if (FLAG_enable_third_party_heap) return;
+
Page* page = Page::FromHeapObject(string);
if (old_payload > new_payload) {
@@ -2502,7 +2645,7 @@ void Heap::UpdateExternalString(String string, size_t old_payload,
String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
HeapObject obj = HeapObject::cast(*p);
- MapWord first_word = obj.map_word();
+ MapWord first_word = obj.map_word(kRelaxedLoad);
String new_string;
@@ -2863,7 +3006,7 @@ HeapObject Heap::AlignWithFiller(ReadOnlyRoots roots, HeapObject object,
void* Heap::AllocateExternalBackingStore(
const std::function<void*(size_t)>& allocate, size_t byte_length) {
- if (!always_allocate()) {
+ if (!always_allocate() && new_space()) {
size_t new_space_backing_store_bytes =
new_space()->ExternalBackingStoreBytes();
if (new_space_backing_store_bytes >= 2 * kMaxSemiSpaceSize &&
@@ -2874,8 +3017,6 @@ void* Heap::AllocateExternalBackingStore(
GarbageCollectionReason::kExternalMemoryPressure);
}
}
- // TODO(ulan): Perform GCs proactively based on the byte_length and
- // the current external backing store counters.
void* result = allocate(byte_length);
if (result) return result;
if (!always_allocate()) {
@@ -2948,7 +3089,7 @@ HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
DCHECK_GT(size, 2 * kTaggedSize);
filler.set_map_after_allocation(roots.unchecked_free_space_map(),
SKIP_WRITE_BARRIER);
- FreeSpace::cast(filler).relaxed_write_size(size);
+ FreeSpace::cast(filler).set_size(size, kRelaxedStore);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
(size / kTaggedSize) - 2);
@@ -2957,7 +3098,7 @@ HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are nullptr.
- DCHECK((filler.map_slot().contains_value(kNullAddress) &&
+ DCHECK((filler.map_slot().contains_map_value(kNullAddress) &&
!Heap::FromWritableHeapObject(filler)->deserialization_complete()) ||
filler.map().IsMap());
@@ -2969,8 +3110,7 @@ void VerifyNoNeedToClearSlots(Address start, Address end) {
BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromAddress(start);
if (basic_chunk->InReadOnlySpace()) return;
MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
- // TODO(ulan): Support verification of large pages.
- if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
+ if (chunk->InYoungGeneration()) return;
BaseSpace* space = chunk->owner();
space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
}
@@ -3046,6 +3186,9 @@ bool Heap::IsImmovable(HeapObject object) {
}
bool Heap::IsLargeObject(HeapObject object) {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
+ return third_party_heap::Heap::InLargeObjectSpace(object.address()) ||
+ third_party_heap::Heap::InSpace(object.address(), CODE_LO_SPACE);
return BasicMemoryChunk::FromHeapObject(object)->IsLargePage();
}
@@ -3109,6 +3252,9 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
if (target.IsSharedFunctionInfo()) {
LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
target.address()));
+ } else if (target.IsNativeContext()) {
+ PROFILE(isolate_,
+ NativeContextMoveEvent(source.address(), target.address()));
}
if (FLAG_verify_predictable) {
@@ -3184,7 +3330,8 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept, creating a filler
// object does not require synchronization.
- RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
+ RELAXED_WRITE_FIELD(object, bytes_to_trim,
+ Object(MapWord::FromMap(map).ptr()));
RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
Smi::FromInt(len - elements_to_trim));
@@ -3298,7 +3445,7 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
// Initialize header of the trimmed array. We are storing the new length
// using release store after creating a filler for the left-over space to
// avoid races with the sweeper thread.
- object.synchronized_set_length(object.length() - elements_to_trim);
+ object.set_length(object.length() - elements_to_trim, kReleaseStore);
// Notify the heap object allocation tracker of change in object layout. The
// array may not be moved during GC, and size has to be adjusted nevertheless.
@@ -3451,8 +3598,6 @@ void Heap::ActivateMemoryReducerIfNeeded() {
}
void Heap::ReduceNewSpaceSize() {
- // TODO(ulan): Unify this constant with the similar constant in
- // GCIdleTimeHandler once the change is merged to 4.5.
static const size_t kLowAllocationThroughput = 1000;
const double allocation_throughput =
tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
@@ -3468,6 +3613,12 @@ void Heap::ReduceNewSpaceSize() {
}
}
+size_t Heap::NewSpaceSize() { return new_space() ? new_space()->Size() : 0; }
+
+size_t Heap::NewSpaceCapacity() {
+ return new_space() ? new_space()->Capacity() : 0;
+}
+
void Heap::FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason gc_reason) {
if (incremental_marking()->IsMarking() &&
@@ -3582,6 +3733,8 @@ class SlotCollectingVisitor final : public ObjectVisitor {
UNREACHABLE();
}
+ void VisitMapPointer(HeapObject object) override {} // do nothing by default
+
int number_of_slots() { return static_cast<int>(slots_.size()); }
MaybeObjectSlot slot(int i) { return slots_[i]; }
@@ -3613,13 +3766,13 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
// Check that the set of slots before and after the transition match.
SlotCollectingVisitor old_visitor;
object.IterateFast(&old_visitor);
- MapWord old_map_word = object.map_word();
+ MapWord old_map_word = object.map_word(kRelaxedLoad);
// Temporarily set the new map to iterate new slots.
- object.set_map_word(MapWord::FromMap(new_map));
+ object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
SlotCollectingVisitor new_visitor;
object.IterateFast(&new_visitor);
// Restore the old map.
- object.set_map_word(old_map_word);
+ object.set_map_word(old_map_word, kRelaxedStore);
DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
for (int i = 0; i < new_visitor.number_of_slots(); i++) {
DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
@@ -3692,12 +3845,15 @@ void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
}
}
-double Heap::MonotonicallyIncreasingTimeInMs() {
+double Heap::MonotonicallyIncreasingTimeInMs() const {
return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
static_cast<double>(base::Time::kMillisecondsPerSecond);
}
-void Heap::VerifyNewSpaceTop() { new_space()->VerifyTop(); }
+void Heap::VerifyNewSpaceTop() {
+ if (!new_space()) return;
+ new_space()->VerifyTop();
+}
bool Heap::IdleNotification(int idle_time_in_ms) {
return IdleNotification(
@@ -3866,6 +4022,11 @@ void Heap::AppendArrayBufferExtension(JSArrayBuffer object,
array_buffer_sweeper_->Append(object, extension);
}
+void Heap::DetachArrayBufferExtension(JSArrayBuffer object,
+ ArrayBufferExtension* extension) {
+ return array_buffer_sweeper_->Detach(object, extension);
+}
+
void Heap::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
initial_max_old_generation_size_threshold_ =
initial_max_old_generation_size_ * threshold_percent;
@@ -4009,13 +4170,23 @@ bool Heap::Contains(HeapObject value) const {
return false;
}
return HasBeenSetUp() &&
- (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
- code_space_->Contains(value) || map_space_->Contains(value) ||
- lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
- new_lo_space_->Contains(value));
+ ((new_space_ && new_space_->ToSpaceContains(value)) ||
+ old_space_->Contains(value) || code_space_->Contains(value) ||
+ map_space_->Contains(value) || lo_space_->Contains(value) ||
+ code_lo_space_->Contains(value) ||
+ (new_lo_space_ && new_lo_space_->Contains(value)));
+}
+
+bool Heap::SharedHeapContains(HeapObject value) const {
+ if (shared_old_space_)
+ return shared_old_space_->Contains(value) ||
+ shared_map_space_->Contains(value);
+ return false;
}
bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
+ return third_party_heap::Heap::InSpace(value.address(), space);
if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
@@ -4042,6 +4213,8 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
UNREACHABLE();
}
+bool Heap::IsShared() { return isolate()->is_shared(); }
+
bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
return false;
@@ -4116,7 +4289,7 @@ void Heap::Verify() {
VerifySmisVisitor smis_visitor;
IterateSmiRoots(&smis_visitor);
- new_space_->Verify(isolate());
+ if (new_space_) new_space_->Verify(isolate());
old_space_->Verify(isolate(), &visitor);
map_space_->Verify(isolate(), &visitor);
@@ -4126,7 +4299,7 @@ void Heap::Verify() {
lo_space_->Verify(isolate());
code_lo_space_->Verify(isolate());
- new_lo_space_->Verify(isolate());
+ if (new_lo_space_) new_lo_space_->Verify(isolate());
VerifyStringTable(isolate());
}
@@ -4147,7 +4320,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
ObjectSlot end) override {
#ifdef DEBUG
for (ObjectSlot slot = start; slot < end; ++slot) {
- DCHECK(!HasWeakHeapObjectTag(*slot));
+ DCHECK(!MapWord::IsPacked((*slot).ptr()) || !HasWeakHeapObjectTag(*slot));
}
#endif // DEBUG
VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
@@ -4315,7 +4488,7 @@ void Heap::VerifyCountersBeforeConcurrentSweeping() {
#endif
void Heap::ZapFromSpace() {
- if (!new_space_->IsFromSpaceCommitted()) return;
+ if (!new_space_ || !new_space_->IsFromSpaceCommitted()) return;
for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
memory_allocator()->ZapBlock(page->area_start(),
page->HighWaterMark() - page->area_start(),
@@ -4397,18 +4570,20 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
- for (FullObjectSlot p = start; p < end; ++p) FixHandle(p);
+ for (FullObjectSlot p = start; p < end; ++p) {
+ FixHandle(p);
+ }
}
private:
inline void FixHandle(FullObjectSlot p) {
if (!(*p).IsHeapObject()) return;
HeapObject current = HeapObject::cast(*p);
- if (!current.map_word().IsForwardingAddress() &&
+ if (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
current.IsFreeSpaceOrFiller()) {
#ifdef DEBUG
// We need to find a FixedArrayBase map after walking the fillers.
- while (!current.map_word().IsForwardingAddress() &&
+ while (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
current.IsFreeSpaceOrFiller()) {
Address next = current.ptr();
if (current.map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
@@ -4421,7 +4596,7 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
current = HeapObject::cast(Object(next));
}
- DCHECK(current.map_word().IsForwardingAddress() ||
+ DCHECK(current.map_word(kRelaxedLoad).IsForwardingAddress() ||
current.IsFixedArrayBase());
#endif // DEBUG
p.store(Smi::zero());
@@ -4502,8 +4677,10 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
// Iterate over local handles in handle scopes.
FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
#ifndef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
- isolate_->handle_scope_implementer()->Iterate(v);
+ if (!options.contains(SkipRoot::kMainThreadHandles)) {
+ isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
+ isolate_->handle_scope_implementer()->Iterate(v);
+ }
#endif
safepoint_->Iterate(&left_trim_visitor);
@@ -4772,8 +4949,8 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->end_marker = HeapStats::kEndMarker;
*stats->ro_space_size = read_only_space_->Size();
*stats->ro_space_capacity = read_only_space_->Capacity();
- *stats->new_space_size = new_space_->Size();
- *stats->new_space_capacity = new_space_->Capacity();
+ *stats->new_space_size = NewSpaceSize();
+ *stats->new_space_capacity = NewSpaceCapacity();
*stats->old_space_size = old_space_->SizeOfObjects();
*stats->old_space_capacity = old_space_->Capacity();
*stats->code_space_size = code_space_->SizeOfObjects();
@@ -5036,9 +5213,9 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
const base::Optional<size_t> global_memory_available =
GlobalMemoryAvailable();
- if (old_generation_space_available > new_space_->Capacity() &&
+ if (old_generation_space_available > NewSpaceCapacity() &&
(!global_memory_available ||
- global_memory_available > new_space_->Capacity())) {
+ global_memory_available > NewSpaceCapacity())) {
return IncrementalMarkingLimit::kNoLimit;
}
if (ShouldOptimizeForMemoryUsage()) {
@@ -5065,8 +5242,10 @@ void Heap::EnableInlineAllocation() {
inline_allocation_disabled_ = false;
// Update inline allocation limit for new space.
- new_space()->AdvanceAllocationObservers();
- new_space()->UpdateInlineAllocationLimit(0);
+ if (new_space()) {
+ new_space()->AdvanceAllocationObservers();
+ new_space()->UpdateInlineAllocationLimit(0);
+ }
}
void Heap::DisableInlineAllocation() {
@@ -5074,7 +5253,9 @@ void Heap::DisableInlineAllocation() {
inline_allocation_disabled_ = true;
// Update inline allocation limit for new space.
- new_space()->UpdateInlineAllocationLimit(0);
+ if (new_space()) {
+ new_space()->UpdateInlineAllocationLimit(0);
+ }
// Update inline allocation limit for old spaces.
PagedSpaceIterator spaces(this);
@@ -5138,6 +5319,10 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
return HeapObject();
}
+namespace {
+V8_DECLARE_ONCE(initialize_shared_code_range_once);
+} // namespace
+
void Heap::SetUp() {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout();
@@ -5159,9 +5344,45 @@ void Heap::SetUp() {
reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
~kMmapRegionMask;
+ v8::PageAllocator* code_page_allocator;
+ if (isolate_->RequiresCodeRange() || code_range_size_ != 0) {
+ const size_t requested_size =
+ code_range_size_ == 0 ? kMaximalCodeRangeSize : code_range_size_;
+ // When a target requires the code range feature, we put all code objects in
+ // a contiguous range of virtual address space, so that they can call each
+ // other with near calls.
+ if (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) {
+ // When sharing a pointer cage among Isolates, also share the
+ // CodeRange. isolate_->page_allocator() is the process-wide pointer
+ // compression cage's PageAllocator.
+ base::CallOnce(&initialize_shared_code_range_once,
+ &CodeRange::InitializeProcessWideCodeRangeOnce,
+ isolate_->page_allocator(), requested_size);
+ code_range_ = CodeRange::GetProcessWideCodeRange();
+ } else {
+ code_range_ = std::make_shared<CodeRange>();
+ if (!code_range_->InitReservation(isolate_->page_allocator(),
+ requested_size)) {
+ V8::FatalProcessOutOfMemory(
+ isolate_, "Failed to reserve virtual memory for CodeRange");
+ }
+ }
+
+ LOG(isolate_,
+ NewEvent("CodeRange",
+ reinterpret_cast<void*>(code_range_->reservation()->address()),
+ code_range_size_));
+
+ isolate_->AddCodeRange(code_range_->reservation()->region().begin(),
+ code_range_->reservation()->region().size());
+ code_page_allocator = code_range_->page_allocator();
+ } else {
+ code_page_allocator = isolate_->page_allocator();
+ }
+
// Set up memory allocator.
memory_allocator_.reset(
- new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
+ new MemoryAllocator(isolate_, code_page_allocator, MaxReserved()));
mark_compact_collector_.reset(new MarkCompactCollector(this));
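When the pointer-compression cage is shared between Isolates, the setup above also shares a single process-wide CodeRange, initialized exactly once via base::CallOnce. A standalone sketch of that once-initialized shared-resource shape, using std::call_once as a stand-in for v8::base::CallOnce (CodeRangeLike and its size handling are placeholders):

#include <cassert>
#include <cstddef>
#include <memory>
#include <mutex>

struct CodeRangeLike {
  explicit CodeRangeLike(size_t size) : size(size) {}
  size_t size;
};

std::shared_ptr<CodeRangeLike> process_wide_code_range;
std::once_flag code_range_once;

// Every caller after the first observes the same reservation.
std::shared_ptr<CodeRangeLike> GetProcessWideCodeRange(size_t requested_size) {
  std::call_once(code_range_once, [&] {
    process_wide_code_range = std::make_shared<CodeRangeLike>(requested_size);
  });
  return process_wide_code_range;
}

int main() {
  auto a = GetProcessWideCodeRange(128 * 1024 * 1024);
  auto b = GetProcessWideCodeRange(256 * 1024 * 1024);  // ignored: already set up
  assert(a.get() == b.get() && a->size == 128 * 1024 * 1024);
  return 0;
}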
@@ -5203,49 +5424,6 @@ void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
read_only_space_ = space;
}
-uint8_t* Heap::RemapEmbeddedBuiltinsIntoCodeRange(
- const uint8_t* embedded_blob_code, size_t embedded_blob_code_size) {
- const base::AddressRegion& code_range = memory_allocator()->code_range();
-
- CHECK_NE(code_range.begin(), kNullAddress);
- CHECK(!code_range.is_empty());
-
- v8::PageAllocator* code_page_allocator =
- memory_allocator()->code_page_allocator();
-
- const size_t kAllocatePageSize = code_page_allocator->AllocatePageSize();
- size_t allocate_code_size =
- RoundUp(embedded_blob_code_size, kAllocatePageSize);
-
- // Allocate the re-embedded code blob in the end.
- void* hint = reinterpret_cast<void*>(code_range.end() - allocate_code_size);
-
- void* embedded_blob_copy = code_page_allocator->AllocatePages(
- hint, allocate_code_size, kAllocatePageSize, PageAllocator::kNoAccess);
-
- if (!embedded_blob_copy) {
- V8::FatalProcessOutOfMemory(
- isolate(), "Can't allocate space for re-embedded builtins");
- }
-
- size_t code_size =
- RoundUp(embedded_blob_code_size, code_page_allocator->CommitPageSize());
-
- if (!code_page_allocator->SetPermissions(embedded_blob_copy, code_size,
- PageAllocator::kReadWrite)) {
- V8::FatalProcessOutOfMemory(isolate(),
- "Re-embedded builtins: set permissions");
- }
- memcpy(embedded_blob_copy, embedded_blob_code, embedded_blob_code_size);
-
- if (!code_page_allocator->SetPermissions(embedded_blob_copy, code_size,
- PageAllocator::kReadExecute)) {
- V8::FatalProcessOutOfMemory(isolate(),
- "Re-embedded builtins: set permissions");
- }
- return reinterpret_cast<uint8_t*>(embedded_blob_copy);
-}
-
class StressConcurrentAllocationObserver : public AllocationObserver {
public:
explicit StressConcurrentAllocationObserver(Heap* heap)
@@ -5269,15 +5447,19 @@ class StressConcurrentAllocationObserver : public AllocationObserver {
void Heap::SetUpSpaces() {
// Ensure SetUpFromReadOnlySpace has been run.
DCHECK_NOT_NULL(read_only_space_);
- space_[NEW_SPACE] = new_space_ =
- new NewSpace(this, memory_allocator_->data_page_allocator(),
- initial_semispace_size_, max_semi_space_size_);
+ if (!FLAG_single_generation) {
+ space_[NEW_SPACE] = new_space_ =
+ new NewSpace(this, memory_allocator_->data_page_allocator(),
+ initial_semispace_size_, max_semi_space_size_);
+ }
space_[OLD_SPACE] = old_space_ = new OldSpace(this);
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
space_[MAP_SPACE] = map_space_ = new MapSpace(this);
space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
- space_[NEW_LO_SPACE] = new_lo_space_ =
- new NewLargeObjectSpace(this, new_space_->Capacity());
+ if (!FLAG_single_generation) {
+ space_[NEW_LO_SPACE] = new_lo_space_ =
+ new NewLargeObjectSpace(this, NewSpaceCapacity());
+ }
space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
@@ -5300,6 +5482,8 @@ void Heap::SetUpSpaces() {
dead_object_stats_.reset(new ObjectStats(this));
}
local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
+ embedder_roots_handler_ =
+ &local_embedder_heap_tracer()->default_embedder_roots_handler();
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -5311,10 +5495,12 @@ void Heap::SetUpSpaces() {
}
#endif // ENABLE_MINOR_MC
- scavenge_job_.reset(new ScavengeJob());
- scavenge_task_observer_.reset(new ScavengeTaskObserver(
- this, ScavengeJob::YoungGenerationTaskTriggerSize(this)));
- new_space()->AddAllocationObserver(scavenge_task_observer_.get());
+ if (!FLAG_single_generation) {
+ scavenge_job_.reset(new ScavengeJob());
+ scavenge_task_observer_.reset(new ScavengeTaskObserver(
+ this, ScavengeJob::YoungGenerationTaskTriggerSize(this)));
+ new_space()->AddAllocationObserver(scavenge_task_observer_.get());
+ }
SetGetExternallyAllocatedMemoryInBytesCallback(
DefaultGetExternallyAllocatedMemoryInBytesCallback);
@@ -5325,7 +5511,7 @@ void Heap::SetUpSpaces() {
AddAllocationObserversToAllSpaces(stress_marking_observer_,
stress_marking_observer_);
}
- if (FLAG_stress_scavenge > 0) {
+ if (FLAG_stress_scavenge > 0 && new_space()) {
stress_scavenge_observer_ = new StressScavengeObserver(this);
new_space()->AddAllocationObserver(stress_scavenge_observer_);
}
@@ -5333,6 +5519,11 @@ void Heap::SetUpSpaces() {
write_protect_code_memory_ = FLAG_write_protect_code_memory;
}
+void Heap::InitializeMainThreadLocalHeap(LocalHeap* main_thread_local_heap) {
+ DCHECK_NULL(main_thread_local_heap_);
+ main_thread_local_heap_ = main_thread_local_heap;
+}
+
void Heap::InitializeHashSeed() {
DCHECK(!deserialization_complete_);
uint64_t new_hash_seed;
@@ -5438,6 +5629,14 @@ void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
local_embedder_heap_tracer()->SetRemoteTracer(tracer);
}
+void Heap::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
+ embedder_roots_handler_ = handler;
+}
+
+EmbedderRootsHandler* Heap::GetEmbedderRootsHandler() const {
+ return embedder_roots_handler_;
+}
+
EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
return local_embedder_heap_tracer()->remote_tracer();
}
@@ -5461,6 +5660,11 @@ EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
return EmbedderHeapTracer::TraceFlags::kNoFlags;
}
+const cppgc::EmbedderStackState* Heap::overriden_stack_state() const {
+ const auto* cpp_heap = CppHeap::From(cpp_heap_);
+ return cpp_heap ? cpp_heap->override_stack_state() : nullptr;
+}
+
void Heap::RegisterExternallyReferencedObject(Address* location) {
GlobalHandles::MarkTraced(location);
Object object(*location);
@@ -5524,7 +5728,10 @@ void Heap::TearDown() {
}
}
- new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
+ if (new_space()) {
+ new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
+ }
+
scavenge_task_observer_.reset();
scavenge_job_.reset();
@@ -5541,7 +5748,7 @@ void Heap::TearDown() {
delete stress_marking_observer_;
stress_marking_observer_ = nullptr;
}
- if (FLAG_stress_scavenge > 0) {
+ if (FLAG_stress_scavenge > 0 && new_space()) {
new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
delete stress_scavenge_observer_;
stress_scavenge_observer_ = nullptr;
@@ -5578,6 +5785,8 @@ void Heap::TearDown() {
dead_object_stats_.reset();
local_embedder_heap_tracer_.reset();
+ embedder_roots_handler_ = nullptr;
+
if (cpp_heap_) {
CppHeap::From(cpp_heap_)->DetachIsolate();
cpp_heap_ = nullptr;
@@ -5587,6 +5796,8 @@ void Heap::TearDown() {
tracer_.reset();
+ allocation_sites_to_pretenure_.reset();
+
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
space_[i] = nullptr;
@@ -5608,6 +5819,24 @@ void Heap::TearDown() {
memory_allocator_.reset();
}
+void Heap::InitSharedSpaces() {
+ shared_old_space_ = isolate()->shared_isolate()->heap()->old_space();
+ shared_old_allocator_.reset(
+ new ConcurrentAllocator(main_thread_local_heap(), shared_old_space_));
+
+ shared_map_space_ = isolate()->shared_isolate()->heap()->map_space();
+ shared_map_allocator_.reset(
+ new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
+}
+
+void Heap::DeinitSharedSpaces() {
+ shared_old_space_ = nullptr;
+ shared_old_allocator_.reset();
+
+ shared_map_space_ = nullptr;
+ shared_map_allocator_.reset();
+}
+
void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
GCType gc_type, void* data) {
DCHECK_NOT_NULL(callback);
@@ -5685,7 +5914,7 @@ Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
} // anonymous namespace
-void Heap::CompactWeakArrayLists(AllocationType allocation) {
+void Heap::CompactWeakArrayLists() {
// Find known PrototypeUsers and compact them.
std::vector<Handle<PrototypeInfo>> prototype_infos;
{
@@ -5702,20 +5931,18 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) {
for (auto& prototype_info : prototype_infos) {
Handle<WeakArrayList> array(
WeakArrayList::cast(prototype_info->prototype_users()), isolate());
- DCHECK_IMPLIES(allocation == AllocationType::kOld,
- InOldSpace(*array) ||
- *array == ReadOnlyRoots(this).empty_weak_array_list());
+ DCHECK(InOldSpace(*array) ||
+ *array == ReadOnlyRoots(this).empty_weak_array_list());
WeakArrayList new_array = PrototypeUsers::Compact(
- array, this, JSObject::PrototypeRegistryCompactionCallback, allocation);
+ array, this, JSObject::PrototypeRegistryCompactionCallback,
+ AllocationType::kOld);
prototype_info->set_prototype_users(new_array);
}
// Find known WeakArrayLists and compact them.
Handle<WeakArrayList> scripts(script_list(), isolate());
- DCHECK_IMPLIES(
- !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && allocation == AllocationType::kOld,
- InOldSpace(*scripts));
- scripts = CompactWeakArrayList(this, scripts, allocation);
+ DCHECK(InOldSpace(*scripts));
+ scripts = CompactWeakArrayList(this, scripts, AllocationType::kOld);
set_script_list(*scripts);
}
@@ -5847,7 +6074,6 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
void Heap::VerifySlotRangeHasNoRecordedSlots(Address start, Address end) {
#ifndef V8_DISABLE_WRITE_BARRIERS
Page* page = Page::FromAddress(start);
- DCHECK(!page->IsLargePage());
DCHECK(!page->InYoungGeneration());
RememberedSet<OLD_TO_NEW>::CheckNoneInRange(page, start, end);
#endif
@@ -5887,18 +6113,26 @@ PagedSpace* PagedSpaceIterator::Next() {
}
SpaceIterator::SpaceIterator(Heap* heap)
- : heap_(heap), current_space_(FIRST_MUTABLE_SPACE - 1) {}
+ : heap_(heap), current_space_(FIRST_MUTABLE_SPACE) {}
SpaceIterator::~SpaceIterator() = default;
bool SpaceIterator::HasNext() {
- // Iterate until no more spaces.
- return current_space_ != LAST_SPACE;
+ while (current_space_ <= LAST_MUTABLE_SPACE) {
+ Space* space = heap_->space(current_space_);
+ if (space) return true;
+ ++current_space_;
+ }
+
+ // No more spaces left.
+ return false;
}
Space* SpaceIterator::Next() {
DCHECK(HasNext());
- return heap_->space(++current_space_);
+ Space* space = heap_->space(current_space_++);
+ DCHECK_NOT_NULL(space);
+ return space;
}
class HeapObjectsFilter {
@@ -5922,14 +6156,14 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
bool SkipObject(HeapObject object) override {
if (object.IsFreeSpaceOrFiller()) return true;
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
+ Address chunk = object.ptr() & ~kLogicalChunkAlignmentMask;
if (reachable_.count(chunk) == 0) return true;
return reachable_[chunk]->count(object) == 0;
}
private:
bool MarkAsReachable(HeapObject object) {
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
+ Address chunk = object.ptr() & ~kLogicalChunkAlignmentMask;
if (reachable_.count(chunk) == 0) {
reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
}
@@ -5938,11 +6172,20 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
return true;
}
+ static constexpr intptr_t kLogicalChunkAlignment =
+ (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+ static constexpr intptr_t kLogicalChunkAlignmentMask =
+ kLogicalChunkAlignment - 1;
+
class MarkingVisitor : public ObjectVisitor, public RootVisitor {
public:
explicit MarkingVisitor(UnreachableObjectsFilter* filter)
: filter_(filter) {}
+ void VisitMapPointer(HeapObject object) override {
+ MarkHeapObject(Map::unchecked_cast(object.map()));
+ }
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
@@ -6017,8 +6260,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
Heap* heap_;
DISALLOW_GARBAGE_COLLECTION(no_gc_)
- std::unordered_map<BasicMemoryChunk*,
- std::unordered_set<HeapObject, Object::Hasher>*>
+ std::unordered_map<Address, std::unordered_set<HeapObject, Object::Hasher>*>
reachable_;
};
@@ -6041,13 +6283,14 @@ HeapObjectIterator::HeapObjectIterator(
break;
}
object_iterator_ = space_iterator_->Next()->GetObjectIterator(heap_);
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) heap_->tp_heap_->ResetIterator();
}
HeapObjectIterator::~HeapObjectIterator() {
#ifdef DEBUG
// Assert that in filtering mode we have iterated through all
  // objects. Otherwise, the heap will be left in an inconsistent state.
- if (filtering_ != kNoFiltering) {
+ if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL && filtering_ != kNoFiltering) {
DCHECK_NULL(object_iterator_);
}
#endif
@@ -6064,6 +6307,7 @@ HeapObject HeapObjectIterator::Next() {
}
HeapObject HeapObjectIterator::NextObject() {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return heap_->tp_heap_->NextObject();
// No iterator means we are done.
if (object_iterator_.get() == nullptr) return HeapObject();
@@ -6288,8 +6532,6 @@ MaybeHandle<JSFinalizationRegistry> Heap::DequeueDirtyJSFinalizationRegistry() {
}
void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
- if (!FLAG_harmony_weak_refs) return;
-
DisallowGarbageCollection no_gc;
Isolate* isolate = this->isolate();
@@ -6319,7 +6561,6 @@ void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
}
void Heap::KeepDuringJob(Handle<JSReceiver> target) {
- DCHECK(FLAG_harmony_weak_refs);
DCHECK(weak_refs_keep_during_job().IsUndefined() ||
weak_refs_keep_during_job().IsOrderedHashSet());
Handle<OrderedHashSet> table;
@@ -6454,7 +6695,8 @@ void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
if (object.GetHeapObject(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
} else {
- CHECK(object.IsSmi() || object.IsCleared());
+ CHECK(object.IsSmi() || object.IsCleared() ||
+ MapWord::IsPacked(object.ptr()));
}
}
}
@@ -6538,7 +6780,7 @@ void Heap::CreateObjectStats() {
}
Map Heap::GcSafeMapOfCodeSpaceObject(HeapObject object) {
- MapWord map_word = object.map_word();
+ MapWord map_word = object.map_word(kRelaxedLoad);
return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress().map()
: map_word.ToMap();
}
@@ -6701,6 +6943,7 @@ template void Heap::WriteBarrierForRange<MaybeObjectSlot>(
template <typename TSlot>
void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
TSlot end_slot) {
+ if (FLAG_disable_write_barriers) return;
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
base::Flags<RangeWriteBarrierMode> mode;
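The heap.cc hunks above make the young generation optional: NEW_SPACE and NEW_LO_SPACE are only created when !FLAG_single_generation, observer registration is guarded on new_space(), and SpaceIterator now skips table entries that were never set up. A minimal standalone sketch of that skip-null iteration shape, using illustrative types rather than V8's (this is not the actual SpaceIterator):

    // Sketch only: iterate an array of optionally-created spaces, skipping
    // null slots, mirroring the reworked SpaceIterator::HasNext()/Next().
    #include <array>
    #include <cstddef>

    struct Space {};

    class NullableSpaceIterator {
     public:
      explicit NullableSpaceIterator(const std::array<Space*, 8>& spaces)
          : spaces_(spaces) {}

      bool HasNext() {
        while (index_ < spaces_.size()) {
          if (spaces_[index_] != nullptr) return true;
          ++index_;  // This space was never set up (e.g. single-generation mode).
        }
        return false;
      }

      Space* Next() {
        // Callers are expected to check HasNext() first, as the DCHECK does above.
        return spaces_[index_++];
      }

     private:
      const std::array<Space*, 8>& spaces_;
      std::size_t index_ = 0;
    };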
diff --git a/chromium/v8/src/heap/heap.h b/chromium/v8/src/heap/heap.h
index 429f8864be7..4be4d8f7325 100644
--- a/chromium/v8/src/heap/heap.h
+++ b/chromium/v8/src/heap/heap.h
@@ -66,12 +66,16 @@ class ArrayBufferCollector;
class ArrayBufferSweeper;
class BasicMemoryChunk;
class CodeLargeObjectSpace;
+class CodeRange;
class CollectionBarrier;
+class ConcurrentAllocator;
class ConcurrentMarking;
class CppHeap;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
+template <typename T>
+class GlobalHandleVector;
class GlobalSafepoint;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
@@ -176,6 +180,7 @@ enum class SkipRoot {
kGlobalHandles,
kOldGeneration,
kStack,
+ kMainThreadHandles,
kUnserializable,
kWeak
};
@@ -194,7 +199,7 @@ class StrongRootsEntry {
class AllocationResult {
public:
- static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
+ static inline AllocationResult Retry(AllocationSpace space) {
return AllocationResult(space);
}
@@ -513,6 +518,9 @@ class Heap {
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();
+ size_t NewSpaceSize();
+ size_t NewSpaceCapacity();
+
// Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges can overlap.
V8_EXPORT_PRIVATE void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
@@ -566,9 +574,12 @@ class Heap {
V8_EXPORT_PRIVATE int NotifyContextDisposed(bool dependant_context);
void set_native_contexts_list(Object object) {
- native_contexts_list_ = object;
+ native_contexts_list_.store(object.ptr(), std::memory_order_release);
+ }
+
+ Object native_contexts_list() const {
+ return Object(native_contexts_list_.load(std::memory_order_acquire));
}
- Object native_contexts_list() const { return native_contexts_list_; }
void set_allocation_sites_list(Object object) {
allocation_sites_list_ = object;
@@ -693,10 +704,12 @@ class Heap {
void AppendArrayBufferExtension(JSArrayBuffer object,
ArrayBufferExtension* extension);
+ void DetachArrayBufferExtension(JSArrayBuffer object,
+ ArrayBufferExtension* extension);
GlobalSafepoint* safepoint() { return safepoint_.get(); }
- V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs();
+ V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs() const;
void VerifyNewSpaceTop();
@@ -743,7 +756,7 @@ class Heap {
size_t backing_store_bytes() const { return backing_store_bytes_; }
- void CompactWeakArrayLists(AllocationType allocation);
+ void CompactWeakArrayLists();
V8_EXPORT_PRIVATE void AddRetainedMap(Handle<NativeContext> context,
Handle<Map> map);
@@ -760,6 +773,11 @@ class Heap {
inline bool CanAllocateInReadOnlySpace();
bool deserialization_complete() const { return deserialization_complete_; }
+ // We can only invoke Safepoint() on the main thread local heap after
+ // deserialization is complete. Before that, main_thread_local_heap_ might be
+ // null.
+ V8_INLINE bool CanSafepoint() const { return deserialization_complete(); }
+
bool HasLowAllocationRate();
bool HasHighFragmentation();
bool HasHighFragmentation(size_t used, size_t committed);
@@ -802,6 +820,9 @@ class Heap {
// Sets up the heap memory without creating any objects.
void SetUpSpaces();
+ // Prepares the heap, setting up for deserialization.
+ void InitializeMainThreadLocalHeap(LocalHeap* main_thread_local_heap);
+
// (Re-)Initialize hash seed from flag or RNG.
void InitializeHashSeed();
@@ -812,12 +833,6 @@ class Heap {
// Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
void CreateObjectStats();
- // If the code range exists, allocates executable pages in the code range and
- // copies the embedded builtins code blob there. Returns address of the copy.
- // The builtins code region will be freed with the code range at tear down.
- uint8_t* RemapEmbeddedBuiltinsIntoCodeRange(const uint8_t* embedded_blob_code,
- size_t embedded_blob_code_size);
-
// Sets the TearDown state, so no new GC tasks get posted.
void StartTearDown();
@@ -827,6 +842,12 @@ class Heap {
// Returns whether SetUp has been called.
bool HasBeenSetUp() const;
+  // Initializes shared spaces.
+ void InitSharedSpaces();
+
+ // Removes shared spaces again.
+ void DeinitSharedSpaces();
+
// ===========================================================================
// Getters for spaces. =======================================================
// ===========================================================================
@@ -857,6 +878,7 @@ class Heap {
}
inline Isolate* isolate();
+ inline const Isolate* isolate() const;
MarkCompactCollector* mark_compact_collector() {
return mark_compact_collector_.get();
@@ -870,7 +892,11 @@ class Heap {
return array_buffer_sweeper_.get();
}
- const base::AddressRegion& code_range();
+ const base::AddressRegion& code_region();
+
+ CodeRange* code_range() { return code_range_.get(); }
+
+ LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }
// ===========================================================================
// Root set access. ==========================================================
@@ -1076,17 +1102,19 @@ class Heap {
void EnsureSweepingCompleted();
- IncrementalMarking* incremental_marking() {
+ IncrementalMarking* incremental_marking() const {
return incremental_marking_.get();
}
- MarkingBarrier* marking_barrier() { return marking_barrier_.get(); }
+ MarkingBarrier* marking_barrier() const { return marking_barrier_.get(); }
// ===========================================================================
// Concurrent marking API. ===================================================
// ===========================================================================
- ConcurrentMarking* concurrent_marking() { return concurrent_marking_.get(); }
+ ConcurrentMarking* concurrent_marking() const {
+ return concurrent_marking_.get();
+ }
// The runtime uses this function to notify potentially unsafe object layout
// changes that require special synchronization with the concurrent marker.
@@ -1150,6 +1178,16 @@ class Heap {
v8::CppHeap* cpp_heap() const { return cpp_heap_; }
+ const cppgc::EmbedderStackState* overriden_stack_state() const;
+
+ // ===========================================================================
+ // Embedder roots optimizations. =============================================
+ // ===========================================================================
+
+ V8_EXPORT_PRIVATE void SetEmbedderRootsHandler(EmbedderRootsHandler* handler);
+
+ EmbedderRootsHandler* GetEmbedderRootsHandler() const;
+
// ===========================================================================
// External string table API. ================================================
// ===========================================================================
@@ -1192,10 +1230,18 @@ class Heap {
// heaps is required.
V8_EXPORT_PRIVATE bool Contains(HeapObject value) const;
+ // Checks whether an address/object is in the non-read-only heap (including
+ // auxiliary area and unused area). Use IsValidHeapObject if checking both
+ // heaps is required.
+ V8_EXPORT_PRIVATE bool SharedHeapContains(HeapObject value) const;
+
  // Checks whether an address/object is in a space.
// Currently used by tests, serialization and heap verification only.
V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space) const;
+ // Returns true when this heap is shared.
+ V8_EXPORT_PRIVATE bool IsShared();
+
// Slow methods that can be used for verification as they can also be used
// with off-heap Addresses.
V8_EXPORT_PRIVATE bool InSpaceSlow(Address addr, AllocationSpace space) const;
@@ -1335,9 +1381,9 @@ class Heap {
survived_since_last_expansion_ += survived;
}
- inline void UpdateNewSpaceAllocationCounter();
+ void UpdateNewSpaceAllocationCounter();
- inline size_t NewSpaceAllocationCounter();
+ V8_EXPORT_PRIVATE size_t NewSpaceAllocationCounter();
// This should be used only for testing.
void set_new_space_allocation_counter(size_t new_value) {
@@ -1451,6 +1497,12 @@ class Heap {
void MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback);
+ // Adds an allocation site to the list of sites to be pretenured during the
+ // next collection. Added allocation sites are pretenured independent of
+ // their feedback.
+ V8_EXPORT_PRIVATE void PretenureAllocationSiteOnNextCollection(
+ AllocationSite site);
+
// ===========================================================================
// Allocation tracking. ======================================================
// ===========================================================================
@@ -2020,7 +2072,7 @@ class Heap {
AllocationAlignment alignment = kWordAligned);
// Allocates a heap object based on the map.
- V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
+ V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map,
AllocationType allocation);
// Allocates a partial map for bootstrapping.
@@ -2045,6 +2097,7 @@ class Heap {
// Stores the option corresponding to the object in the provided *option.
bool IsRetainingPathTarget(HeapObject object, RetainingPathOption* option);
void PrintRetainingPath(HeapObject object, RetainingPathOption option);
+ void UpdateRetainersAfterScavenge();
#ifdef DEBUG
V8_EXPORT_PRIVATE void IncrementObjectCounters();
@@ -2117,9 +2170,18 @@ class Heap {
CodeLargeObjectSpace* code_lo_space_ = nullptr;
NewLargeObjectSpace* new_lo_space_ = nullptr;
ReadOnlySpace* read_only_space_ = nullptr;
+
+ OldSpace* shared_old_space_ = nullptr;
+ MapSpace* shared_map_space_ = nullptr;
+
+ std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
+ std::unique_ptr<ConcurrentAllocator> shared_map_allocator_;
+
// Map from the space id to the space.
Space* space_[LAST_SPACE + 1];
+ LocalHeap* main_thread_local_heap_ = nullptr;
+
// List for tracking ArrayBufferExtensions
ArrayBufferExtension* old_array_buffer_extensions_ = nullptr;
ArrayBufferExtension* young_array_buffer_extensions_ = nullptr;
@@ -2189,7 +2251,9 @@ class Heap {
// Weak list heads, threaded through the objects.
// List heads are initialized lazily and contain the undefined_value at start.
- Object native_contexts_list_;
+ // {native_contexts_list_} is an Address instead of an Object to allow the use
+ // of atomic accessors.
+ std::atomic<Address> native_contexts_list_;
Object allocation_sites_list_;
Object dirty_js_finalization_registries_list_;
// Weak list tails.
@@ -2247,9 +2311,18 @@ class Heap {
std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
std::unique_ptr<MarkingBarrier> marking_barrier_;
+ // This object controls virtual space reserved for code on the V8 heap. This
+  // is only valid for 64-bit architectures where kRequiresCodeRange is true.
+ //
+ // Owned by the heap when !V8_COMPRESS_POINTERS_IN_SHARED_CAGE, otherwise is
+ // process-wide.
+ std::shared_ptr<CodeRange> code_range_;
+
// The embedder owns the C++ heap.
v8::CppHeap* cpp_heap_ = nullptr;
+ EmbedderRootsHandler* embedder_roots_handler_ = nullptr;
+
StrongRootsEntry* strong_roots_head_ = nullptr;
base::Mutex strong_roots_mutex_;
@@ -2278,6 +2351,9 @@ class Heap {
// forwarding pointers.
PretenuringFeedbackMap global_pretenuring_feedback_;
+ std::unique_ptr<GlobalHandleVector<AllocationSite>>
+ allocation_sites_to_pretenure_;
+
char trace_ring_buffer_[kTraceRingBufferSize];
// Used as boolean.
@@ -2335,14 +2411,15 @@ class Heap {
int allocation_timeout_ = 0;
#endif // V8_ENABLE_ALLOCATION_TIMEOUT
- std::map<HeapObject, HeapObject, Object::Comparer> retainer_;
- std::map<HeapObject, Root, Object::Comparer> retaining_root_;
+ std::unordered_map<HeapObject, HeapObject, Object::Hasher> retainer_;
+ std::unordered_map<HeapObject, Root, Object::Hasher> retaining_root_;
// If an object is retained by an ephemeron, then the retaining key of the
// ephemeron is stored in this map.
- std::map<HeapObject, HeapObject, Object::Comparer> ephemeron_retainer_;
+ std::unordered_map<HeapObject, HeapObject, Object::Hasher>
+ ephemeron_retainer_;
  // For each index in the retaining_path_targets_ array this map
// stores the option of the corresponding target.
- std::map<int, RetainingPathOption> retaining_path_target_option_;
+ std::unordered_map<int, RetainingPathOption> retaining_path_target_option_;
std::vector<HeapObjectAllocationTracker*> allocation_trackers_;
@@ -2367,6 +2444,7 @@ class Heap {
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class LocalHeap;
+ friend class MarkingBarrier;
friend class OldLargeObjectSpace;
template <typename ConcreteVisitor, typename MarkingState>
friend class MarkingVisitorBase;
@@ -2630,14 +2708,13 @@ class HeapObjectAllocationTracker {
template <typename T>
T ForwardingAddress(T heap_obj) {
- MapWord map_word = heap_obj.map_word();
+ MapWord map_word = heap_obj.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
return T::cast(map_word.ToForwardingAddress());
} else if (Heap::InFromPage(heap_obj)) {
return T();
} else {
- // TODO(ulan): Support minor mark-compactor here.
return heap_obj;
}
}
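In the heap.h hunk above, native_contexts_list_ becomes a std::atomic<Address> so that the list head can be written with release semantics and read with acquire semantics from other threads, as the accessor change in the same hunk shows. A minimal sketch of that release/acquire publication pattern with placeholder names (not V8's types):

    #include <atomic>
    #include <cstdint>

    using Address = std::uintptr_t;

    class ListHead {
     public:
      // Release store: writes made to the list node before this store become
      // visible to any thread that later acquire-loads the head.
      void set(Address value) { head_.store(value, std::memory_order_release); }

      // Acquire load: pairs with the release store above.
      Address get() const { return head_.load(std::memory_order_acquire); }

     private:
      std::atomic<Address> head_{0};
    };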
diff --git a/chromium/v8/src/heap/incremental-marking.cc b/chromium/v8/src/heap/incremental-marking.cc
index a0938359817..0c405c40bf8 100644
--- a/chromium/v8/src/heap/incremental-marking.cc
+++ b/chromium/v8/src/heap/incremental-marking.cc
@@ -39,9 +39,8 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
size_t size) {
Heap* heap = incremental_marking_->heap();
VMState<GC> state(heap->isolate());
- RuntimeCallTimerScope runtime_timer(
- heap->isolate(),
- RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
+ RCS_SCOPE(heap->isolate(),
+ RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
incremental_marking_->AdvanceOnAllocation();
// AdvanceIncrementalMarkingOnAllocation can start incremental marking.
incremental_marking_->EnsureBlackAllocated(addr, size);
@@ -108,20 +107,28 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
+ DCHECK(!MapWord::IsPacked((*p).ptr()));
MarkObjectByPointer(p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
- for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
+ for (FullObjectSlot p = start; p < end; ++p) {
+ DCHECK(!MapWord::IsPacked((*p).ptr()));
+ MarkObjectByPointer(p);
+ }
}
private:
void MarkObjectByPointer(FullObjectSlot p) {
- Object obj = *p;
- if (!obj.IsHeapObject()) return;
-
- heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
+ Object object = *p;
+ if (!object.IsHeapObject()) return;
+ DCHECK(!MapWord::IsPacked(object.ptr()));
+ HeapObject heap_object = HeapObject::cast(object);
+ BasicMemoryChunk* target_page =
+ BasicMemoryChunk::FromHeapObject(heap_object);
+ if (target_page->InSharedHeap()) return;
+ heap_->incremental_marking()->WhiteToGreyAndPush(heap_object);
}
Heap* heap_;
@@ -132,12 +139,14 @@ bool IncrementalMarking::WasActivated() { return was_activated_; }
bool IncrementalMarking::CanBeActivated() {
- // Only start incremental marking in a safe state: 1) when incremental
- // marking is turned on, 2) when we are currently not in a GC, and
- // 3) when we are currently not serializing or deserializing the heap.
+ // Only start incremental marking in a safe state:
+ // 1) when incremental marking is turned on
+ // 2) when we are currently not in a GC, and
+ // 3) when we are currently not serializing or deserializing the heap, and
+ // 4) not a shared heap.
return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
heap_->deserialization_complete() &&
- !heap_->isolate()->serializer_enabled();
+ !heap_->isolate()->serializer_enabled() && !heap_->IsShared();
}
bool IncrementalMarking::IsBelowActivationThresholds() const {
@@ -147,6 +156,7 @@ bool IncrementalMarking::IsBelowActivationThresholds() const {
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
DCHECK(!collector_->sweeping_in_progress());
+ DCHECK(!heap_->IsShared());
if (FLAG_trace_incremental_marking) {
const size_t old_generation_size_mb =
@@ -314,7 +324,9 @@ void IncrementalMarking::MarkRoots() {
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateRoots(
- &visitor, base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kWeak});
+ &visitor,
+ base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kMainThreadHandles,
+ SkipRoot::kWeak});
}
bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
@@ -356,6 +368,9 @@ void IncrementalMarking::RetainMaps() {
if (!map_retaining_is_disabled && marking_state()->IsWhite(map)) {
if (ShouldRetainMap(map, age)) {
WhiteToGreyAndPush(map);
+ if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+ heap_->AddRetainingRoot(Root::kRetainMaps, map);
+ }
}
Object prototype = map.prototype();
if (age > 0 && prototype.IsHeapObject() &&
@@ -432,7 +447,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
DCHECK(obj.IsHeapObject());
// Only pointers to from space have to be updated.
if (Heap::InFromPage(obj)) {
- MapWord map_word = obj.map_word();
+ MapWord map_word = obj.map_word(kRelaxedLoad);
if (!map_word.IsForwardingAddress()) {
// There may be objects on the marking deque that do not exist
// anymore, e.g. left trimmed objects or objects from the root set
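The incremental-marking.cc hunk above makes the root-marking visitor skip any object whose page is in the shared heap, presumably so that a client isolate's incremental marker never marks objects owned by the shared isolate (CanBeActivated() and Start() now also refuse shared heaps). A rough sketch of that filter shape with invented helper names:

    // Sketch only: mark a root object unless it lives on a shared-heap page.
    // PageOf and Mark stand in for V8's chunk lookup and WhiteToGreyAndPush.
    struct FakePage {
      bool in_shared_heap = false;
    };

    template <typename Object, typename PageOfFn, typename MarkFn>
    void MarkRootUnlessShared(Object* object, PageOfFn page_of, MarkFn mark) {
      if (object == nullptr) return;                // Not a heap object.
      if (page_of(object).in_shared_heap) return;   // Owned by the shared heap.
      mark(object);                                 // Local heap: shade it.
    }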
diff --git a/chromium/v8/src/heap/large-spaces.cc b/chromium/v8/src/heap/large-spaces.cc
index 5cbcc8620fb..4bb989fe9a8 100644
--- a/chromium/v8/src/heap/large-spaces.cc
+++ b/chromium/v8/src/heap/large-spaces.cc
@@ -5,6 +5,7 @@
#include "src/heap/large-spaces.h"
#include "src/base/platform/mutex.h"
+#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/combined-heap.h"
@@ -18,7 +19,6 @@
#include "src/heap/spaces-inl.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
-#include "src/sanitizer/msan.h"
#include "src/utils/ostreams.h"
namespace v8 {
@@ -130,6 +130,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
+ DCHECK(!FLAG_enable_third_party_heap);
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->CanExpandOldGeneration(object_size) ||
@@ -160,6 +161,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
AllocationResult OldLargeObjectSpace::AllocateRawBackground(
LocalHeap* local_heap, int object_size) {
+ DCHECK(!FLAG_enable_third_party_heap);
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
@@ -446,6 +448,7 @@ NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
capacity_(capacity) {}
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
+ DCHECK(!FLAG_enable_third_party_heap);
// Do not allocate more objects if promoting the existing object would exceed
// the old generation capacity.
if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
@@ -532,6 +535,7 @@ CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
chunk_map_(kInitialChunkMapCapacity) {}
AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
+ DCHECK(!FLAG_enable_third_party_heap);
return OldLargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}
diff --git a/chromium/v8/src/heap/large-spaces.h b/chromium/v8/src/heap/large-spaces.h
index 8761b9949c4..1e53671a9b1 100644
--- a/chromium/v8/src/heap/large-spaces.h
+++ b/chromium/v8/src/heap/large-spaces.h
@@ -32,6 +32,7 @@ class LargePage : public MemoryChunk {
static const int kMaxCodePageSize = 512 * MB;
static LargePage* FromHeapObject(HeapObject o) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
}
diff --git a/chromium/v8/src/heap/local-allocator.h b/chromium/v8/src/heap/local-allocator.h
index 9efbf3cf563..e64932b9fe9 100644
--- a/chromium/v8/src/heap/local-allocator.h
+++ b/chromium/v8/src/heap/local-allocator.h
@@ -36,7 +36,7 @@ class EvacuationAllocator {
// Give back remaining LAB space if this EvacuationAllocator's new space LAB
// sits right next to new space allocation top.
const LinearAllocationArea info = new_space_lab_.CloseAndMakeIterable();
- new_space_->MaybeFreeUnusedLab(info);
+ if (new_space_) new_space_->MaybeFreeUnusedLab(info);
}
inline AllocationResult Allocate(AllocationSpace space, int object_size,
diff --git a/chromium/v8/src/heap/local-heap-inl.h b/chromium/v8/src/heap/local-heap-inl.h
index fd0ec5a4499..e1333773dd3 100644
--- a/chromium/v8/src/heap/local-heap-inl.h
+++ b/chromium/v8/src/heap/local-heap-inl.h
@@ -18,6 +18,7 @@ namespace internal {
AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
AllocationOrigin origin,
AllocationAlignment alignment) {
+ DCHECK(!FLAG_enable_third_party_heap);
#if DEBUG
VerifyCurrent();
DCHECK(AllowHandleAllocation::IsAllowed());
@@ -45,6 +46,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
Address LocalHeap::AllocateRawOrFail(int object_size, AllocationType type,
AllocationOrigin origin,
AllocationAlignment alignment) {
+ DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
if (!result.IsRetry()) return result.ToObject().address();
return PerformCollectionAndAllocateAgain(object_size, type, origin,
diff --git a/chromium/v8/src/heap/local-heap.cc b/chromium/v8/src/heap/local-heap.cc
index 70cdbcc0d74..2c6724ba3a3 100644
--- a/chromium/v8/src/heap/local-heap.cc
+++ b/chromium/v8/src/heap/local-heap.cc
@@ -221,7 +221,7 @@ bool LocalHeap::TryPerformCollection() {
heap_->CollectGarbageForBackground(this);
return true;
} else {
- LocalHeap* main_thread = heap_->isolate()->main_thread_local_heap();
+ LocalHeap* main_thread = heap_->main_thread_local_heap();
ThreadState current = main_thread->state_relaxed();
while (true) {
diff --git a/chromium/v8/src/heap/mark-compact-inl.h b/chromium/v8/src/heap/mark-compact-inl.h
index a9db17f2aac..f28b9b5e849 100644
--- a/chromium/v8/src/heap/mark-compact-inl.h
+++ b/chromium/v8/src/heap/mark-compact-inl.h
@@ -205,9 +205,10 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject black_object = HeapObject::FromAddress(addr);
- Object map_object = ObjectSlot(addr).Acquire_Load();
+ Object map_object = black_object.map(kAcquireLoad);
CHECK(map_object.IsMap());
map = Map::cast(map_object);
+ DCHECK(map.IsMap());
size = black_object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
Address end = addr + size - kTaggedSize;
@@ -235,10 +236,11 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
object = black_object;
}
} else if ((mode == kGreyObjects || mode == kAllLiveObjects)) {
- Object map_object = ObjectSlot(addr).Acquire_Load();
+ object = HeapObject::FromAddress(addr);
+ Object map_object = object.map(kAcquireLoad);
CHECK(map_object.IsMap());
map = Map::cast(map_object);
- object = HeapObject::FromAddress(addr);
+ DCHECK(map.IsMap());
size = object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
}
diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc
index 951b49507ca..73eab9e2038 100644
--- a/chromium/v8/src/heap/mark-compact.cc
+++ b/chromium/v8/src/heap/mark-compact.cc
@@ -42,6 +42,7 @@
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
#include "src/init/v8.h"
+#include "src/logging/tracing-flags.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
@@ -51,6 +52,7 @@
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/tasks/cancelable-task.h"
+#include "src/tracing/tracing-category-observer.h"
#include "src/utils/utils-inl.h"
namespace v8 {
@@ -82,6 +84,7 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const MemoryChunk* chunk) = 0;
+ virtual void VerifyMap(Map map) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
@@ -105,6 +108,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
VerifyRootPointers(start, end);
}
+ void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
+
void VerifyRoots();
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
void VerifyMarking(NewSpace* new_space);
@@ -146,6 +151,7 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
}
void MarkingVerifier::VerifyMarking(NewSpace* space) {
+ if (!space) return;
Address end = space->top();
// The bottom position is at the start of its page. Allows us to use
// page->area_start() as start of range on all pages.
@@ -168,6 +174,7 @@ void MarkingVerifier::VerifyMarking(PagedSpace* space) {
}
void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
+ if (!lo_space) return;
LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
@@ -208,6 +215,8 @@ class FullMarkingVerifier : public MarkingVerifier {
return marking_state_->IsBlackOrGrey(object);
}
+ void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
+
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@@ -235,6 +244,9 @@ class FullMarkingVerifier : public MarkingVerifier {
private:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+ if (!heap_->IsShared() &&
+ BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
+ return;
CHECK(marking_state_->IsBlackOrGrey(heap_object));
}
@@ -271,11 +283,14 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
VerifyRootPointers(start, end);
}
+ void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
+
protected:
explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
inline Heap* heap() { return heap_; }
+ virtual void VerifyMap(Map map) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
@@ -302,6 +317,7 @@ void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
}
void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
+ if (!space) return;
PageRange range(space->first_allocatable_address(), space->top());
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
@@ -354,7 +370,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
}
}
-
+ void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@@ -408,6 +424,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
#ifdef DEBUG
state_(IDLE),
#endif
+ is_shared_heap_(heap->IsShared()),
was_marked_incrementally_(false),
evacuation_(false),
compacting_(false),
@@ -548,6 +565,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
}
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
+ if (!space) return;
for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
@@ -555,6 +573,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
}
void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
+ if (!space) return;
LargeObjectSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
CHECK(non_atomic_marking_state()->IsWhite(obj));
@@ -863,9 +882,14 @@ void MarkCompactCollector::Prepare() {
[](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
// All objects are guaranteed to be initialized in atomic pause
- heap()->new_lo_space()->ResetPendingObject();
- DCHECK_EQ(heap()->new_space()->top(),
- heap()->new_space()->original_top_acquire());
+ if (heap()->new_lo_space()) {
+ heap()->new_lo_space()->ResetPendingObject();
+ }
+
+ if (heap()->new_space()) {
+ DCHECK_EQ(heap()->new_space()->top(),
+ heap()->new_space()->original_top_acquire());
+ }
}
void MarkCompactCollector::FinishConcurrentMarking() {
@@ -950,26 +974,34 @@ void MarkCompactCollector::SweepArrayBufferExtensions() {
class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
public:
explicit RootMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
+ : collector_(collector), is_shared_heap_(collector->is_shared_heap()) {}
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) final {
+ DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
MarkObjectByPointer(root, p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) final {
- for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
+ for (FullObjectSlot p = start; p < end; ++p) {
+ MarkObjectByPointer(root, p);
+ }
}
private:
V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
- if (!(*p).IsHeapObject()) return;
-
- collector_->MarkRootObject(root, HeapObject::cast(*p));
+ Object object = *p;
+ if (!object.IsHeapObject()) return;
+ HeapObject heap_object = HeapObject::cast(object);
+ BasicMemoryChunk* target_page =
+ BasicMemoryChunk::FromHeapObject(heap_object);
+ if (!is_shared_heap_ && target_page->InSharedHeap()) return;
+ collector_->MarkRootObject(root, heap_object);
}
MarkCompactCollector* const collector_;
+ const bool is_shared_heap_;
};
// This visitor is used to visit the body of special objects held alive by
@@ -991,8 +1023,12 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
MarkObject(host, *p);
}
+ void VisitMapPointer(HeapObject host) final { MarkObject(host, host.map()); }
+
void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
for (ObjectSlot p = start; p < end; ++p) {
+ // The map slot should be handled in VisitMapPointer.
+ DCHECK_NE(host.map_slot(), p);
DCHECK(!HasWeakHeapObjectTag(*p));
MarkObject(host, *p);
}
@@ -1145,6 +1181,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
}
inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
+ DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
RecordMigratedSlot(host, *p, p.address());
}
@@ -1310,7 +1347,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
}
- src.set_map_word(MapWord::FromForwardingAddress(dst));
+ src.set_map_word(MapWord::FromForwardingAddress(dst), kRelaxedStore);
}
EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
@@ -1439,7 +1476,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
if (map.visitor_id() == kVisitThinString) {
HeapObject actual = ThinString::cast(object).unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
- object.set_map_word(MapWord::FromForwardingAddress(actual));
+ object.set_map_word(MapWord::FromForwardingAddress(actual),
+ kRelaxedStore);
return true;
}
// TODO(mlippautz): Handle ConsString.
@@ -1543,7 +1581,7 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
HeapObject target_object;
if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
object, size, &target_object)) {
- DCHECK(object.map_word().IsForwardingAddress());
+ DCHECK(object.map_word(kRelaxedLoad).IsForwardingAddress());
return true;
}
return false;
@@ -2483,9 +2521,6 @@ void MarkCompactCollector::ClearWeakReferences() {
}
void MarkCompactCollector::ClearJSWeakRefs() {
- if (!FLAG_harmony_weak_refs) {
- return;
- }
JSWeakRef weak_ref;
while (weak_objects_.js_weak_refs.Pop(kMainThreadTask, &weak_ref)) {
HeapObject target = HeapObject::cast(weak_ref.target());
@@ -2680,7 +2715,7 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
std::is_same<TSlot, OffHeapObjectSlot>::value,
"Only [Full|OffHeap]ObjectSlot and [Full]MaybeObjectSlot are "
"expected here");
- MapWord map_word = heap_obj.map_word();
+ MapWord map_word = heap_obj.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
@@ -2762,6 +2797,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
+ DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
UpdateRootSlotInternal(cage_base_, p);
}
@@ -2821,7 +2857,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
- MapWord map_word = HeapObject::cast(*p).map_word();
+ MapWord map_word = HeapObject::cast(*p).map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
String new_string = String::cast(map_word.ToForwardingAddress());
@@ -2841,18 +2877,23 @@ static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
void MarkCompactCollector::EvacuatePrologue() {
// New space.
NewSpace* new_space = heap()->new_space();
- // Append the list of new space pages to be processed.
- for (Page* p :
- PageRange(new_space->first_allocatable_address(), new_space->top())) {
- new_space_evacuation_pages_.push_back(p);
- }
- new_space->Flip();
- new_space->ResetLinearAllocationArea();
- DCHECK_EQ(new_space->Size(), 0);
+ if (new_space) {
+ // Append the list of new space pages to be processed.
+ for (Page* p :
+ PageRange(new_space->first_allocatable_address(), new_space->top())) {
+ new_space_evacuation_pages_.push_back(p);
+ }
+ new_space->Flip();
+ new_space->ResetLinearAllocationArea();
+
+ DCHECK_EQ(new_space->Size(), 0);
+ }
- heap()->new_lo_space()->Flip();
- heap()->new_lo_space()->ResetPendingObject();
+ if (heap()->new_lo_space()) {
+ heap()->new_lo_space()->Flip();
+ heap()->new_lo_space()->ResetPendingObject();
+ }
// Old space.
DCHECK(old_space_evacuation_pages_.empty());
@@ -2863,18 +2904,27 @@ void MarkCompactCollector::EvacuatePrologue() {
void MarkCompactCollector::EvacuateEpilogue() {
aborted_evacuation_candidates_.clear();
+
// New space.
- heap()->new_space()->set_age_mark(heap()->new_space()->top());
- DCHECK_IMPLIES(FLAG_always_promote_young_mc,
- heap()->new_space()->Size() == 0);
+ if (heap()->new_space()) {
+ heap()->new_space()->set_age_mark(heap()->new_space()->top());
+ DCHECK_IMPLIES(FLAG_always_promote_young_mc,
+ heap()->new_space()->Size() == 0);
+ }
+
// Deallocate unmarked large objects.
heap()->lo_space()->FreeUnmarkedObjects();
heap()->code_lo_space()->FreeUnmarkedObjects();
- heap()->new_lo_space()->FreeUnmarkedObjects();
+ if (heap()->new_lo_space()) {
+ heap()->new_lo_space()->FreeUnmarkedObjects();
+ }
+
// Old space. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
+
// Give pages that are queued to be freed back to the OS.
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+
#ifdef DEBUG
// Old-to-old slot sets must be empty after evacuation.
for (Page* p : *heap()->old_space()) {
@@ -3274,19 +3324,21 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
// Promote young generation large objects.
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
-
- for (auto it = heap()->new_lo_space()->begin();
- it != heap()->new_lo_space()->end();) {
- LargePage* current = *it;
- it++;
- HeapObject object = current->GetObject();
- DCHECK(!marking_state->IsGrey(object));
- if (marking_state->IsBlack(object)) {
- heap_->lo_space()->PromoteNewLargeObject(current);
- current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
- evacuation_items.emplace_back(ParallelWorkItem{}, current);
+ if (heap()->new_lo_space()) {
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+
+ for (auto it = heap()->new_lo_space()->begin();
+ it != heap()->new_lo_space()->end();) {
+ LargePage* current = *it;
+ it++;
+ HeapObject object = current->GetObject();
+ DCHECK(!marking_state->IsGrey(object));
+ if (marking_state->IsBlack(object)) {
+ heap_->lo_space()->PromoteNewLargeObject(current);
+ current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ evacuation_items.emplace_back(ParallelWorkItem{}, current);
+ }
}
}
@@ -3314,7 +3366,7 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
Object RetainAs(Object object) override {
if (object.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(object);
- MapWord map_word = heap_object.map_word();
+ MapWord map_word = heap_object.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
@@ -3443,7 +3495,7 @@ void MarkCompactCollector::Evacuate() {
UpdatePointersAfterEvacuation();
- {
+ if (heap()->new_space()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
if (!heap()->new_space()->Rebalance()) {
heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
@@ -3645,7 +3697,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
return REMOVE_SLOT;
}
if (Heap::InFromPage(heap_object)) {
- MapWord map_word = heap_object.map_word();
+ MapWord map_word = heap_object.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
HeapObjectReference::Update(THeapObjectSlot(slot),
map_word.ToForwardingAddress());
@@ -3815,6 +3867,8 @@ MarkCompactCollector::CreateRememberedSetUpdatingItem(
int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
std::vector<std::unique_ptr<UpdatingItem>>* items) {
+ if (!heap()->new_space()) return 0;
+
// Seed to space pages.
const Address space_start = heap()->new_space()->first_allocatable_address();
const Address space_end = heap()->new_space()->top();
@@ -3877,7 +3931,7 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
it != heap_->ephemeron_remembered_set_.end();) {
EphemeronHashTable table = it->first;
auto& indices = it->second;
- if (table.map_word().IsForwardingAddress()) {
+ if (table.map_word(kRelaxedLoad).IsForwardingAddress()) {
// The table has moved, and RecordMigratedSlotVisitor::VisitEphemeron
// inserts entries for the moved table into ephemeron_remembered_set_.
it = heap_->ephemeron_remembered_set_.erase(it);
@@ -3890,7 +3944,7 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
HeapObjectSlot key_slot(table.RawFieldOfElementAt(
EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
HeapObject key = key_slot.ToHeapObject();
- MapWord map_word = key.map_word();
+ MapWord map_word = key.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
key = map_word.ToForwardingAddress();
key_slot.StoreHeapObject(key);
@@ -4145,6 +4199,8 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
}
protected:
+ void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
+
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@@ -4213,7 +4269,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
}
}
}
-
+ void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@@ -4482,6 +4538,7 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) final {
for (FullObjectSlot p = start; p < end; ++p) {
+ DCHECK(!MapWord::IsPacked((*p).ptr()));
MarkObjectByPointer(p);
}
}
@@ -4539,7 +4596,8 @@ void MinorMarkCompactCollector::CollectGarbage() {
}
  // Since we promote all surviving large objects immediately, all remaining
// large objects must be dead.
- // TODO(ulan): Don't free all as soon as we have an intermediate generation.
+ // TODO(v8:11685): Don't free all as soon as we have an intermediate
+ // generation.
heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
}
@@ -4572,7 +4630,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
- Map map = object.synchronized_map();
+ Map map = object.map(kAcquireLoad);
int size = object.SizeFromMap(map);
free_start = free_end + size;
}
@@ -5027,7 +5085,7 @@ void MinorMarkCompactCollector::TraceFragmentation() {
free_bytes_index++;
}
}
- Map map = object.synchronized_map();
+ Map map = object.map(kAcquireLoad);
int size = object.SizeFromMap(map);
live_bytes += size;
free_start = free_end + size;
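Throughout mark-compact.cc the forwarding-address checks now read the header via map_word(kRelaxedLoad), since another thread may be installing a forwarding pointer into the same word during evacuation and the reader only needs an untorn value. A simplified standalone sketch of that idea; the tagging scheme here is illustrative, not V8's actual MapWord encoding:

    #include <atomic>
    #include <cstdint>

    // Simplified object header: the low bit marks a forwarding address.
    class HeaderWord {
     public:
      static constexpr std::uintptr_t kForwardingTag = 1;

      // Relaxed is sufficient for an atomic, non-torn read of the word itself;
      // ordering against the copied object payload is established elsewhere.
      bool IsForwardingAddress(std::uintptr_t* target) const {
        std::uintptr_t w = word_.load(std::memory_order_relaxed);
        if ((w & kForwardingTag) == 0) return false;
        *target = w & ~kForwardingTag;
        return true;
      }

      void SetForwardingAddress(std::uintptr_t target) {
        word_.store(target | kForwardingTag, std::memory_order_relaxed);
      }

     private:
      std::atomic<std::uintptr_t> word_{0};
    };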
diff --git a/chromium/v8/src/heap/mark-compact.h b/chromium/v8/src/heap/mark-compact.h
index 733588ae80a..035fb37064a 100644
--- a/chromium/v8/src/heap/mark-compact.h
+++ b/chromium/v8/src/heap/mark-compact.h
@@ -506,6 +506,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void RecordLiveSlotsOnPage(Page* page);
bool is_compacting() const { return compacting_; }
+ bool is_shared_heap() const { return is_shared_heap_; }
// Ensures that sweeping is finished.
//
@@ -605,11 +606,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void MarkLiveObjects() override;
- // Marks the object black and adds it to the marking work list.
+ // Marks the object grey and adds it to the marking work list.
// This is for non-incremental marking only.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
- // Marks the object black and adds it to the marking work list.
+ // Marks the object grey and adds it to the marking work list.
// This is for non-incremental marking only.
V8_INLINE void MarkRootObject(Root root, HeapObject obj);
@@ -743,6 +744,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
CollectorState state_;
#endif
+ const bool is_shared_heap_;
+
bool was_marked_incrementally_;
bool evacuation_;
diff --git a/chromium/v8/src/heap/marking-barrier-inl.h b/chromium/v8/src/heap/marking-barrier-inl.h
index 56bd7efda23..d03bdcb0f7a 100644
--- a/chromium/v8/src/heap/marking-barrier-inl.h
+++ b/chromium/v8/src/heap/marking-barrier-inl.h
@@ -28,8 +28,14 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
// visits the host object.
return false;
}
- if (WhiteToGreyAndPush(value) && is_main_thread_barrier_) {
- incremental_marking_->RestartIfNotMarking();
+ if (WhiteToGreyAndPush(value)) {
+ if (is_main_thread_barrier_) {
+ incremental_marking_->RestartIfNotMarking();
+ }
+
+ if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+ heap_->AddRetainingRoot(Root::kWriteBarrier, value);
+ }
}
return true;
}
diff --git a/chromium/v8/src/heap/marking-visitor-inl.h b/chromium/v8/src/heap/marking-visitor-inl.h
index 55c37e535bd..14e8a4d3552 100644
--- a/chromium/v8/src/heap/marking-visitor-inl.h
+++ b/chromium/v8/src/heap/marking-visitor-inl.h
@@ -22,6 +22,7 @@ namespace internal {
template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkObject(
HeapObject host, HeapObject object) {
+ DCHECK(ReadOnlyHeap::Contains(object) || heap_->Contains(object));
concrete_visitor()->SynchronizePageAccess(object);
if (concrete_visitor()->marking_state()->WhiteToGrey(object)) {
local_marking_worklists_->Push(object);
@@ -38,6 +39,9 @@ template <typename ConcreteVisitor, typename MarkingState>
template <typename THeapObjectSlot>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessStrongHeapObject(
HeapObject host, THeapObjectSlot slot, HeapObject heap_object) {
+ concrete_visitor()->SynchronizePageAccess(heap_object);
+ BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(heap_object);
+ if (!is_shared_heap_ && target_page->InSharedHeap()) return;
MarkObject(host, heap_object);
concrete_visitor()->RecordSlot(host, slot, heap_object);
}
@@ -355,7 +359,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkDescriptorArrayBlack(
DescriptorArray descriptors) {
concrete_visitor()->marking_state()->WhiteToGrey(descriptors);
if (concrete_visitor()->marking_state()->GreyToBlack(descriptors)) {
- VisitPointer(descriptors, descriptors.map_slot());
+ VisitMapPointer(descriptors);
VisitPointers(descriptors, descriptors.GetFirstPointerSlot(),
descriptors.GetDescriptorSlot(0));
return DescriptorArray::BodyDescriptor::SizeOf(descriptors.map(),
diff --git a/chromium/v8/src/heap/marking-visitor.h b/chromium/v8/src/heap/marking-visitor.h
index 45dda338d01..f8795aadfd5 100644
--- a/chromium/v8/src/heap/marking-visitor.h
+++ b/chromium/v8/src/heap/marking-visitor.h
@@ -114,7 +114,8 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
mark_compact_epoch_(mark_compact_epoch),
bytecode_flush_mode_(bytecode_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
- is_forced_gc_(is_forced_gc) {}
+ is_forced_gc_(is_forced_gc),
+ is_shared_heap_(heap->IsShared()) {}
V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
@@ -133,6 +134,11 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
V8_INLINE int VisitWeakCell(Map map, WeakCell object);
// ObjectVisitor overrides.
+ void VisitMapPointer(HeapObject host) final {
+    // Note that we are skipping recording the slot because map objects
+    // can't move, so this is safe (see ProcessStrongHeapObject for comparison).
+ MarkObject(host, HeapObject::cast(host.map()));
+ }
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
}
@@ -196,6 +202,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
const BytecodeFlushMode bytecode_flush_mode_;
const bool is_embedder_tracing_enabled_;
const bool is_forced_gc_;
+ const bool is_shared_heap_;
};
} // namespace internal
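The is_shared_heap_ field added above gates marking of objects that live on shared-heap pages. A minimal, self-contained sketch of that decision, with hypothetical names (the real check is the InSharedHeap() test in ProcessStrongHeapObject earlier in this diff):

// Hypothetical sketch, not V8 API: the shared-heap filtering reduced to a predicate.
// A marker that does not belong to the shared heap must not mark shared objects;
// only the shared heap's own marker may do so.
bool ShouldMarkTarget(bool visitor_is_shared_heap, bool target_in_shared_heap) {
  return visitor_is_shared_heap || !target_in_shared_heap;
}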
diff --git a/chromium/v8/src/heap/memory-allocator.cc b/chromium/v8/src/heap/memory-allocator.cc
index b5bccb879d0..5783f2d04b9 100644
--- a/chromium/v8/src/heap/memory-allocator.cc
+++ b/chromium/v8/src/heap/memory-allocator.cc
@@ -20,118 +20,23 @@
namespace v8 {
namespace internal {
-static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
- LAZY_INSTANCE_INITIALIZER;
-
-namespace {
-void FunctionInStaticBinaryForAddressHint() {}
-} // namespace
-
-Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
- base::MutexGuard guard(&mutex_);
- auto it = recently_freed_.find(code_range_size);
- if (it == recently_freed_.end() || it->second.empty()) {
- return FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint);
- }
- Address result = it->second.back();
- it->second.pop_back();
- return result;
-}
-
-void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
- size_t code_range_size) {
- base::MutexGuard guard(&mutex_);
- recently_freed_[code_range_size].push_back(code_range_start);
-}
-
// -----------------------------------------------------------------------------
// MemoryAllocator
//
-MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
- size_t code_range_size)
+MemoryAllocator::MemoryAllocator(Isolate* isolate,
+ v8::PageAllocator* code_page_allocator,
+ size_t capacity)
: isolate_(isolate),
data_page_allocator_(isolate->page_allocator()),
- code_page_allocator_(nullptr),
+ code_page_allocator_(code_page_allocator),
capacity_(RoundUp(capacity, Page::kPageSize)),
size_(0),
size_executable_(0),
lowest_ever_allocated_(static_cast<Address>(-1ll)),
highest_ever_allocated_(kNullAddress),
unmapper_(isolate->heap(), this) {
- InitializeCodePageAllocator(data_page_allocator_, code_range_size);
-}
-
-void MemoryAllocator::InitializeCodePageAllocator(
- v8::PageAllocator* page_allocator, size_t requested) {
- DCHECK_NULL(code_page_allocator_instance_.get());
-
- code_page_allocator_ = page_allocator;
-
- if (requested == 0) {
- if (!isolate_->RequiresCodeRange()) return;
- // When a target requires the code range feature, we put all code objects
- // in a kMaximalCodeRangeSize range of virtual address space, so that
- // they can call each other with near calls.
- requested = kMaximalCodeRangeSize;
- } else if (requested <= kMinimumCodeRangeSize) {
- requested = kMinimumCodeRangeSize;
- }
-
- const size_t reserved_area =
- kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
- if (requested < (kMaximalCodeRangeSize - reserved_area)) {
- requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
- // Fullfilling both reserved pages requirement and huge code area
- // alignments is not supported (requires re-implementation).
- DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
- }
- DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);
-
- Address hint =
- RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
- page_allocator->AllocatePageSize());
- VirtualMemory reservation(
- page_allocator, requested, reinterpret_cast<void*>(hint),
- std::max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
- if (!reservation.IsReserved()) {
- V8::FatalProcessOutOfMemory(isolate_,
- "CodeRange setup: allocate virtual memory");
- }
- code_range_ = reservation.region();
- isolate_->AddCodeRange(code_range_.begin(), code_range_.size());
-
- // We are sure that we have mapped a block of requested addresses.
- DCHECK_GE(reservation.size(), requested);
- Address base = reservation.address();
-
- // On some platforms, specifically Win64, we need to reserve some pages at
- // the beginning of an executable space. See
- // https://cs.chromium.org/chromium/src/components/crash/content/
- // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
- // for details.
- if (reserved_area > 0) {
- if (!reservation.SetPermissions(base, reserved_area,
- PageAllocator::kReadWrite))
- V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
-
- base += reserved_area;
- }
- Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
- size_t size =
- RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
- MemoryChunk::kPageSize);
- DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
-
- LOG(isolate_,
- NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
- requested));
-
- code_reservation_ = std::move(reservation);
- code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
- page_allocator, aligned_base, size,
- static_cast<size_t>(MemoryChunk::kAlignment));
- code_page_allocator_ = code_page_allocator_instance_.get();
+ DCHECK_NOT_NULL(code_page_allocator);
}
void MemoryAllocator::TearDown() {
@@ -147,13 +52,6 @@ void MemoryAllocator::TearDown() {
last_chunk_.Free();
}
- if (code_page_allocator_instance_.get()) {
- DCHECK(!code_range_.is_empty());
- code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
- code_range_.size());
- code_range_ = base::AddressRegion();
- code_page_allocator_instance_.reset();
- }
code_page_allocator_ = nullptr;
data_page_allocator_ = nullptr;
}
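With the CodeRange logic moved out, MemoryAllocator no longer reserves executable memory itself; callers inject a ready-made code page allocator. A hedged construction sketch (the stand-in allocator below is an assumption; in this baseline the real value comes from the new CodeRange component declared in code-range.h):

// Sketch only: use a stand-in code page allocator; real call sites supply the
// BoundedPageAllocator owned by the heap's CodeRange.
v8::PageAllocator* code_page_allocator = isolate->page_allocator();
size_t capacity = size_t{512} * 1024 * 1024;  // arbitrary example capacity
MemoryAllocator allocator(isolate, code_page_allocator, capacity);
// The new constructor DCHECKs that code_page_allocator is non-null and no
// longer calls InitializeCodePageAllocator().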
diff --git a/chromium/v8/src/heap/memory-allocator.h b/chromium/v8/src/heap/memory-allocator.h
index 179877e753a..d405aefa53b 100644
--- a/chromium/v8/src/heap/memory-allocator.h
+++ b/chromium/v8/src/heap/memory-allocator.h
@@ -17,6 +17,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
+#include "src/heap/code-range.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
@@ -30,27 +31,6 @@ class Heap;
class Isolate;
class ReadOnlyPage;
-// The process-wide singleton that keeps track of code range regions with the
-// intention to reuse free code range regions as a workaround for CFG memory
-// leaks (see crbug.com/870054).
-class CodeRangeAddressHint {
- public:
- // Returns the most recently freed code range start address for the given
- // size. If there is no such entry, then a random address is returned.
- V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
-
- V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
- size_t code_range_size);
-
- private:
- base::Mutex mutex_;
- // A map from code range size to an array of recently freed code range
- // addresses. There should be O(1) different code range sizes.
- // The length of each array is limited by the peak number of code ranges,
- // which should be also O(1).
- std::unordered_map<size_t, std::vector<Address>> recently_freed_;
-};
-
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
@@ -172,8 +152,9 @@ class MemoryAllocator {
V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
Address addr, size_t size);
- V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
- size_t code_range_size);
+ V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate,
+ v8::PageAllocator* code_page_allocator,
+ size_t max_capacity);
V8_EXPORT_PRIVATE void TearDown();
@@ -283,17 +264,6 @@ class MemoryAllocator {
: data_page_allocator_;
}
- // A region of memory that may contain executable code including reserved
- // OS page with read-write access in the beginning.
- const base::AddressRegion& code_range() const {
- // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
- DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
- DCHECK_IMPLIES(!code_range_.is_empty(),
- code_range_.contains(code_page_allocator_instance_->begin(),
- code_page_allocator_instance_->size()));
- return code_range_;
- }
-
Unmapper* unmapper() { return &unmapper_; }
// Performs all necessary bookkeeping to free the memory, but does not free
@@ -306,9 +276,6 @@ class MemoryAllocator {
void RegisterReadOnlyMemory(ReadOnlyPage* page);
private:
- void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
- size_t requested);
-
// PreFreeMemory logically frees the object, i.e., it unregisters the
// memory, logs a delete event and adds the chunk to remembered unmapped
// pages.
@@ -360,10 +327,6 @@ class MemoryAllocator {
Isolate* isolate_;
- // This object controls virtual space reserved for code on the V8 heap. This
- // is only valid for 64-bit architectures where kRequiresCodeRange.
- VirtualMemory code_reservation_;
-
// Page allocator used for allocating data pages. Depending on the
// configuration it may be a page allocator instance provided by
// v8::Platform or a BoundedPageAllocator (when pointer compression is
@@ -371,29 +334,12 @@ class MemoryAllocator {
v8::PageAllocator* data_page_allocator_;
// Page allocator used for allocating code pages. Depending on the
- // configuration it may be a page allocator instance provided by
- // v8::Platform or a BoundedPageAllocator (when pointer compression is
- // enabled or on those 64-bit architectures where pc-relative 32-bit
+ // configuration it may be a page allocator instance provided by v8::Platform
+ // or a BoundedPageAllocator from Heap::code_range_ (when pointer compression
+ // is enabled or on those 64-bit architectures where pc-relative 32-bit
// displacement can be used for call and jump instructions).
v8::PageAllocator* code_page_allocator_;
- // A part of the |code_reservation_| that may contain executable code
- // including reserved page with read-write access in the beginning.
- // See details below.
- base::AddressRegion code_range_;
-
- // This unique pointer owns the instance of bounded code allocator
- // that controls executable pages allocation. It does not control the
- // optionally existing page in the beginning of the |code_range_|.
- // So, summarizing all above, the following conditions hold:
- // 1) |code_reservation_| >= |code_range_|
- // 2) |code_range_| >= |optional RW pages| +
- // |code_page_allocator_instance_|. 3) |code_reservation_| is
- // AllocatePageSize()-aligned 4) |code_page_allocator_instance_| is
- // MemoryChunk::kAlignment-aligned 5) |code_range_| is
- // CommitPageSize()-aligned
- std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
-
// Maximum space size in bytes.
size_t capacity_;
diff --git a/chromium/v8/src/heap/memory-chunk.cc b/chromium/v8/src/heap/memory-chunk.cc
index c2355c6b84b..4d16da707f9 100644
--- a/chromium/v8/src/heap/memory-chunk.cc
+++ b/chromium/v8/src/heap/memory-chunk.cc
@@ -161,6 +161,9 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
chunk->possibly_empty_buckets_.Initialize();
+ // All pages of a shared heap need to be marked with this flag.
+ if (heap->IsShared()) chunk->SetFlag(IN_SHARED_HEAP);
+
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
chunk->object_start_bitmap_ = ObjectStartBitmap(chunk->area_start());
#endif
diff --git a/chromium/v8/src/heap/memory-measurement.cc b/chromium/v8/src/heap/memory-measurement.cc
index ab491e19a6e..491fe3102ab 100644
--- a/chromium/v8/src/heap/memory-measurement.cc
+++ b/chromium/v8/src/heap/memory-measurement.cc
@@ -336,7 +336,7 @@ std::unique_ptr<v8::MeasureMemoryDelegate> MemoryMeasurement::DefaultDelegate(
bool NativeContextInferrer::InferForContext(Isolate* isolate, Context context,
Address* native_context) {
- Map context_map = context.synchronized_map();
+ Map context_map = context.map(kAcquireLoad);
Object maybe_native_context =
TaggedField<Object, Map::kConstructorOrBackPointerOrNativeContextOffset>::
Acquire_Load(isolate, context_map);
diff --git a/chromium/v8/src/heap/new-spaces-inl.h b/chromium/v8/src/heap/new-spaces-inl.h
index ffd5d8cfd7e..98626260e11 100644
--- a/chromium/v8/src/heap/new-spaces-inl.h
+++ b/chromium/v8/src/heap/new-spaces-inl.h
@@ -5,12 +5,12 @@
#ifndef V8_HEAP_NEW_SPACES_INL_H_
#define V8_HEAP_NEW_SPACES_INL_H_
+#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/new-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/tagged-impl.h"
-#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
@@ -87,6 +87,8 @@ HeapObject SemiSpaceObjectIterator::Next() {
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
+ DCHECK(!FLAG_single_generation);
+ DCHECK(!FLAG_enable_third_party_heap);
#if DEBUG
VerifyTop();
#endif
@@ -110,7 +112,7 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationOrigin origin) {
Address top = allocation_info_.top();
if (allocation_info_.limit() < top + size_in_bytes) {
- return AllocationResult::Retry();
+ return AllocationResult::Retry(NEW_SPACE);
}
HeapObject obj = HeapObject::FromAddress(top);
@@ -135,7 +137,7 @@ AllocationResult NewSpace::AllocateFastAligned(
if (allocation_info_.limit() - top <
static_cast<uintptr_t>(aligned_size_in_bytes)) {
- return AllocationResult::Retry();
+ return AllocationResult::Retry(NEW_SPACE);
}
HeapObject obj = HeapObject::FromAddress(top);
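AllocationResult::Retry() now records the space that failed (NEW_SPACE here), so the fallback path can attribute the failure. A small hedged sketch of the invariant, using only calls that appear in this diff:

// A retry result never converts to an object; callers take the slow path,
// which may schedule a young-generation GC for the recorded space.
AllocationResult failed = AllocationResult::Retry(NEW_SPACE);
HeapObject obj;
if (!failed.To(&obj)) {
  // Fall back to NewSpace::AllocateRawSlow() / trigger GC.
}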
diff --git a/chromium/v8/src/heap/new-spaces.cc b/chromium/v8/src/heap/new-spaces.cc
index 029b77beb4d..8486c7bcc44 100644
--- a/chromium/v8/src/heap/new-spaces.cc
+++ b/chromium/v8/src/heap/new-spaces.cc
@@ -4,6 +4,7 @@
#include "src/heap/new-spaces.h"
+#include "src/common/globals.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
@@ -628,8 +629,9 @@ AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
- return AllocationResult::Retry();
+ return AllocationResult::Retry(NEW_SPACE);
}
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
@@ -646,8 +648,9 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, alignment)) {
- return AllocationResult::Retry();
+ return AllocationResult::Retry(NEW_SPACE);
}
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
@@ -741,9 +744,11 @@ void NewSpace::Verify(Isolate* isolate) {
CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
}
+ if (!FLAG_concurrent_array_buffer_sweeping) {
size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
CHECK_EQ(bytes,
ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
+ }
// Check semi-spaces.
CHECK_EQ(from_space_.id(), kFromSpace);
diff --git a/chromium/v8/src/heap/object-stats.cc b/chromium/v8/src/heap/object-stats.cc
index 86b2e6a2c40..5e0074e47f7 100644
--- a/chromium/v8/src/heap/object-stats.cc
+++ b/chromium/v8/src/heap/object-stats.cc
@@ -74,7 +74,7 @@ class FieldStatsCollector : public ObjectVisitor {
raw_fields_count_in_object -= kDoubleSize / kTaggedSize;
*boxed_double_fields_count_ += 1;
} else if (host.IsSeqString()) {
- int string_data = SeqString::cast(host).synchronized_length() *
+ int string_data = SeqString::cast(host).length(kAcquireLoad) *
(String::cast(host).IsOneByteRepresentation() ? 1 : 2) /
kTaggedSize;
DCHECK_LE(string_data, raw_fields_count_in_object);
diff --git a/chromium/v8/src/heap/objects-visiting-inl.h b/chromium/v8/src/heap/objects-visiting-inl.h
index 37ecd50c8df..8aac430ddbd 100644
--- a/chromium/v8/src/heap/objects-visiting-inl.h
+++ b/chromium/v8/src/heap/objects-visiting-inl.h
@@ -76,8 +76,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
template <typename ResultType, typename ConcreteVisitor>
void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
HeapObject host) {
- DCHECK(!host.map_word().IsForwardingAddress());
- static_cast<ConcreteVisitor*>(this)->VisitPointer(host, host.map_slot());
+ DCHECK(!host.map_word(kRelaxedLoad).IsForwardingAddress());
+ if (!static_cast<ConcreteVisitor*>(this)->ShouldVisitMapPointer()) return;
+ static_cast<ConcreteVisitor*>(this)->VisitMapPointer(host);
}
#define VISIT(TypeName) \
@@ -167,7 +168,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
if (visitor->ShouldVisitMapPointer()) {
visitor->VisitMapPointer(object);
}
- return static_cast<ResultType>(object.size());
+ return static_cast<ResultType>(object.size(kRelaxedLoad));
}
template <typename ConcreteVisitor>
diff --git a/chromium/v8/src/heap/objects-visiting.h b/chromium/v8/src/heap/objects-visiting.h
index 9f133d6cfab..7ea322dfb99 100644
--- a/chromium/v8/src/heap/objects-visiting.h
+++ b/chromium/v8/src/heap/objects-visiting.h
@@ -52,8 +52,11 @@ namespace internal {
V(SyntheticModule) \
V(TransitionArray) \
IF_WASM(V, WasmArray) \
+ IF_WASM(V, WasmExportedFunctionData) \
+ IF_WASM(V, WasmFunctionData) \
IF_WASM(V, WasmIndirectFunctionTable) \
IF_WASM(V, WasmInstanceObject) \
+ IF_WASM(V, WasmJSFunctionData) \
IF_WASM(V, WasmStruct) \
IF_WASM(V, WasmTypeInfo)
@@ -78,6 +81,8 @@ class HeapVisitor : public ObjectVisitor {
public:
V8_INLINE ResultType Visit(HeapObject object);
V8_INLINE ResultType Visit(Map map, HeapObject object);
+ // A callback for visiting the map pointer in the object header.
+ V8_INLINE void VisitMapPointer(HeapObject host);
protected:
// A guard predicate for visiting the object.
@@ -86,8 +91,6 @@ class HeapVisitor : public ObjectVisitor {
V8_INLINE bool ShouldVisit(HeapObject object) { return true; }
// Guard predicate for visiting the objects map pointer separately.
V8_INLINE bool ShouldVisitMapPointer() { return true; }
- // A callback for visiting the map pointer in the object header.
- V8_INLINE void VisitMapPointer(HeapObject host);
// If this predicate returns false, then the heap visitor will fail
// in default Visit implemention for subclasses of JSObject.
V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
diff --git a/chromium/v8/src/heap/paged-spaces-inl.h b/chromium/v8/src/heap/paged-spaces-inl.h
index e135e30efc3..8c77186583d 100644
--- a/chromium/v8/src/heap/paged-spaces-inl.h
+++ b/chromium/v8/src/heap/paged-spaces-inl.h
@@ -131,6 +131,7 @@ AllocationResult PagedSpace::AllocateFastAligned(
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureLabMain(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
@@ -153,6 +154,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
DCHECK_EQ(identity(), OLD_SPACE);
int allocation_size = size_in_bytes;
// We don't know exactly how much filler we need to align until space is
@@ -182,6 +184,7 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result;
if (alignment != kWordAligned) {
diff --git a/chromium/v8/src/heap/paged-spaces.cc b/chromium/v8/src/heap/paged-spaces.cc
index f541974a506..8543f109ede 100644
--- a/chromium/v8/src/heap/paged-spaces.cc
+++ b/chromium/v8/src/heap/paged-spaces.cc
@@ -731,7 +731,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
}
CHECK(allocation_pointer_found_in_space);
- if (identity() == OLD_SPACE) {
+ if (identity() == OLD_SPACE && !FLAG_concurrent_array_buffer_sweeping) {
size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
CHECK_EQ(bytes,
ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
@@ -832,8 +832,8 @@ void PagedSpace::PrepareForMarkCompact() {
bool PagedSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
VMState<GC> state(heap()->isolate());
- RuntimeCallTimerScope runtime_timer(
- heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
+ RCS_SCOPE(heap()->isolate(),
+ RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
return RawRefillLabMain(size_in_bytes, origin);
}
diff --git a/chromium/v8/src/heap/read-only-heap.cc b/chromium/v8/src/heap/read-only-heap.cc
index d5f7e843efe..05ca965e082 100644
--- a/chromium/v8/src/heap/read-only-heap.cc
+++ b/chromium/v8/src/heap/read-only-heap.cc
@@ -249,6 +249,10 @@ bool ReadOnlyHeap::read_only_object_cache_is_initialized() const {
return read_only_object_cache_.size() > 0;
}
+size_t ReadOnlyHeap::read_only_object_cache_size() const {
+ return read_only_object_cache_.size();
+}
+
ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap)
: ReadOnlyHeapObjectIterator(ro_heap->read_only_space()) {}
diff --git a/chromium/v8/src/heap/read-only-heap.h b/chromium/v8/src/heap/read-only-heap.h
index f947832c5f9..558a694c944 100644
--- a/chromium/v8/src/heap/read-only-heap.h
+++ b/chromium/v8/src/heap/read-only-heap.h
@@ -81,14 +81,15 @@ class ReadOnlyHeap {
// Returns a read-only cache entry at a particular index.
Object cached_read_only_object(size_t i) const;
bool read_only_object_cache_is_initialized() const;
+ size_t read_only_object_cache_size() const;
ReadOnlySpace* read_only_space() const { return read_only_space_; }
// Returns whether the ReadOnlySpace will actually be shared taking into
// account whether shared memory is available with pointer compression.
static bool IsReadOnlySpaceShared() {
- return V8_SHARED_RO_HEAP_BOOL && (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL ||
- IsSharedMemoryAvailable());
+ return V8_SHARED_RO_HEAP_BOOL &&
+ (!COMPRESS_POINTERS_BOOL || COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL);
}
virtual void InitializeIsolateRoots(Isolate* isolate) {}
diff --git a/chromium/v8/src/heap/read-only-spaces.cc b/chromium/v8/src/heap/read-only-spaces.cc
index 5adac66afe0..248b5c22e6f 100644
--- a/chromium/v8/src/heap/read-only-spaces.cc
+++ b/chromium/v8/src/heap/read-only-spaces.cc
@@ -56,7 +56,18 @@ void ReadOnlyArtifacts::VerifyChecksum(SnapshotData* read_only_snapshot_data,
CHECK_WITH_MSG(snapshot_checksum,
"Attempt to create the read-only heap after already "
"creating from a snapshot.");
- CHECK_EQ(read_only_blob_checksum_, snapshot_checksum);
+ if (!FLAG_stress_snapshot) {
+ // --stress-snapshot is only intended to check how well the
+ // serializer/deserializer copes with unexpected objects, and is not
+ // intended to test whether the newly deserialized Isolate would actually
+ // work since it serializes a currently running Isolate, which is not
+ // supported. As a result, it's possible that it will create a new
+ // read-only snapshot that is not compatible with the original one (for
+ // instance due to the string table being re-ordered). Since we won't
+ // actually use that new Isolate, we're ok with any potential corruption.
+ // See crbug.com/1043058.
+ CHECK_EQ(read_only_blob_checksum_, snapshot_checksum);
+ }
} else {
// If there's no checksum, then that means the read-only heap objects are
// being created.
@@ -70,11 +81,10 @@ SingleCopyReadOnlyArtifacts::~SingleCopyReadOnlyArtifacts() {
// TearDown requires MemoryAllocator which itself is tied to an Isolate.
shared_read_only_space_->pages_.resize(0);
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
for (ReadOnlyPage* chunk : pages_) {
void* chunk_address = reinterpret_cast<void*>(chunk->address());
- size_t size = RoundUp(chunk->size(), page_allocator->AllocatePageSize());
- CHECK(page_allocator->FreePages(chunk_address, size));
+ size_t size = RoundUp(chunk->size(), page_allocator_->AllocatePageSize());
+ CHECK(page_allocator_->FreePages(chunk_address, size));
}
}
@@ -86,6 +96,12 @@ ReadOnlyHeap* SingleCopyReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
void SingleCopyReadOnlyArtifacts::Initialize(Isolate* isolate,
std::vector<ReadOnlyPage*>&& pages,
const AllocationStats& stats) {
+ // Do not use the platform page allocator when sharing a pointer compression
+ // cage, as the Isolate's page allocator is a BoundedPageAllocator tied to the
+ // shared cage.
+ page_allocator_ = COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL
+ ? isolate->page_allocator()
+ : GetPlatformPageAllocator();
pages_ = std::move(pages);
set_accounting_stats(stats);
set_shared_read_only_space(
@@ -304,11 +320,12 @@ void ReadOnlySpace::DetachPagesAndAddToArtifacts(
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
Heap* heap = ReadOnlySpace::heap();
- // Without pointer compression, ReadOnlySpace pages are directly shared
- // between all heaps and so must be unregistered from their originating
- // allocator.
- Seal(COMPRESS_POINTERS_BOOL ? SealMode::kDetachFromHeap
- : SealMode::kDetachFromHeapAndUnregisterMemory);
+ // Without pointer compression in a per-Isolate cage, ReadOnlySpace pages are
+ // directly shared between all heaps and so must be unregistered from their
+ // originating allocator.
+ Seal(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL
+ ? SealMode::kDetachFromHeap
+ : SealMode::kDetachFromHeapAndUnregisterMemory);
artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
}
@@ -635,6 +652,7 @@ HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
AllocationResult ReadOnlySpace::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
+ DCHECK(!FLAG_enable_third_party_heap);
DCHECK(!IsDetached());
int allocation_size = size_in_bytes;
@@ -789,9 +807,9 @@ SharedReadOnlySpace::SharedReadOnlySpace(Heap* heap,
SingleCopyReadOnlyArtifacts* artifacts)
: SharedReadOnlySpace(heap) {
// This constructor should only be used when RO_SPACE is shared without
- // pointer compression.
+ // pointer compression in a per-Isolate cage.
DCHECK(V8_SHARED_RO_HEAP_BOOL);
- DCHECK(!COMPRESS_POINTERS_BOOL);
+ DCHECK(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
accounting_stats_ = artifacts->accounting_stats();
pages_ = artifacts->pages();
}
diff --git a/chromium/v8/src/heap/read-only-spaces.h b/chromium/v8/src/heap/read-only-spaces.h
index ee4b2a82234..0ca05d8d4c8 100644
--- a/chromium/v8/src/heap/read-only-spaces.h
+++ b/chromium/v8/src/heap/read-only-spaces.h
@@ -132,6 +132,9 @@ class SingleCopyReadOnlyArtifacts : public ReadOnlyArtifacts {
const AllocationStats& stats) override;
void ReinstallReadOnlySpace(Isolate* isolate) override;
void VerifyHeapAndSpaceRelationships(Isolate* isolate) override;
+
+ private:
+ v8::PageAllocator* page_allocator_ = nullptr;
};
// -----------------------------------------------------------------------------
diff --git a/chromium/v8/src/heap/scavenger-inl.h b/chromium/v8/src/heap/scavenger-inl.h
index 8560b5b62ba..193565d34de 100644
--- a/chromium/v8/src/heap/scavenger-inl.h
+++ b/chromium/v8/src/heap/scavenger-inl.h
@@ -119,7 +119,7 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
int size) {
// Copy the content of source to target.
- target.set_map_word(MapWord::FromMap(map));
+ target.set_map_word(MapWord::FromMap(map), kRelaxedStore);
heap()->CopyBlock(target.address() + kTaggedSize,
source.address() + kTaggedSize, size - kTaggedSize);
@@ -159,7 +159,7 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(NEW_SPACE, target, object_size);
- MapWord map_word = object.synchronized_map_word();
+ MapWord map_word = object.map_word(kAcquireLoad);
HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
DCHECK(!Heap::InFromPage(*slot));
return Heap::InToPage(*slot)
@@ -184,6 +184,7 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
+ DCHECK_GE(object_size, Heap::kMinObjectSizeInTaggedWords * kTaggedSize);
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation = allocator_.Allocate(
OLD_SPACE, object_size, AllocationOrigin::kGC, alignment);
@@ -195,7 +196,7 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(OLD_SPACE, target, object_size);
- MapWord map_word = object.synchronized_map_word();
+ MapWord map_word = object.map_word(kAcquireLoad);
HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
DCHECK(!Heap::InFromPage(*slot));
return Heap::InToPage(*slot)
@@ -326,24 +327,25 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
HeapObjectReference::Update(slot, first);
if (!Heap::InYoungGeneration(first)) {
- object.synchronized_set_map_word(MapWord::FromForwardingAddress(first));
+ object.set_map_word(MapWord::FromForwardingAddress(first), kReleaseStore);
return REMOVE_SLOT;
}
- MapWord first_word = first.synchronized_map_word();
+ MapWord first_word = first.map_word(kAcquireLoad);
if (first_word.IsForwardingAddress()) {
HeapObject target = first_word.ToForwardingAddress();
HeapObjectReference::Update(slot, target);
- object.synchronized_set_map_word(MapWord::FromForwardingAddress(target));
+ object.set_map_word(MapWord::FromForwardingAddress(target),
+ kReleaseStore);
return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
Map map = first_word.ToMap();
SlotCallbackResult result =
EvacuateObjectDefault(map, slot, first, first.SizeFromMap(map),
Map::ObjectFieldsFrom(map.visitor_id()));
- object.synchronized_set_map_word(
- MapWord::FromForwardingAddress(slot.ToHeapObject()));
+ object.set_map_word(MapWord::FromForwardingAddress(slot.ToHeapObject()),
+ kReleaseStore);
return result;
}
DCHECK_EQ(ObjectFields::kMaybePointers,
@@ -390,7 +392,7 @@ SlotCallbackResult Scavenger::ScavengeObject(THeapObjectSlot p,
DCHECK(Heap::InFromPage(object));
// Synchronized load that consumes the publishing CAS of MigrateObject.
- MapWord first_word = object.synchronized_map_word();
+ MapWord first_word = object.map_word(kAcquireLoad);
// If the first word is a forwarding address, the object has already been
// copied.
diff --git a/chromium/v8/src/heap/scavenger.cc b/chromium/v8/src/heap/scavenger.cc
index be9971e7c68..efa3ed2f614 100644
--- a/chromium/v8/src/heap/scavenger.cc
+++ b/chromium/v8/src/heap/scavenger.cc
@@ -130,13 +130,13 @@ namespace {
V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, Object object) {
return Heap::InFromPage(object) &&
- !HeapObject::cast(object).map_word().IsForwardingAddress();
+ !HeapObject::cast(object).map_word(kRelaxedLoad).IsForwardingAddress();
}
// Same as IsUnscavengedHeapObject() above but specialized for HeapObjects.
V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, HeapObject heap_object) {
return Heap::InFromPage(heap_object) &&
- !heap_object.map_word().IsForwardingAddress();
+ !heap_object.map_word(kRelaxedLoad).IsForwardingAddress();
}
bool IsUnscavengedHeapObjectSlot(Heap* heap, FullObjectSlot p) {
@@ -152,7 +152,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
return object;
}
- MapWord map_word = HeapObject::cast(object).map_word();
+ MapWord map_word = HeapObject::cast(object).map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
@@ -384,6 +384,10 @@ void ScavengerCollector::CollectGarbage() {
&Heap::UpdateYoungReferenceInExternalStringTableEntry);
heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
+
+ if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+ heap_->UpdateRetainersAfterScavenge();
+ }
}
if (FLAG_concurrent_marking) {
@@ -481,7 +485,7 @@ void ScavengerCollector::HandleSurvivingNewLargeObjects() {
Map map = update_info.second;
// Order is important here. We have to re-install the map to have access
// to meta-data like size during page promotion.
- object.set_map_word(MapWord::FromMap(map));
+ object.set_map_word(MapWord::FromMap(map), kRelaxedStore);
LargePage* page = LargePage::FromHeapObject(object);
heap_->lo_space()->PromoteNewLargeObject(page);
}
@@ -734,6 +738,7 @@ void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
FullObjectSlot p) {
DCHECK(!HasWeakHeapObjectTag(*p));
+ DCHECK(!MapWord::IsPacked((*p).ptr()));
ScavengePointer(p);
}
@@ -741,12 +746,15 @@ void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
FullObjectSlot start,
FullObjectSlot end) {
// Copy all HeapObject pointers in [start, end)
- for (FullObjectSlot p = start; p < end; ++p) ScavengePointer(p);
+ for (FullObjectSlot p = start; p < end; ++p) {
+ ScavengePointer(p);
+ }
}
void RootScavengeVisitor::ScavengePointer(FullObjectSlot p) {
Object object = *p;
DCHECK(!HasWeakHeapObjectTag(object));
+ DCHECK(!MapWord::IsPacked(object.ptr()));
if (Heap::InYoungGeneration(object)) {
scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
}
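The synchronized_* accessors are replaced by explicit memory orders; installing and reading a forwarding map word now form an acquire/release pair. A hedged fragment showing that pairing with the calls used above (surrounding orchestration omitted):

// Copying side: publish the forwarding address with a release store.
object.set_map_word(MapWord::FromForwardingAddress(target), kReleaseStore);

// Reading side (possibly another scavenger thread): acquire-load before use.
MapWord word = object.map_word(kAcquireLoad);
if (word.IsForwardingAddress()) {
  HeapObject forwarded = word.ToForwardingAddress();
  // ... update the slot to point at |forwarded| ...
}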
diff --git a/chromium/v8/src/heap/setup-heap-internal.cc b/chromium/v8/src/heap/setup-heap-internal.cc
index 8a3e1fda121..886c89aeae0 100644
--- a/chromium/v8/src/heap/setup-heap-internal.cc
+++ b/chromium/v8/src/heap/setup-heap-internal.cc
@@ -195,9 +195,10 @@ void Heap::FinalizePartialMap(Map map) {
map.set_constructor_or_back_pointer(roots.null_value());
}
-AllocationResult Heap::Allocate(Map map, AllocationType allocation_type) {
- DCHECK(map.instance_type() != MAP_TYPE);
- int size = map.instance_size();
+AllocationResult Heap::Allocate(Handle<Map> map,
+ AllocationType allocation_type) {
+ DCHECK(map->instance_type() != MAP_TYPE);
+ int size = map->instance_size();
HeapObject result;
AllocationResult allocation = AllocateRaw(size, allocation_type);
if (!allocation.To(&result)) return allocation;
@@ -205,7 +206,7 @@ AllocationResult Heap::Allocate(Map map, AllocationType allocation_type) {
WriteBarrierMode write_barrier_mode =
allocation_type == AllocationType::kYoung ? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
- result.set_map_after_allocation(map, write_barrier_mode);
+ result.set_map_after_allocation(*map, write_barrier_mode);
return result;
}
@@ -281,7 +282,7 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult allocation =
- Allocate(roots.null_map(), AllocationType::kReadOnly);
+ Allocate(roots.null_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_null_value(Oddball::cast(obj));
@@ -289,7 +290,7 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult allocation =
- Allocate(roots.undefined_map(), AllocationType::kReadOnly);
+ Allocate(roots.undefined_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_undefined_value(Oddball::cast(obj));
@@ -297,7 +298,7 @@ bool Heap::CreateInitialMaps() {
DCHECK(!InYoungGeneration(roots.undefined_value()));
{
AllocationResult allocation =
- Allocate(roots.the_hole_map(), AllocationType::kReadOnly);
+ Allocate(roots.the_hole_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_the_hole_value(Oddball::cast(obj));
@@ -317,7 +318,7 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty enum cache.
{
AllocationResult allocation =
- Allocate(roots.enum_cache_map(), AllocationType::kReadOnly);
+ Allocate(roots.enum_cache_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_empty_enum_cache(EnumCache::cast(obj));
@@ -381,6 +382,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
Context::SYMBOL_FUNCTION_INDEX)
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
+ ALLOCATE_MAP(MEGA_DOM_HANDLER_TYPE, MegaDomHandler::kSize, mega_dom_handler)
ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
Context::BOOLEAN_FUNCTION_INDEX);
@@ -500,6 +502,10 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
code_data_container)
+ IF_WASM(ALLOCATE_MAP, WASM_EXPORTED_FUNCTION_DATA_TYPE,
+ WasmExportedFunctionData::kSize, wasm_exported_function_data)
+ IF_WASM(ALLOCATE_MAP, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData::kSize,
+ wasm_js_function_data)
IF_WASM(ALLOCATE_MAP, WASM_TYPE_INFO_TYPE, WasmTypeInfo::kSize,
wasm_type_info)
@@ -550,8 +556,9 @@ bool Heap::CreateInitialMaps() {
{
// Empty array boilerplate description
- AllocationResult alloc = Allocate(roots.array_boilerplate_description_map(),
- AllocationType::kReadOnly);
+ AllocationResult alloc =
+ Allocate(roots.array_boilerplate_description_map_handle(),
+ AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
ArrayBoilerplateDescription::cast(obj).set_constant_elements(
@@ -564,7 +571,7 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult allocation =
- Allocate(roots.boolean_map(), AllocationType::kReadOnly);
+ Allocate(roots.boolean_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_true_value(Oddball::cast(obj));
@@ -572,7 +579,7 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult allocation =
- Allocate(roots.boolean_map(), AllocationType::kReadOnly);
+ Allocate(roots.boolean_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_false_value(Oddball::cast(obj));
@@ -864,6 +871,7 @@ void Heap::CreateInitialObjects() {
set_is_concat_spreadable_protector(*factory->NewProtector());
set_map_iterator_protector(*factory->NewProtector());
set_no_elements_protector(*factory->NewProtector());
+ set_mega_dom_protector(*factory->NewProtector());
set_promise_hook_protector(*factory->NewProtector());
set_promise_resolve_protector(*factory->NewProtector());
set_promise_species_protector(*factory->NewProtector());
diff --git a/chromium/v8/src/heap/spaces-inl.h b/chromium/v8/src/heap/spaces-inl.h
index ccdf050e681..9b9a02af437 100644
--- a/chromium/v8/src/heap/spaces-inl.h
+++ b/chromium/v8/src/heap/spaces-inl.h
@@ -141,7 +141,8 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + size_in_bytes;
- if (new_top > allocation_info_.limit()) return AllocationResult::Retry();
+ if (new_top > allocation_info_.limit())
+ return AllocationResult::Retry(NEW_SPACE);
allocation_info_.set_top(new_top);
if (filler_size > 0) {
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index b6c1e0bcc2a..63346786d51 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -11,6 +11,7 @@
#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
+#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
@@ -31,7 +32,6 @@
#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
-#include "src/sanitizer/msan.h"
#include "src/snapshot/snapshot.h"
#include "src/utils/ostreams.h"
diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h
index 0a0a884dc02..4afada00ceb 100644
--- a/chromium/v8/src/heap/spaces.h
+++ b/chromium/v8/src/heap/spaces.h
@@ -221,9 +221,11 @@ class Page : public MemoryChunk {
// from [page_addr .. page_addr + kPageSize[. This only works if the object
// is in fact in a page.
static Page* FromAddress(Address addr) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
}
static Page* FromHeapObject(HeapObject o) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
}
@@ -232,6 +234,7 @@ class Page : public MemoryChunk {
// we subtract a hole word. The valid address ranges from
// [page_addr + area_start_ .. page_addr + kPageSize + kTaggedSize].
static Page* FromAllocationAreaAddress(Address address) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return Page::FromAddress(address - kTaggedSize);
}
diff --git a/chromium/v8/src/heap/sweeper.cc b/chromium/v8/src/heap/sweeper.cc
index c4e60be7072..0e35a3cea2b 100644
--- a/chromium/v8/src/heap/sweeper.cc
+++ b/chromium/v8/src/heap/sweeper.cc
@@ -351,9 +351,6 @@ int Sweeper::RawSweep(
size_t live_bytes = 0;
size_t max_freed_bytes = 0;
- // TODO(ulan): we don't have to clear type old-to-old slots in code space
- // because the concurrent marker doesn't mark code objects. This requires
- // the write barrier for code objects to check the color of the code object.
bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
p->typed_slot_set<OLD_TO_OLD>() != nullptr;
@@ -393,7 +390,8 @@ int Sweeper::RawSweep(
free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
&old_to_new_cleanup);
}
- Map map = object.synchronized_map();
+ Map map = object.map(kAcquireLoad);
+ DCHECK(map.IsMap());
int size = object.SizeFromMap(map);
live_bytes += size;
free_start = free_end + size;
diff --git a/chromium/v8/src/heap/third-party/heap-api-stub.cc b/chromium/v8/src/heap/third-party/heap-api-stub.cc
index 6d31479bec6..f7ccb50810a 100644
--- a/chromium/v8/src/heap/third-party/heap-api-stub.cc
+++ b/chromium/v8/src/heap/third-party/heap-api-stub.cc
@@ -38,6 +38,12 @@ const base::AddressRegion& Heap::GetCodeRange() {
}
// static
+bool Heap::InSpace(Address, AllocationSpace) { return false; }
+
+// static
+bool Heap::InOldSpace(Address) { return false; }
+
+// static
bool Heap::InCodeSpace(Address) { return false; }
// static
diff --git a/chromium/v8/src/heap/third-party/heap-api.h b/chromium/v8/src/heap/third-party/heap-api.h
index c4712b988e9..16f2fde8842 100644
--- a/chromium/v8/src/heap/third-party/heap-api.h
+++ b/chromium/v8/src/heap/third-party/heap-api.h
@@ -26,6 +26,10 @@ class Heap {
const base::AddressRegion& GetCodeRange();
+ static bool InSpace(Address address, AllocationSpace space);
+
+ static bool InOldSpace(Address address);
+
static bool InCodeSpace(Address address);
static bool InReadOnlySpace(Address address);
@@ -38,6 +42,8 @@ class Heap {
HeapObject NextObject();
bool CollectGarbage();
+
+ size_t Capacity();
};
} // namespace third_party_heap
diff --git a/chromium/v8/src/heap/weak-object-worklists.cc b/chromium/v8/src/heap/weak-object-worklists.cc
index 532739000fe..84df473076f 100644
--- a/chromium/v8/src/heap/weak-object-worklists.cc
+++ b/chromium/v8/src/heap/weak-object-worklists.cc
@@ -115,19 +115,17 @@ void WeakObjects::UpdateWeakObjectsInCode(
void WeakObjects::UpdateJSWeakRefs(
WeakObjectWorklist<JSWeakRef>& js_weak_refs) {
- if (FLAG_harmony_weak_refs) {
- js_weak_refs.Update(
- [](JSWeakRef js_weak_ref_in, JSWeakRef* js_weak_ref_out) -> bool {
- JSWeakRef forwarded = ForwardingAddress(js_weak_ref_in);
-
- if (!forwarded.is_null()) {
- *js_weak_ref_out = forwarded;
- return true;
- }
-
- return false;
- });
- }
+ js_weak_refs.Update(
+ [](JSWeakRef js_weak_ref_in, JSWeakRef* js_weak_ref_out) -> bool {
+ JSWeakRef forwarded = ForwardingAddress(js_weak_ref_in);
+
+ if (!forwarded.is_null()) {
+ *js_weak_ref_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
}
void WeakObjects::UpdateWeakCells(WeakObjectWorklist<WeakCell>& weak_cells) {