Diffstat (limited to 'chromium/v8/src/heap/cppgc')
-rw-r--r-- chromium/v8/src/heap/cppgc/allocation.cc | 16
-rw-r--r-- chromium/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc | 39
-rw-r--r-- chromium/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc | 52
-rw-r--r-- chromium/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S | 32
-rw-r--r-- chromium/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc | 53
-rw-r--r-- chromium/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S | 48
-rw-r--r-- chromium/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc | 48
-rw-r--r-- chromium/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc | 48
-rw-r--r-- chromium/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc | 94
-rw-r--r-- chromium/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc | 35
-rw-r--r-- chromium/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc | 94
-rw-r--r-- chromium/v8/src/heap/cppgc/asm/x64/push_registers_masm.S | 45
-rw-r--r-- chromium/v8/src/heap/cppgc/caged-heap-local-data.cc | 36
-rw-r--r-- chromium/v8/src/heap/cppgc/caged-heap.cc | 85
-rw-r--r-- chromium/v8/src/heap/cppgc/caged-heap.h | 53
-rw-r--r-- chromium/v8/src/heap/cppgc/free-list.cc | 9
-rw-r--r-- chromium/v8/src/heap/cppgc/garbage-collector.h | 56
-rw-r--r-- chromium/v8/src/heap/cppgc/gc-info-table.cc | 5
-rw-r--r-- chromium/v8/src/heap/cppgc/gc-info-table.h | 3
-rw-r--r-- chromium/v8/src/heap/cppgc/gc-info.cc | 5
-rw-r--r-- chromium/v8/src/heap/cppgc/gc-invoker.cc | 105
-rw-r--r-- chromium/v8/src/heap/cppgc/gc-invoker.h | 47
-rw-r--r-- chromium/v8/src/heap/cppgc/globals.h | 7
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-base.cc | 88
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-base.h | 151
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-growing.cc | 99
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-growing.h | 53
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-inl.h | 33
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-object-header-inl.h | 5
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-object-header.h | 3
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-page-inl.h | 30
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-page.cc | 141
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-page.h | 56
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-space.cc | 20
-rw-r--r-- chromium/v8/src/heap/cppgc/heap-space.h | 5
-rw-r--r-- chromium/v8/src/heap/cppgc/heap.cc | 110
-rw-r--r-- chromium/v8/src/heap/cppgc/heap.h | 134
-rw-r--r-- chromium/v8/src/heap/cppgc/marker.cc | 140
-rw-r--r-- chromium/v8/src/heap/cppgc/marker.h | 90
-rw-r--r-- chromium/v8/src/heap/cppgc/marking-visitor.cc | 67
-rw-r--r-- chromium/v8/src/heap/cppgc/marking-visitor.h | 22
-rw-r--r-- chromium/v8/src/heap/cppgc/object-allocator-inl.h | 4
-rw-r--r-- chromium/v8/src/heap/cppgc/object-allocator.cc | 145
-rw-r--r-- chromium/v8/src/heap/cppgc/object-allocator.h | 41
-rw-r--r-- chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h | 1
-rw-r--r-- chromium/v8/src/heap/cppgc/page-memory-inl.h | 10
-rw-r--r-- chromium/v8/src/heap/cppgc/page-memory.h | 16
-rw-r--r-- chromium/v8/src/heap/cppgc/persistent-node.cc | 12
-rw-r--r-- chromium/v8/src/heap/cppgc/platform.cc | 10
-rw-r--r-- chromium/v8/src/heap/cppgc/pointer-policies.cc | 4
-rw-r--r-- chromium/v8/src/heap/cppgc/prefinalizer-handler.cc | 19
-rw-r--r-- chromium/v8/src/heap/cppgc/prefinalizer-handler.h | 2
-rw-r--r-- chromium/v8/src/heap/cppgc/process-heap.cc | 13
-rw-r--r-- chromium/v8/src/heap/cppgc/raw-heap.cc | 2
-rw-r--r-- chromium/v8/src/heap/cppgc/raw-heap.h | 10
-rw-r--r-- chromium/v8/src/heap/cppgc/stack.cc | 129
-rw-r--r-- chromium/v8/src/heap/cppgc/stack.h | 43
-rw-r--r-- chromium/v8/src/heap/cppgc/stats-collector.cc | 114
-rw-r--r-- chromium/v8/src/heap/cppgc/stats-collector.h | 130
-rw-r--r-- chromium/v8/src/heap/cppgc/sweeper.cc | 479
-rw-r--r-- chromium/v8/src/heap/cppgc/sweeper.h | 7
-rw-r--r-- chromium/v8/src/heap/cppgc/task-handle.h | 47
-rw-r--r-- chromium/v8/src/heap/cppgc/virtual-memory.cc | 56
-rw-r--r-- chromium/v8/src/heap/cppgc/virtual-memory.h | 60
-rw-r--r-- chromium/v8/src/heap/cppgc/visitor.cc | 76
-rw-r--r-- chromium/v8/src/heap/cppgc/visitor.h | 34
-rw-r--r-- chromium/v8/src/heap/cppgc/write-barrier.cc | 84
67 files changed, 2575 insertions(+), 1235 deletions(-)
diff --git a/chromium/v8/src/heap/cppgc/allocation.cc b/chromium/v8/src/heap/cppgc/allocation.cc
index 32f917da5ac..04bcea82d03 100644
--- a/chromium/v8/src/heap/cppgc/allocation.cc
+++ b/chromium/v8/src/heap/cppgc/allocation.cc
@@ -6,7 +6,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/heap/cppgc/heap-inl.h"
+#include "src/heap/cppgc/object-allocator-inl.h"
namespace cppgc {
namespace internal {
@@ -15,19 +15,17 @@ STATIC_ASSERT(api_constants::kLargeObjectSizeThreshold ==
kLargeObjectSizeThreshold);
// static
-void* MakeGarbageCollectedTraitInternal::Allocate(cppgc::Heap* heap,
- size_t size,
- GCInfoIndex index) {
- DCHECK_NOT_NULL(heap);
- return Heap::From(heap)->Allocate(size, index);
+void* MakeGarbageCollectedTraitInternal::Allocate(
+ cppgc::AllocationHandle& handle, size_t size, GCInfoIndex index) {
+ return static_cast<ObjectAllocator&>(handle).AllocateObject(size, index);
}
// static
void* MakeGarbageCollectedTraitInternal::Allocate(
- cppgc::Heap* heap, size_t size, GCInfoIndex index,
+ cppgc::AllocationHandle& handle, size_t size, GCInfoIndex index,
CustomSpaceIndex space_index) {
- DCHECK_NOT_NULL(heap);
- return Heap::From(heap)->Allocate(size, index, space_index);
+ return static_cast<ObjectAllocator&>(handle).AllocateObject(size, index,
+ space_index);
}
} // namespace internal
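The change above reroutes allocation from cppgc::Heap through an opaque AllocationHandle backed by ObjectAllocator. A minimal caller-side sketch, assuming the public cppgc::Heap::GetAllocationHandle() accessor that this API revision pairs with:

    #include "include/cppgc/allocation.h"
    #include "include/cppgc/garbage-collected.h"
    #include "include/cppgc/heap.h"

    class Node final : public cppgc::GarbageCollected<Node> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    Node* CreateNode(cppgc::Heap& heap) {
      // Dispatches into ObjectAllocator via the handle; no Heap::From() hop.
      return cppgc::MakeGarbageCollected<Node>(heap.GetAllocationHandle());
    }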
diff --git a/chromium/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc b/chromium/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc
deleted file mode 100644
index 5246c3f6c3e..00000000000
--- a/chromium/v8/src/heap/cppgc/asm/arm/push_registers_asm.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Push all callee-saved registers to get them on the stack for conservative
-// stack scanning.
-//
-// See asm/x64/push_registers_clang.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-
-// We maintain 8-byte alignment at calls by pushing an additional
-// non-callee-saved register (r3).
-//
-// Calling convention source:
-// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A32)
-// http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka4127.html
-asm(".globl PushAllRegistersAndIterateStack \n"
- ".type PushAllRegistersAndIterateStack, %function \n"
- ".hidden PushAllRegistersAndIterateStack \n"
- "PushAllRegistersAndIterateStack: \n"
- // Push all callee-saved registers and save return address.
- // Only {r4-r11} are callee-saved registers. Push r3 in addition to align
- // the stack back to 8 bytes.
- " push {r3-r11, lr} \n"
- // Pass 1st parameter (r0) unchanged (Stack*).
- // Pass 2nd parameter (r1) unchanged (StackVisitor*).
- // Save 3rd parameter (r2; IterateStackCallback).
- " mov r3, r2 \n"
- // Pass 3rd parameter as sp (stack pointer).
- " mov r2, sp \n"
- // Call the callback.
- " blx r3 \n"
- // Discard all the registers.
- " add sp, sp, #36 \n"
- // Pop lr into pc which returns and switches mode if needed.
- " pop {pc} \n");
diff --git a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc b/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc
deleted file mode 100644
index 30d4de1f308..00000000000
--- a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_asm.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Push all callee-saved registers to get them on the stack for conservative
-// stack scanning.
-//
-// See asm/x64/push_registers_clang.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-
-// We maintain 16-byte alignment.
-//
-// Calling convention source:
-// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A64)
-
-asm(
-#if defined(__APPLE__)
- ".globl _PushAllRegistersAndIterateStack \n"
- ".private_extern _PushAllRegistersAndIterateStack \n"
- "_PushAllRegistersAndIterateStack: \n"
-#else // !defined(__APPLE__)
- ".globl PushAllRegistersAndIterateStack \n"
-#if !defined(_WIN64)
- ".type PushAllRegistersAndIterateStack, %function \n"
- ".hidden PushAllRegistersAndIterateStack \n"
-#endif // !defined(_WIN64)
- "PushAllRegistersAndIterateStack: \n"
-#endif // !defined(__APPLE__)
- // x19-x29 are callee-saved.
- " stp x19, x20, [sp, #-16]! \n"
- " stp x21, x22, [sp, #-16]! \n"
- " stp x23, x24, [sp, #-16]! \n"
- " stp x25, x26, [sp, #-16]! \n"
- " stp x27, x28, [sp, #-16]! \n"
- " stp fp, lr, [sp, #-16]! \n"
- // Maintain frame pointer.
- " mov fp, sp \n"
- // Pass 1st parameter (x0) unchanged (Stack*).
- // Pass 2nd parameter (x1) unchanged (StackVisitor*).
- // Save 3rd parameter (x2; IterateStackCallback)
- " mov x7, x2 \n"
- // Pass 3rd parameter as sp (stack pointer).
- " mov x2, sp \n"
- " blr x7 \n"
- // Load return address.
- " ldr lr, [sp, #8] \n"
- // Restore frame pointer and pop all callee-saved registers.
- " ldr fp, [sp], #96 \n"
- " ret \n");
diff --git a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S b/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S
deleted file mode 100644
index 9773654ffcf..00000000000
--- a/chromium/v8/src/heap/cppgc/asm/arm64/push_registers_masm.S
+++ /dev/null
@@ -1,32 +0,0 @@
-; Copyright 2020 the V8 project authors. All rights reserved.
-; Use of this source code is governed by a BSD-style license that can be
-; found in the LICENSE file.
-
-; This file is exactly the same as push_registers_asm.cc, just formatted for
-; the Microsoft Arm Assembler.
-
- AREA |.text|, CODE, ALIGN=4, READONLY
- EXPORT PushAllRegistersAndIterateStack
-PushAllRegistersAndIterateStack
- ; x19-x29 are callee-saved
- STP x19, x20, [sp, #-16]!
- STP x21, x22, [sp, #-16]!
- STP x23, x24, [sp, #-16]!
- STP x25, x26, [sp, #-16]!
- STP x27, x28, [sp, #-16]!
- STP fp, lr, [sp, #-16]!
- ; Maintain frame pointer
- MOV fp, sp
- ; Pass 1st parameter (x0) unchanged (Stack*).
- ; Pass 2nd parameter (x1) unchanged (StackVisitor*).
- ; Save 3rd parameter (x2; IterateStackCallback)
- MOV x7, x2
- ; Pass 3rd parameter as sp (stack pointer)
- MOV x2, sp
- BLR x7
- ; Load return address
- LDR lr, [sp, #8]
- ; Restore frame pointer and pop all callee-saved registers.
- LDR fp, [sp], #96
- RET
- END
\ No newline at end of file
diff --git a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc b/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc
deleted file mode 100644
index ed9c14a50e9..00000000000
--- a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_asm.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Push all callee-saved registers to get them on the stack for conservative
-// stack scanning.
-//
-// See asm/x64/push_registers_clang.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-
-// We maintain 16-byte alignment at calls. There is a 4-byte return address
-// on the stack and we push 28 bytes which maintains 16-byte stack alignment
-// at the call.
-//
-// The following assumes cdecl calling convention.
-// Source: https://en.wikipedia.org/wiki/X86_calling_conventions#cdecl
-asm(
-#ifdef _WIN32
- ".globl _PushAllRegistersAndIterateStack \n"
- "_PushAllRegistersAndIterateStack: \n"
-#else // !_WIN32
- ".globl PushAllRegistersAndIterateStack \n"
- ".type PushAllRegistersAndIterateStack, %function \n"
- ".hidden PushAllRegistersAndIterateStack \n"
- "PushAllRegistersAndIterateStack: \n"
-#endif // !_WIN32
- // [ IterateStackCallback ]
- // [ StackVisitor* ]
- // [ Stack* ]
- // [ ret ]
- // ebp is callee-saved. Maintain proper frame pointer for debugging.
- " push %ebp \n"
- " movl %esp, %ebp \n"
- " push %ebx \n"
- " push %esi \n"
- " push %edi \n"
- // Save 3rd parameter (IterateStackCallback).
- " movl 28(%esp), %ecx \n"
- // Pass 3rd parameter as esp (stack pointer).
- " push %esp \n"
- // Pass 2nd parameter (StackVisitor*).
- " push 28(%esp) \n"
- // Pass 1st parameter (Stack*).
- " push 28(%esp) \n"
- " call *%ecx \n"
- // Pop the callee-saved registers.
- " addl $24, %esp \n"
- // Restore ebp as it was used as frame pointer.
- " pop %ebp \n"
- " ret \n");
diff --git a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S b/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S
deleted file mode 100644
index a35fd6e527d..00000000000
--- a/chromium/v8/src/heap/cppgc/asm/ia32/push_registers_masm.S
+++ /dev/null
@@ -1,48 +0,0 @@
-;; Copyright 2020 the V8 project authors. All rights reserved.
-;; Use of this source code is governed by a BSD-style license that can be
-;; found in the LICENSE file.
-
-;; MASM syntax
-;; https://docs.microsoft.com/en-us/cpp/assembler/masm/microsoft-macro-assembler-reference?view=vs-2019
-
-.model flat, C
-
-public PushAllRegistersAndIterateStack
-
-.code
-PushAllRegistersAndIterateStack:
- ;; Push all callee-saved registers to get them on the stack for conservative
- ;; stack scanning.
- ;;
- ;; We maintain 16-byte alignment at calls. There is a 4-byte return address
- ;; on the stack and we push 28 bytes which maintains 16-byte stack alignment
- ;; at the call.
- ;;
- ;; The following assumes cdecl calling convention.
- ;; Source: https://docs.microsoft.com/en-us/cpp/cpp/cdecl?view=vs-2019
- ;;
- ;; [ IterateStackCallback ]
- ;; [ StackVisitor* ]
- ;; [ Stack* ]
- ;; [ ret ]
- push ebp
- mov ebp, esp
- push ebx
- push esi
- push edi
- ;; Save 3rd parameter (IterateStackCallback).
- mov ecx, [ esp + 28 ]
- ;; Pass 3rd parameter as esp (stack pointer).
- push esp
- ;; Pass 2nd parameter (StackVisitor*).
- push [ esp + 28 ]
- ;; Pass 1st parameter (Stack*).
- push [ esp + 28 ]
- call ecx
- ;; Pop the callee-saved registers.
- add esp, 24
- ;; Restore ebp as it was used as frame pointer.
- pop ebp
- ret
-
-end
diff --git a/chromium/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc b/chromium/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc
deleted file mode 100644
index 4a46caa6c52..00000000000
--- a/chromium/v8/src/heap/cppgc/asm/mips/push_registers_asm.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Push all callee-saved registers to get them on the stack for conservative
-// stack scanning.
-//
-// See asm/x64/push_registers_clang.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-asm(".set noreorder \n"
- ".global PushAllRegistersAndIterateStack \n"
- ".type PushAllRegistersAndIterateStack, %function \n"
- ".hidden PushAllRegistersAndIterateStack \n"
- "PushAllRegistersAndIterateStack: \n"
- // Push all callee-saved registers and save return address.
- " addiu $sp, $sp, -48 \n"
- " sw $ra, 44($sp) \n"
- " sw $s8, 40($sp) \n"
- " sw $sp, 36($sp) \n"
- " sw $gp, 32($sp) \n"
- " sw $s7, 28($sp) \n"
- " sw $s6, 24($sp) \n"
- " sw $s5, 20($sp) \n"
- " sw $s4, 16($sp) \n"
- " sw $s3, 12($sp) \n"
- " sw $s2, 8($sp) \n"
- " sw $s1, 4($sp) \n"
- " sw $s0, 0($sp) \n"
- // Maintain frame pointer.
- " move $s8, $sp \n"
- // Pass 1st parameter (a0) unchanged (Stack*).
- // Pass 2nd parameter (a1) unchanged (StackVisitor*).
- // Save 3rd parameter (a2; IterateStackCallback).
- " move $a3, $a2 \n"
- // Call the callback.
- " jalr $a3 \n"
- // Delay slot: Pass 3rd parameter as sp (stack pointer).
- " move $a2, $sp \n"
- // Load return address.
- " lw $ra, 44($sp) \n"
- // Restore frame pointer.
- " lw $s8, 40($sp) \n"
- " jr $ra \n"
- // Delay slot: Discard all callee-saved registers.
- " addiu $sp, $sp, 48 \n");
diff --git a/chromium/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc b/chromium/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc
deleted file mode 100644
index 6befa3bcc0c..00000000000
--- a/chromium/v8/src/heap/cppgc/asm/mips64/push_registers_asm.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Push all callee-saved registers to get them on the stack for conservative
-// stack scanning.
-//
-// See asm/x64/push_registers_clang.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-asm(".set noreorder \n"
- ".global PushAllRegistersAndIterateStack \n"
- ".type PushAllRegistersAndIterateStack, %function \n"
- ".hidden PushAllRegistersAndIterateStack \n"
- "PushAllRegistersAndIterateStack: \n"
- // Push all callee-saved registers and save return address.
- " daddiu $sp, $sp, -96 \n"
- " sd $ra, 88($sp) \n"
- " sd $s8, 80($sp) \n"
- " sd $sp, 72($sp) \n"
- " sd $gp, 64($sp) \n"
- " sd $s7, 56($sp) \n"
- " sd $s6, 48($sp) \n"
- " sd $s5, 40($sp) \n"
- " sd $s4, 32($sp) \n"
- " sd $s3, 24($sp) \n"
- " sd $s2, 16($sp) \n"
- " sd $s1, 8($sp) \n"
- " sd $s0, 0($sp) \n"
- // Maintain frame pointer.
- " move $s8, $sp \n"
- // Pass 1st parameter (a0) unchanged (Stack*).
- // Pass 2nd parameter (a1) unchanged (StackVisitor*).
- // Save 3rd parameter (a2; IterateStackCallback).
- " move $a3, $a2 \n"
- // Call the callback.
- " jalr $a3 \n"
- // Delay slot: Pass 3rd parameter as sp (stack pointer).
- " move $a2, $sp \n"
- // Load return address.
- " ld $ra, 88($sp) \n"
- // Restore frame pointer.
- " ld $s8, 80($sp) \n"
- " jr $ra \n"
- // Delay slot: Discard all callee-saved registers.
- " daddiu $sp, $sp, 96 \n");
diff --git a/chromium/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc b/chromium/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc
deleted file mode 100644
index 6936819ba2b..00000000000
--- a/chromium/v8/src/heap/cppgc/asm/ppc/push_registers_asm.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Push all callee-saved registers to get them on the stack for conservative
-// stack scanning.
-//
-// See asm/x64/push_registers_clang.cc for why the function is not generated
-// using clang.
-
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-
-// PPC ABI source:
-// http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi.html
-
-// AIX Runtime process stack:
-// https://www.ibm.com/support/knowledgecenter/ssw_aix_71/assembler/idalangref_runtime_process.html
-asm(
-#if defined(_AIX)
- ".globl .PushAllRegistersAndIterateStack, hidden \n"
- ".csect .text[PR] \n"
- ".PushAllRegistersAndIterateStack: \n"
-#else
- ".globl PushAllRegistersAndIterateStack \n"
- ".type PushAllRegistersAndIterateStack, %function \n"
- ".hidden PushAllRegistersAndIterateStack \n"
- "PushAllRegistersAndIterateStack: \n"
-#endif
- // Push all callee-saved registers.
- // lr, TOC pointer, r16 to r31. 160 bytes.
- // The parameter save area shall be allocated by the caller. 112 bytes.
- // At any time, SP (r1) needs to be a multiple of 16 (i.e. 16-aligned).
- " mflr 0 \n"
- " std 0, 16(1) \n"
-#if defined(_AIX)
- " std 2, 40(1) \n"
-#else
- " std 2, 24(1) \n"
-#endif
- " stdu 1, -256(1) \n"
- " std 14, 112(1) \n"
- " std 15, 120(1) \n"
- " std 16, 128(1) \n"
- " std 17, 136(1) \n"
- " std 18, 144(1) \n"
- " std 19, 152(1) \n"
- " std 20, 160(1) \n"
- " std 21, 168(1) \n"
- " std 22, 176(1) \n"
- " std 23, 184(1) \n"
- " std 24, 192(1) \n"
- " std 25, 200(1) \n"
- " std 26, 208(1) \n"
- " std 27, 216(1) \n"
- " std 28, 224(1) \n"
- " std 29, 232(1) \n"
- " std 30, 240(1) \n"
- " std 31, 248(1) \n"
- // Pass 1st parameter (r3) unchanged (Stack*).
- // Pass 2nd parameter (r4) unchanged (StackVisitor*).
- // Save 3rd parameter (r5; IterateStackCallback).
- " mr 6, 5 \n"
-#if defined(_AIX)
- // Set up TOC for callee.
- " ld 2,8(5) \n"
- // AIX uses function descriptors, which means that
- // pointers to functions do not point to code, but
- // instead point to metadata about them, hence the
- // need to dereference.
- " ld 6,0(6) \n"
-#endif
- // Pass 3rd parameter as sp (stack pointer).
- " mr 5, 1 \n"
-#if !defined(_AIX)
- // Set up r12 to be equal to the callee address (in order for TOC
- // relocation). Only needed on LE Linux.
- " mr 12, 6 \n"
-#endif
- // Call the callback.
- " mtctr 6 \n"
- " bctrl \n"
- // Discard all the registers.
- " addi 1, 1, 256 \n"
- // Restore lr.
- " ld 0, 16(1) \n"
- " mtlr 0 \n"
-#if defined(_AIX)
- // Restore TOC pointer.
- " ld 2, 40(1) \n"
-#else
- " ld 2, 24(1) \n"
-#endif
- " blr \n");
diff --git a/chromium/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc b/chromium/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc
deleted file mode 100644
index 6b9b2c08536..00000000000
--- a/chromium/v8/src/heap/cppgc/asm/s390/push_registers_asm.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Push all callee-saved registers to get them on the stack for conservative
-// stack scanning.
-
-// See asm/x64/push_registers_clang.cc for why the function is not generated
-// using clang.
-
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-
-// S390 ABI source:
-// http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_zSeries.html
-asm(".globl PushAllRegistersAndIterateStack \n"
- ".type PushAllRegistersAndIterateStack, %function \n"
- ".hidden PushAllRegistersAndIterateStack \n"
- "PushAllRegistersAndIterateStack: \n"
- // Push all callee-saved registers.
- // r6-r13, r14 and sp(r15)
- " stmg %r6, %sp, 48(%sp) \n"
- // Allocate frame.
- " lay %sp, -160(%sp) \n"
- // Pass 1st parameter (r2) unchanged (Stack*).
- // Pass 2nd parameter (r3) unchanged (StackVisitor*).
- // Save 3rd parameter (r4; IterateStackCallback).
- " lgr %r5, %r4 \n"
- // Pass sp as 3rd parameter. 160+48 to point
- // to callee saved region stored above.
- " lay %r4, 208(%sp) \n"
- // Call the callback.
- " basr %r14, %r5 \n"
- " lmg %r14,%sp, 272(%sp) \n"
- " br %r14 \n");
diff --git a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc b/chromium/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc
deleted file mode 100644
index 68f7918c93c..00000000000
--- a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_asm.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Push all callee-saved registers to get them on the stack for conservative
-// stack scanning.
-//
-// We cannot rely on clang generating the function and the right symbol
-// mangling, as `__attribute__((naked))` does not prevent clang from
-// generating TSAN function entry stubs (`__tsan_func_entry`). Even with the
-// `__attribute__((no_sanitize_thread))` annotation clang generates the entry
-// stub.
-// See https://bugs.llvm.org/show_bug.cgi?id=45400.
-
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-// _WIN64 is defined as 1 when the compilation target is 64-bit ARM or x64,
-// and is otherwise undefined.
-#ifdef _WIN64
-
-// We maintain 16-byte alignment at calls. There is an 8-byte return address
-// on the stack and we push 72 bytes which maintains 16-byte stack alignment
-// at the call.
-// Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
-asm(".globl PushAllRegistersAndIterateStack \n"
- "PushAllRegistersAndIterateStack: \n"
- // rbp is callee-saved. Maintain proper frame pointer for debugging.
- " push %rbp \n"
- " mov %rsp, %rbp \n"
- // Dummy for alignment.
- " push $0xCDCDCD \n"
- " push %rsi \n"
- " push %rdi \n"
- " push %rbx \n"
- " push %r12 \n"
- " push %r13 \n"
- " push %r14 \n"
- " push %r15 \n"
- // Pass 1st parameter (rcx) unchanged (Stack*).
- // Pass 2nd parameter (rdx) unchanged (StackVisitor*).
- // Save 3rd parameter (r8; IterateStackCallback)
- " mov %r8, %r9 \n"
- // Pass 3rd parameter as rsp (stack pointer).
- " mov %rsp, %r8 \n"
- // Call the callback.
- " call *%r9 \n"
- // Pop the callee-saved registers.
- " add $64, %rsp \n"
- // Restore rbp as it was used as frame pointer.
- " pop %rbp \n"
- " ret \n");
-
-#else // !_WIN64
-
-// We maintain 16-byte alignment at calls. There is an 8-byte return address
-// on the stack and we push 56 bytes which maintains 16-byte stack alignment
-// at the call.
-// Source: https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf
-asm(
-#ifdef __APPLE__
- ".globl _PushAllRegistersAndIterateStack \n"
- ".private_extern _PushAllRegistersAndIterateStack \n"
- "_PushAllRegistersAndIterateStack: \n"
-#else // !__APPLE__
- ".globl PushAllRegistersAndIterateStack \n"
- ".type PushAllRegistersAndIterateStack, %function \n"
- ".hidden PushAllRegistersAndIterateStack \n"
- "PushAllRegistersAndIterateStack: \n"
-#endif // !__APPLE__
- // rbp is callee-saved. Maintain proper frame pointer for debugging.
- " push %rbp \n"
- " mov %rsp, %rbp \n"
- // Dummy for alignment.
- " push $0xCDCDCD \n"
- " push %rbx \n"
- " push %r12 \n"
- " push %r13 \n"
- " push %r14 \n"
- " push %r15 \n"
- // Pass 1st parameter (rdi) unchanged (Stack*).
- // Pass 2nd parameter (rsi) unchanged (StackVisitor*).
- // Save 3rd parameter (rdx; IterateStackCallback)
- " mov %rdx, %r8 \n"
- // Pass 3rd parameter as rsp (stack pointer).
- " mov %rsp, %rdx \n"
- // Call the callback.
- " call *%r8 \n"
- // Pop the callee-saved registers.
- " add $48, %rsp \n"
- // Restore rbp as it was used as frame pointer.
- " pop %rbp \n"
- " ret \n");
-
-#endif // !_WIN64
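The alignment bookkeeping in the System V path above can be checked mechanically; a sketch of the arithmetic (not code from the tree):

    // 8-byte return address (pushed by `call`) + rbp + 8-byte dummy +
    // rbx, r12, r13, r14, r15 = 64 bytes, so rsp remains 16-byte aligned
    // at the indirect call into the callback.
    static_assert((8 + 8 + 8 + 5 * 8) % 16 == 0,
                  "stack stays 16-byte aligned at the callback call");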
diff --git a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_masm.S b/chromium/v8/src/heap/cppgc/asm/x64/push_registers_masm.S
deleted file mode 100644
index 627843830fa..00000000000
--- a/chromium/v8/src/heap/cppgc/asm/x64/push_registers_masm.S
+++ /dev/null
@@ -1,45 +0,0 @@
-;; Copyright 2020 the V8 project authors. All rights reserved.
-;; Use of this source code is governed by a BSD-style license that can be
-;; found in the LICENSE file.
-
-;; MASM syntax
-;; https://docs.microsoft.com/en-us/cpp/assembler/masm/microsoft-macro-assembler-reference?view=vs-2019
-
-public PushAllRegistersAndIterateStack
-
-.code
-PushAllRegistersAndIterateStack:
- ;; Push all callee-saved registers to get them on the stack for conservative
- ;; stack scanning.
- ;;
- ;; We maintain 16-byte alignment at calls. There is an 8-byte return address
- ;; on the stack and we push 72 bytes which maintains 16-byte stack alignment
- ;; at the call.
- ;; Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
- ;;
- ;; rbp is callee-saved. Maintain proper frame pointer for debugging.
- push rbp
- mov rbp, rsp
- push 0CDCDCDh ;; Dummy for alignment.
- push rsi
- push rdi
- push rbx
- push r12
- push r13
- push r14
- push r15
- ;; Pass 1st parameter (rcx) unchanged (Stack*).
- ;; Pass 2nd parameter (rdx) unchanged (StackVisitor*).
- ;; Save 3rd parameter (r8; IterateStackCallback)
- mov r9, r8
- ;; Pass 3rd parameter as rsp (stack pointer).
- mov r8, rsp
- ;; Call the callback.
- call r9
- ;; Pop the callee-saved registers.
- add rsp, 64
- ;; Restore rbp as it was used as frame pointer.
- pop rbp
- ret
-
-end
diff --git a/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc b/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc
new file mode 100644
index 00000000000..55ededdc087
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/caged-heap-local-data.cc
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/caged-heap-local-data.h"
+
+#include <algorithm>
+#include <type_traits>
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+#if defined(CPPGC_YOUNG_GENERATION)
+
+static_assert(
+ std::is_trivially_default_constructible<AgeTable>::value,
+ "To support lazy committing, AgeTable must be trivially constructible");
+
+void AgeTable::Reset(PageAllocator* allocator) {
+ // TODO(chromium:1029379): Consider MADV_DONTNEED instead of MADV_FREE on
+ // POSIX platforms.
+ std::fill(table_.begin(), table_.end(), Age::kOld);
+ const uintptr_t begin = RoundUp(reinterpret_cast<uintptr_t>(table_.begin()),
+ allocator->CommitPageSize());
+ const uintptr_t end = RoundDown(reinterpret_cast<uintptr_t>(table_.end()),
+ allocator->CommitPageSize());
+ allocator->DiscardSystemPages(reinterpret_cast<void*>(begin), end - begin);
+}
+
+#endif
+
+} // namespace internal
+} // namespace cppgc
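AgeTable::Reset() only discards whole commit pages that lie fully inside the table, hence the round-up/round-down pair. A sketch of that rounding, mirroring the RoundUp/RoundDown helpers in src/base/macros.h:

    #include <cstdint>

    constexpr uintptr_t RoundUpTo(uintptr_t v, uintptr_t page) {
      return (v + page - 1) & ~(page - 1);  // page must be a power of two
    }
    constexpr uintptr_t RoundDownTo(uintptr_t v, uintptr_t page) {
      return v & ~(page - 1);
    }
    // Example: a table spanning [0x1234, 0x9876) with 4 KiB commit pages
    // gives the discardable range [0x2000, 0x9000); the partial pages at
    // either end keep their explicit Age::kOld fill.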
diff --git a/chromium/v8/src/heap/cppgc/caged-heap.cc b/chromium/v8/src/heap/cppgc/caged-heap.cc
new file mode 100644
index 00000000000..16cb30aa281
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/caged-heap.cc
@@ -0,0 +1,85 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !defined(CPPGC_CAGED_HEAP)
+#error "Must be compiled with caged heap enabled"
+#endif
+
+#include "src/heap/cppgc/caged-heap.h"
+
+#include "include/cppgc/internal/caged-heap-local-data.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/logging.h"
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+STATIC_ASSERT(api_constants::kCagedHeapReservationSize ==
+ kCagedHeapReservationSize);
+STATIC_ASSERT(api_constants::kCagedHeapReservationAlignment ==
+ kCagedHeapReservationAlignment);
+
+namespace {
+
+VirtualMemory ReserveCagedHeap(PageAllocator* platform_allocator) {
+ DCHECK_NOT_NULL(platform_allocator);
+ DCHECK_EQ(0u,
+ kCagedHeapReservationSize % platform_allocator->AllocatePageSize());
+
+ static constexpr size_t kAllocationTries = 4;
+ for (size_t i = 0; i < kAllocationTries; ++i) {
+ void* hint = reinterpret_cast<void*>(RoundDown(
+ reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
+ kCagedHeapReservationAlignment));
+
+ VirtualMemory memory(platform_allocator, kCagedHeapReservationSize,
+ kCagedHeapReservationAlignment, hint);
+ if (memory.IsReserved()) return memory;
+ }
+
+ FATAL("Fatal process out of memory: Failed to reserve memory for caged heap");
+ UNREACHABLE();
+}
+
+std::unique_ptr<CagedHeap::AllocatorType> CreateBoundedAllocator(
+ v8::PageAllocator* platform_allocator, void* caged_heap_start) {
+ DCHECK(caged_heap_start);
+
+ auto start =
+ reinterpret_cast<CagedHeap::AllocatorType::Address>(caged_heap_start);
+
+ return std::make_unique<CagedHeap::AllocatorType>(
+ platform_allocator, start, kCagedHeapReservationSize, kPageSize);
+}
+
+} // namespace
+
+CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
+ : reserved_area_(ReserveCagedHeap(platform_allocator)) {
+ DCHECK_NOT_NULL(heap_base);
+
+ void* caged_heap_start = reserved_area_.address();
+ CHECK(platform_allocator->SetPermissions(
+ reserved_area_.address(),
+ RoundUp(sizeof(CagedHeapLocalData), platform_allocator->CommitPageSize()),
+ PageAllocator::kReadWrite));
+
+ auto* local_data =
+ new (reserved_area_.address()) CagedHeapLocalData(heap_base);
+#if defined(CPPGC_YOUNG_GENERATION)
+ local_data->age_table.Reset(platform_allocator);
+#endif
+ USE(local_data);
+
+ caged_heap_start = reinterpret_cast<void*>(
+ RoundUp(reinterpret_cast<uintptr_t>(caged_heap_start) +
+ sizeof(CagedHeapLocalData),
+ kPageSize));
+ bounded_allocator_ =
+ CreateBoundedAllocator(platform_allocator, caged_heap_start);
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/caged-heap.h b/chromium/v8/src/heap/cppgc/caged-heap.h
new file mode 100644
index 00000000000..7ac34624a0a
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/caged-heap.h
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_CAGED_HEAP_H_
+#define V8_HEAP_CPPGC_CAGED_HEAP_H_
+
+#include <memory>
+
+#include "include/cppgc/platform.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/virtual-memory.h"
+
+namespace cppgc {
+namespace internal {
+
+struct CagedHeapLocalData;
+class HeapBase;
+
+class CagedHeap final {
+ public:
+ using AllocatorType = v8::base::BoundedPageAllocator;
+
+ CagedHeap(HeapBase* heap, PageAllocator* platform_allocator);
+
+ CagedHeap(const CagedHeap&) = delete;
+ CagedHeap& operator=(const CagedHeap&) = delete;
+
+ AllocatorType& allocator() { return *bounded_allocator_; }
+ const AllocatorType& allocator() const { return *bounded_allocator_; }
+
+ CagedHeapLocalData& local_data() {
+ return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
+ }
+ const CagedHeapLocalData& local_data() const {
+ return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
+ }
+
+ static uintptr_t OffsetFromAddress(void* address) {
+ return reinterpret_cast<uintptr_t>(address) &
+ (kCagedHeapReservationAlignment - 1);
+ }
+
+ private:
+ VirtualMemory reserved_area_;
+ std::unique_ptr<AllocatorType> bounded_allocator_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_CAGED_HEAP_H_
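OffsetFromAddress() works with a single mask because the 4 GB cage is reserved at a 4 GB-aligned base (the alignment equals the reservation size). A 64-bit sketch of the two derived operations:

    #include <cstdint>

    constexpr uintptr_t kCageSize = uintptr_t{4} * 1024 * 1024 * 1024;

    uintptr_t OffsetInCage(const void* address) {
      // The low 32 bits are exactly the offset from the cage base.
      return reinterpret_cast<uintptr_t>(address) & (kCageSize - 1);
    }
    uintptr_t CageBase(const void* address) {
      // Recovers the reservation start, where CagedHeapLocalData lives.
      return reinterpret_cast<uintptr_t>(address) & ~(kCageSize - 1);
    }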
diff --git a/chromium/v8/src/heap/cppgc/free-list.cc b/chromium/v8/src/heap/cppgc/free-list.cc
index e5e6b70793d..8f649059323 100644
--- a/chromium/v8/src/heap/cppgc/free-list.cc
+++ b/chromium/v8/src/heap/cppgc/free-list.cc
@@ -68,12 +68,17 @@ void FreeList::Add(FreeList::Block block) {
if (block.size < sizeof(Entry)) {
// Create wasted entry. This can happen when an almost emptied linear
// allocation buffer is returned to the freelist.
+ // This could be SET_MEMORY_ACCESSIBLE. Since there's no payload, the next
+ // operation overwrites the memory completely, and we can thus avoid
+ // zeroing it out.
+ ASAN_UNPOISON_MEMORY_REGION(block.address, sizeof(HeapObjectHeader));
new (block.address) HeapObjectHeader(size, kFreeListGCInfoIndex);
return;
}
- // Make sure the freelist header is writable.
- SET_MEMORY_ACCESIBLE(block.address, sizeof(Entry));
+ // Make sure the freelist header is writable. SET_MEMORY_ACCESSIBLE is not
+ // needed as we write the whole payload of Entry.
+ ASAN_UNPOISON_MEMORY_REGION(block.address, sizeof(Entry));
Entry* entry = new (block.address) Entry(size);
const size_t index = BucketIndexForSize(static_cast<uint32_t>(size));
entry->Link(&free_list_heads_[index]);
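ASAN_UNPOISON_MEMORY_REGION used above is the standard compiler-rt poisoning interface; outside ASan builds it must compile away. A sketch of the setup this code assumes (V8 wraps this in its own header, so treat the exact spelling and location as assumptions):

    #if defined(ADDRESS_SANITIZER)
    #include <sanitizer/asan_interface.h>  // provides ASAN_UNPOISON_MEMORY_REGION
    #else
    #define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
    #endif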
diff --git a/chromium/v8/src/heap/cppgc/garbage-collector.h b/chromium/v8/src/heap/cppgc/garbage-collector.h
new file mode 100644
index 00000000000..6c906fd501a
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/garbage-collector.h
@@ -0,0 +1,56 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
+#define V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
+
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/sweeper.h"
+
+namespace cppgc {
+namespace internal {
+
+// GC interface that allows abstraction over the actual GC invocation. This is
+// needed to mock/fake GC for testing.
+class GarbageCollector {
+ public:
+ struct Config {
+ using CollectionType = Marker::MarkingConfig::CollectionType;
+ using StackState = cppgc::Heap::StackState;
+ using MarkingType = Marker::MarkingConfig::MarkingType;
+ using SweepingType = Sweeper::Config;
+
+ static constexpr Config ConservativeAtomicConfig() {
+ return {CollectionType::kMajor, StackState::kMayContainHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr Config PreciseAtomicConfig() {
+ return {CollectionType::kMajor, StackState::kNoHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ static constexpr Config MinorPreciseAtomicConfig() {
+ return {CollectionType::kMinor, StackState::kNoHeapPointers,
+ MarkingType::kAtomic, SweepingType::kAtomic};
+ }
+
+ CollectionType collection_type = CollectionType::kMajor;
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ MarkingType marking_type = MarkingType::kAtomic;
+ SweepingType sweeping_type = SweepingType::kAtomic;
+ };
+
+ // Executes a garbage collection specified in config.
+ virtual void CollectGarbage(Config config) = 0;
+
+ // The current epoch that the GC maintains. The epoch is increased on every
+ // GC invocation.
+ virtual size_t epoch() const = 0;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_GARBAGE_COLLECTOR_H_
diff --git a/chromium/v8/src/heap/cppgc/gc-info-table.cc b/chromium/v8/src/heap/cppgc/gc-info-table.cc
index dda5f0a7e83..8f2ee965011 100644
--- a/chromium/v8/src/heap/cppgc/gc-info-table.cc
+++ b/chromium/v8/src/heap/cppgc/gc-info-table.cc
@@ -18,6 +18,11 @@ namespace internal {
namespace {
+// GCInfoTable::table_, the table which holds GCInfos, is maintained as a
+// contiguous array reserved upfront. Subparts of the array are (re-)committed
+// as read/write or read-only in OS pages, whose size is a power of 2. To avoid
+// having GCInfos that cross the boundaries between these subparts we force the
+// size of GCInfo to be a power of 2 as well.
constexpr size_t kEntrySize = sizeof(GCInfo);
static_assert(v8::base::bits::IsPowerOfTwo(kEntrySize),
"GCInfoTable entries size must be power of "
diff --git a/chromium/v8/src/heap/cppgc/gc-info-table.h b/chromium/v8/src/heap/cppgc/gc-info-table.h
index 25141f5d1cc..749f30b258c 100644
--- a/chromium/v8/src/heap/cppgc/gc-info-table.h
+++ b/chromium/v8/src/heap/cppgc/gc-info-table.h
@@ -22,7 +22,10 @@ namespace internal {
// inherit from GarbageCollected.
struct GCInfo final {
FinalizationCallback finalize;
+ TraceCallback trace;
bool has_v_table;
+ // Keep sizeof(GCInfo) a power of 2.
+ size_t padding = 0;
};
class V8_EXPORT GCInfoTable final {
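The added trace pointer and explicit padding keep sizeof(GCInfo) a power of two, as the comment in gc-info-table.cc requires. A 64-bit layout sketch (the placeholder pointer types stand in for FinalizationCallback and TraceCallback):

    #include <cstddef>

    struct GCInfoLayout {
      void (*finalize)(void*);  // 8 bytes
      void (*trace)(void*);     // 8 bytes
      bool has_v_table;         // 1 byte + 7 bytes of implicit padding
      size_t padding;           // 8 bytes; lifts the size to a power of two
    };
    // Without the explicit member the struct would be 24 bytes, and entries
    // could straddle the OS-page boundaries at which the table is committed.
    static_assert(sizeof(GCInfoLayout) == 32, "expected power-of-two size");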
diff --git a/chromium/v8/src/heap/cppgc/gc-info.cc b/chromium/v8/src/heap/cppgc/gc-info.cc
index 007eab3a338..70970139b17 100644
--- a/chromium/v8/src/heap/cppgc/gc-info.cc
+++ b/chromium/v8/src/heap/cppgc/gc-info.cc
@@ -10,9 +10,10 @@ namespace cppgc {
namespace internal {
RegisteredGCInfoIndex::RegisteredGCInfoIndex(
- FinalizationCallback finalization_callback, bool has_v_table)
+ FinalizationCallback finalization_callback, TraceCallback trace_callback,
+ bool has_v_table)
: index_(GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
- {finalization_callback, has_v_table})) {}
+ {finalization_callback, trace_callback, has_v_table})) {}
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/gc-invoker.cc b/chromium/v8/src/heap/cppgc/gc-invoker.cc
new file mode 100644
index 00000000000..a1212d80523
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/gc-invoker.cc
@@ -0,0 +1,105 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/gc-invoker.h"
+
+#include <memory>
+
+#include "include/cppgc/platform.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/task-handle.h"
+
+namespace cppgc {
+namespace internal {
+
+class GCInvoker::GCInvokerImpl final : public GarbageCollector {
+ public:
+ GCInvokerImpl(GarbageCollector*, cppgc::Platform*, cppgc::Heap::StackSupport);
+ ~GCInvokerImpl();
+
+ GCInvokerImpl(const GCInvokerImpl&) = delete;
+ GCInvokerImpl& operator=(const GCInvokerImpl&) = delete;
+
+ void CollectGarbage(GarbageCollector::Config) final;
+ size_t epoch() const final { return collector_->epoch(); }
+
+ private:
+ class GCTask final : public cppgc::Task {
+ public:
+ using Handle = SingleThreadedHandle;
+
+ static Handle Post(GarbageCollector* collector, cppgc::TaskRunner* runner) {
+ auto task = std::make_unique<GCInvoker::GCInvokerImpl::GCTask>(collector);
+ auto handle = task->GetHandle();
+ runner->PostNonNestableTask(std::move(task));
+ return handle;
+ }
+
+ explicit GCTask(GarbageCollector* collector)
+ : collector_(collector), saved_epoch_(collector->epoch()) {}
+
+ private:
+ void Run() final {
+ if (handle_.IsCanceled() || (collector_->epoch() != saved_epoch_)) return;
+
+ collector_->CollectGarbage(
+ GarbageCollector::Config::PreciseAtomicConfig());
+ handle_.Cancel();
+ }
+
+ Handle GetHandle() { return handle_; }
+
+ GarbageCollector* collector_;
+ Handle handle_;
+ size_t saved_epoch_;
+ };
+
+ GarbageCollector* collector_;
+ cppgc::Platform* platform_;
+ cppgc::Heap::StackSupport stack_support_;
+ GCTask::Handle gc_task_handle_;
+};
+
+GCInvoker::GCInvokerImpl::GCInvokerImpl(GarbageCollector* collector,
+ cppgc::Platform* platform,
+ cppgc::Heap::StackSupport stack_support)
+ : collector_(collector),
+ platform_(platform),
+ stack_support_(stack_support) {}
+
+GCInvoker::GCInvokerImpl::~GCInvokerImpl() {
+ if (gc_task_handle_) {
+ gc_task_handle_.Cancel();
+ }
+}
+
+void GCInvoker::GCInvokerImpl::CollectGarbage(GarbageCollector::Config config) {
+ if ((config.stack_state ==
+ GarbageCollector::Config::StackState::kNoHeapPointers) ||
+ (stack_support_ ==
+ cppgc::Heap::StackSupport::kSupportsConservativeStackScan)) {
+ collector_->CollectGarbage(config);
+ } else if (platform_->GetForegroundTaskRunner()->NonNestableTasksEnabled()) {
+ if (!gc_task_handle_) {
+ gc_task_handle_ =
+ GCTask::Post(collector_, platform_->GetForegroundTaskRunner().get());
+ }
+ }
+}
+
+GCInvoker::GCInvoker(GarbageCollector* collector, cppgc::Platform* platform,
+ cppgc::Heap::StackSupport stack_support)
+ : impl_(std::make_unique<GCInvoker::GCInvokerImpl>(collector, platform,
+ stack_support)) {}
+
+GCInvoker::~GCInvoker() = default;
+
+void GCInvoker::CollectGarbage(GarbageCollector::Config config) {
+ impl_->CollectGarbage(config);
+}
+
+size_t GCInvoker::epoch() const { return impl_->epoch(); }
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/gc-invoker.h b/chromium/v8/src/heap/cppgc/gc-invoker.h
new file mode 100644
index 00000000000..a9e3369b3e9
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/gc-invoker.h
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_GC_INVOKER_H_
+#define V8_HEAP_CPPGC_GC_INVOKER_H_
+
+#include "include/cppgc/heap.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/garbage-collector.h"
+
+namespace cppgc {
+
+class Platform;
+
+namespace internal {
+
+// GC invoker that dispatches GC depending on StackSupport and StackState:
+// 1. If StackState specifies no stack scan needed the GC is invoked
+// synchronously.
+// 2. If StackState specifies conservative GC and StackSupport prohibits stack
+// scanning: Delay GC until it can be invoked without accessing the stack.
+// To do so, a precise GC without stack scan is scheduled using the platform
+// if non-nestable tasks are supported, and otherwise no operation is carried
+// out. This means that the heuristic allows the heap to go arbitrarily
+// far over the limit when non-nestable tasks are not supported and only
+// conservative GCs are requested.
+class V8_EXPORT_PRIVATE GCInvoker final : public GarbageCollector {
+ public:
+ GCInvoker(GarbageCollector*, cppgc::Platform*, cppgc::Heap::StackSupport);
+ ~GCInvoker();
+
+ GCInvoker(const GCInvoker&) = delete;
+ GCInvoker& operator=(const GCInvoker&) = delete;
+
+ void CollectGarbage(GarbageCollector::Config) final;
+ size_t epoch() const final;
+
+ private:
+ class GCInvokerImpl;
+ std::unique_ptr<GCInvokerImpl> impl_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_GC_INVOKER_H_
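A usage sketch of the dispatch rules above: with StackSupport::kNoConservativeStackScan, a conservative request never runs synchronously; it becomes a posted precise GC task, or is dropped when non-nestable tasks are unavailable.

    #include "src/heap/cppgc/gc-invoker.h"

    void RequestConservativeGC(cppgc::internal::GCInvoker& invoker) {
      // Deferred (or dropped) unless the stack may be scanned.
      invoker.CollectGarbage(
          cppgc::internal::GarbageCollector::Config::ConservativeAtomicConfig());
    }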
diff --git a/chromium/v8/src/heap/cppgc/globals.h b/chromium/v8/src/heap/cppgc/globals.h
index 734abd508ef..d286a7fa428 100644
--- a/chromium/v8/src/heap/cppgc/globals.h
+++ b/chromium/v8/src/heap/cppgc/globals.h
@@ -16,6 +16,10 @@ namespace internal {
using Address = uint8_t*;
using ConstAddress = const uint8_t*;
+constexpr size_t kKB = 1024;
+constexpr size_t kMB = kKB * 1024;
+constexpr size_t kGB = kMB * 1024;
+
// See 6.7.6 (http://eel.is/c++draft/basic.align) for alignment restrictions. We
// do not fully support all alignment restrictions (following
// alignof(std​::​max_­align_­t)) but limit to alignof(double).
@@ -42,6 +46,9 @@ constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
constexpr GCInfoIndex kFreeListGCInfoIndex = 0;
constexpr size_t kFreeListEntrySize = 2 * sizeof(uintptr_t);
+constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
+constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
+
} // namespace internal
} // namespace cppgc
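Because kCagedHeapReservationAlignment equals the reservation size, a single mask decides whether two pointers share a cage; a sketch of the kind of test this enables (for example in a caged write barrier):

    #include <cstdint>

    bool InSameCage(const void* a, const void* b) {
      // kCagedHeapReservationSize as defined above: 4 GB.
      constexpr uintptr_t kMask = ~(uintptr_t{4} * 1024 * 1024 * 1024 - 1);
      return (reinterpret_cast<uintptr_t>(a) & kMask) ==
             (reinterpret_cast<uintptr_t>(b) & kMask);
    }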
diff --git a/chromium/v8/src/heap/cppgc/heap-base.cc b/chromium/v8/src/heap/cppgc/heap-base.cc
new file mode 100644
index 00000000000..7963df0af3f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-base.cc
@@ -0,0 +1,88 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-base.h"
+
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/platform/platform.h"
+#include "src/heap/base/stack.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page-inl.h"
+#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
+#include "src/heap/cppgc/stats-collector.h"
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
+ friend class HeapVisitor<ObjectSizeCounter>;
+
+ public:
+ size_t GetSize(RawHeap* heap) {
+ Traverse(heap);
+ return accumulated_size_;
+ }
+
+ private:
+ static size_t ObjectSize(const HeapObjectHeader* header) {
+ const size_t size =
+ header->IsLargeObject()
+ ? static_cast<const LargePage*>(BasePage::FromPayload(header))
+ ->PayloadSize()
+ : header->GetSize();
+ DCHECK_GE(size, sizeof(HeapObjectHeader));
+ return size - sizeof(HeapObjectHeader);
+ }
+
+ bool VisitHeapObjectHeader(HeapObjectHeader* header) {
+ if (header->IsFree()) return true;
+ accumulated_size_ += ObjectSize(header);
+ return true;
+ }
+
+ size_t accumulated_size_ = 0;
+};
+
+} // namespace
+
+HeapBase::HeapBase(std::shared_ptr<cppgc::Platform> platform,
+ size_t custom_spaces)
+ : raw_heap_(this, custom_spaces),
+ platform_(std::move(platform)),
+#if defined(CPPGC_CAGED_HEAP)
+ caged_heap_(this, platform_->GetPageAllocator()),
+ page_backend_(std::make_unique<PageBackend>(&caged_heap_.allocator())),
+#else
+ page_backend_(
+ std::make_unique<PageBackend>(platform_->GetPageAllocator())),
+#endif
+ stats_collector_(std::make_unique<StatsCollector>()),
+ stack_(std::make_unique<heap::base::Stack>(
+ v8::base::Stack::GetStackStart())),
+ prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()),
+ object_allocator_(&raw_heap_, page_backend_.get(),
+ stats_collector_.get()),
+ sweeper_(&raw_heap_, platform_.get(), stats_collector_.get()) {
+}
+
+HeapBase::~HeapBase() = default;
+
+size_t HeapBase::ObjectPayloadSize() const {
+ return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
+}
+
+HeapBase::NoGCScope::NoGCScope(HeapBase& heap) : heap_(heap) {
+ heap_.no_gc_scope_++;
+}
+
+HeapBase::NoGCScope::~NoGCScope() { heap_.no_gc_scope_--; }
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/heap-base.h b/chromium/v8/src/heap/cppgc/heap-base.h
new file mode 100644
index 00000000000..cc61ed32fc8
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-base.h
@@ -0,0 +1,151 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_BASE_H_
+#define V8_HEAP_CPPGC_HEAP_BASE_H_
+
+#include <memory>
+#include <set>
+
+#include "include/cppgc/internal/persistent-node.h"
+#include "include/cppgc/macros.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/raw-heap.h"
+#include "src/heap/cppgc/sweeper.h"
+
+#if defined(CPPGC_CAGED_HEAP)
+#include "src/heap/cppgc/caged-heap.h"
+#endif
+
+namespace heap {
+namespace base {
+class Stack;
+} // namespace base
+} // namespace heap
+
+namespace cppgc {
+
+class Platform;
+
+namespace internal {
+
+namespace testing {
+class TestWithHeap;
+}
+
+class Marker;
+class PageBackend;
+class PreFinalizerHandler;
+class StatsCollector;
+
+// Base class for heap implementations.
+class V8_EXPORT_PRIVATE HeapBase {
+ public:
+ // NoGCScope allows going over limits and suppresses garbage collection,
+ // whether triggered through allocations or requested explicitly.
+ class V8_EXPORT_PRIVATE NoGCScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ explicit NoGCScope(HeapBase& heap);
+ ~NoGCScope();
+
+ NoGCScope(const NoGCScope&) = delete;
+ NoGCScope& operator=(const NoGCScope&) = delete;
+
+ private:
+ HeapBase& heap_;
+ };
+
+ HeapBase(std::shared_ptr<cppgc::Platform> platform, size_t custom_spaces);
+ virtual ~HeapBase();
+
+ HeapBase(const HeapBase&) = delete;
+ HeapBase& operator=(const HeapBase&) = delete;
+
+ RawHeap& raw_heap() { return raw_heap_; }
+ const RawHeap& raw_heap() const { return raw_heap_; }
+
+ cppgc::Platform* platform() { return platform_.get(); }
+ const cppgc::Platform* platform() const { return platform_.get(); }
+
+ PageBackend* page_backend() { return page_backend_.get(); }
+ const PageBackend* page_backend() const { return page_backend_.get(); }
+
+ StatsCollector* stats_collector() { return stats_collector_.get(); }
+ const StatsCollector* stats_collector() const {
+ return stats_collector_.get();
+ }
+
+#if defined(CPPGC_CAGED_HEAP)
+ CagedHeap& caged_heap() { return caged_heap_; }
+ const CagedHeap& caged_heap() const { return caged_heap_; }
+#endif
+
+ heap::base::Stack* stack() { return stack_.get(); }
+
+ PreFinalizerHandler* prefinalizer_handler() {
+ return prefinalizer_handler_.get();
+ }
+
+ Marker* marker() const { return marker_.get(); }
+
+ ObjectAllocator& object_allocator() { return object_allocator_; }
+
+ Sweeper& sweeper() { return sweeper_; }
+
+ PersistentRegion& GetStrongPersistentRegion() {
+ return strong_persistent_region_;
+ }
+ const PersistentRegion& GetStrongPersistentRegion() const {
+ return strong_persistent_region_;
+ }
+ PersistentRegion& GetWeakPersistentRegion() {
+ return weak_persistent_region_;
+ }
+ const PersistentRegion& GetWeakPersistentRegion() const {
+ return weak_persistent_region_;
+ }
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ std::set<void*>& remembered_slots() { return remembered_slots_; }
+#endif
+
+ size_t ObjectPayloadSize() const;
+
+ protected:
+ bool in_no_gc_scope() const { return no_gc_scope_ > 0; }
+
+ RawHeap raw_heap_;
+ std::shared_ptr<cppgc::Platform> platform_;
+#if defined(CPPGC_CAGED_HEAP)
+ CagedHeap caged_heap_;
+#endif
+ std::unique_ptr<PageBackend> page_backend_;
+
+ std::unique_ptr<StatsCollector> stats_collector_;
+ std::unique_ptr<heap::base::Stack> stack_;
+ std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
+ std::unique_ptr<Marker> marker_;
+
+ ObjectAllocator object_allocator_;
+ Sweeper sweeper_;
+
+ PersistentRegion strong_persistent_region_;
+ PersistentRegion weak_persistent_region_;
+
+#if defined(CPPGC_YOUNG_GENERATION)
+ std::set<void*> remembered_slots_;
+#endif
+
+ size_t no_gc_scope_ = 0;
+
+ friend class testing::TestWithHeap;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_BASE_H_
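A usage sketch for NoGCScope: a stack-allocated guard that bumps no_gc_scope_, so scopes nest and GC stays suppressed until every scope has unwound.

    void WithGCDisabled(cppgc::internal::HeapBase& heap) {
      cppgc::internal::HeapBase::NoGCScope no_gc(heap);
      // Allocations here may go over limits without triggering collection.
    }  // The counter drops back; GC is permitted again once it reaches zero.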
diff --git a/chromium/v8/src/heap/cppgc/heap-growing.cc b/chromium/v8/src/heap/cppgc/heap-growing.cc
new file mode 100644
index 00000000000..751d32b0e6d
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-growing.cc
@@ -0,0 +1,99 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/heap-growing.h"
+
+#include <memory>
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/task-handle.h"
+
+namespace cppgc {
+namespace internal {
+
+class HeapGrowing::HeapGrowingImpl final
+ : public StatsCollector::AllocationObserver {
+ public:
+ HeapGrowingImpl(GarbageCollector*, StatsCollector*,
+ cppgc::Heap::ResourceConstraints);
+ ~HeapGrowingImpl();
+
+ HeapGrowingImpl(const HeapGrowingImpl&) = delete;
+ HeapGrowingImpl& operator=(const HeapGrowingImpl&) = delete;
+
+ void AllocatedObjectSizeIncreased(size_t) final;
+ // Only trigger GC on growing.
+ void AllocatedObjectSizeDecreased(size_t) final {}
+ void ResetAllocatedObjectSize(size_t) final;
+
+ size_t limit() const { return limit_; }
+
+ private:
+ void ConfigureLimit(size_t allocated_object_size);
+
+ GarbageCollector* collector_;
+ StatsCollector* stats_collector_;
+ // Allow a 1 MB heap by default.
+ size_t initial_heap_size_ = 1 * kMB;
+ size_t limit_ = 0; // See ConfigureLimit().
+
+ SingleThreadedHandle gc_task_handle_;
+};
+
+HeapGrowing::HeapGrowingImpl::HeapGrowingImpl(
+ GarbageCollector* collector, StatsCollector* stats_collector,
+ cppgc::Heap::ResourceConstraints constraints)
+ : collector_(collector),
+ stats_collector_(stats_collector),
+ gc_task_handle_(SingleThreadedHandle::NonEmptyTag{}) {
+ if (constraints.initial_heap_size_bytes > 0) {
+ initial_heap_size_ = constraints.initial_heap_size_bytes;
+ }
+ constexpr size_t kNoAllocatedBytes = 0;
+ ConfigureLimit(kNoAllocatedBytes);
+ stats_collector->RegisterObserver(this);
+}
+
+HeapGrowing::HeapGrowingImpl::~HeapGrowingImpl() {
+ stats_collector_->UnregisterObserver(this);
+}
+
+void HeapGrowing::HeapGrowingImpl::AllocatedObjectSizeIncreased(size_t) {
+ if (stats_collector_->allocated_object_size() > limit_) {
+ collector_->CollectGarbage(
+ GarbageCollector::Config::ConservativeAtomicConfig());
+ }
+}
+
+void HeapGrowing::HeapGrowingImpl::ResetAllocatedObjectSize(
+ size_t allocated_object_size) {
+ ConfigureLimit(allocated_object_size);
+}
+
+void HeapGrowing::HeapGrowingImpl::ConfigureLimit(
+ size_t allocated_object_size) {
+ const size_t size = std::max(allocated_object_size, initial_heap_size_);
+ limit_ = std::max(static_cast<size_t>(size * kGrowingFactor),
+ size + kMinLimitIncrease);
+}
+
+HeapGrowing::HeapGrowing(GarbageCollector* collector,
+ StatsCollector* stats_collector,
+ cppgc::Heap::ResourceConstraints constraints)
+ : impl_(std::make_unique<HeapGrowing::HeapGrowingImpl>(
+ collector, stats_collector, constraints)) {}
+
+HeapGrowing::~HeapGrowing() = default;
+
+size_t HeapGrowing::limit() const { return impl_->limit(); }
+
+// static
+constexpr double HeapGrowing::kGrowingFactor;
+
+} // namespace internal
+} // namespace cppgc
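A worked example of ConfigureLimit() above (a sketch; the 128 KiB cppgc page size and five regular spaces, which give kMinLimitIncrease == 640 KiB, are assumptions about globals.h and raw-heap.h):

    #include <algorithm>
    #include <cstddef>

    constexpr size_t kAssumedMinLimitIncrease = 5 * 128 * 1024;  // 640 KiB
    constexpr size_t kLive = 1024 * 1024;                        // 1 MiB live
    constexpr size_t kLimit =
        std::max(static_cast<size_t>(kLive * 1.5),  // kGrowingFactor
                 kLive + kAssumedMinLimitIncrease);
    // For small heaps the additive floor, not the 1.5x factor, sets the limit.
    static_assert(kLimit == 1703936, "additive floor wins for small heaps");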
diff --git a/chromium/v8/src/heap/cppgc/heap-growing.h b/chromium/v8/src/heap/cppgc/heap-growing.h
new file mode 100644
index 00000000000..772fc2db55f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-growing.h
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_GROWING_H_
+#define V8_HEAP_CPPGC_HEAP_GROWING_H_
+
+#include "include/cppgc/heap.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/raw-heap.h"
+
+namespace cppgc {
+
+class Platform;
+
+namespace internal {
+
+class GarbageCollector;
+class StatsCollector;
+
+// Growing strategy that invokes garbage collection using GarbageCollector based
+// on allocation statistics provided by StatsCollector and ResourceConstraints.
+//
+// Implements a fixed-ratio growing strategy with an initial heap size that the
+// GC can ignore to avoid excessive GCs for smaller heaps.
+class V8_EXPORT_PRIVATE HeapGrowing final {
+ public:
+ // Constant growing factor for growing the heap limit.
+ static constexpr double kGrowingFactor = 1.5;
+ // For smaller heaps, allow allocating at least LAB in each regular space
+ // before triggering GC again.
+ static constexpr size_t kMinLimitIncrease =
+ kPageSize * RawHeap::kNumberOfRegularSpaces;
+
+ HeapGrowing(GarbageCollector*, StatsCollector*,
+ cppgc::Heap::ResourceConstraints);
+ ~HeapGrowing();
+
+ HeapGrowing(const HeapGrowing&) = delete;
+ HeapGrowing& operator=(const HeapGrowing&) = delete;
+
+ size_t limit() const;
+
+ private:
+ class HeapGrowingImpl;
+ std::unique_ptr<HeapGrowingImpl> impl_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_GROWING_H_
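
For a sense of scale, kMinLimitIncrease works out roughly as follows; both inputs are assumptions chosen to be representative of globals.h and raw-heap.h, not quoted from them:

  // kPageSize                       = 128 KiB  (assumed)
  // RawHeap::kNumberOfRegularSpaces = 5        (assumed)
  // kMinLimitIncrease = 128 KiB * 5 = 640 KiB
  //
  // i.e. a small heap can fill one LAB-sized page per regular space before
  // the next GC triggers, which is the intent stated in the comment above.
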
diff --git a/chromium/v8/src/heap/cppgc/heap-inl.h b/chromium/v8/src/heap/cppgc/heap-inl.h
deleted file mode 100644
index 4fe3186230f..00000000000
--- a/chromium/v8/src/heap/cppgc/heap-inl.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_CPPGC_HEAP_INL_H_
-#define V8_HEAP_CPPGC_HEAP_INL_H_
-
-#include "src/heap/cppgc/globals.h"
-#include "src/heap/cppgc/heap.h"
-#include "src/heap/cppgc/object-allocator-inl.h"
-
-namespace cppgc {
-namespace internal {
-
-void* Heap::Allocate(size_t size, GCInfoIndex index) {
- DCHECK(is_allocation_allowed());
- void* result = object_allocator_.AllocateObject(size, index);
- objects_.push_back(&HeapObjectHeader::FromPayload(result));
- return result;
-}
-
-void* Heap::Allocate(size_t size, GCInfoIndex index,
- CustomSpaceIndex space_index) {
- DCHECK(is_allocation_allowed());
- void* result = object_allocator_.AllocateObject(size, index, space_index);
- objects_.push_back(&HeapObjectHeader::FromPayload(result));
- return result;
-}
-
-} // namespace internal
-} // namespace cppgc
-
-#endif // V8_HEAP_CPPGC_HEAP_INL_H_
diff --git a/chromium/v8/src/heap/cppgc/heap-object-header-inl.h b/chromium/v8/src/heap/cppgc/heap-object-header-inl.h
index cba7b24a4cb..0348013e08b 100644
--- a/chromium/v8/src/heap/cppgc/heap-object-header-inl.h
+++ b/chromium/v8/src/heap/cppgc/heap-object-header-inl.h
@@ -113,6 +113,11 @@ bool HeapObjectHeader::TryMarkAtomic() {
}
template <HeapObjectHeader::AccessMode mode>
+bool HeapObjectHeader::IsYoung() const {
+ return !IsMarked<mode>();
+}
+
+template <HeapObjectHeader::AccessMode mode>
bool HeapObjectHeader::IsFree() const {
return GetGCInfoIndex() == kFreeListGCInfoIndex;
}
diff --git a/chromium/v8/src/heap/cppgc/heap-object-header.h b/chromium/v8/src/heap/cppgc/heap-object-header.h
index b517617dd1e..9a2b5283888 100644
--- a/chromium/v8/src/heap/cppgc/heap-object-header.h
+++ b/chromium/v8/src/heap/cppgc/heap-object-header.h
@@ -80,6 +80,9 @@ class HeapObjectHeader {
inline bool TryMarkAtomic();
template <AccessMode = AccessMode::kNonAtomic>
+ bool IsYoung() const;
+
+ template <AccessMode = AccessMode::kNonAtomic>
bool IsFree() const;
inline bool IsFinalizable() const;
diff --git a/chromium/v8/src/heap/cppgc/heap-page-inl.h b/chromium/v8/src/heap/cppgc/heap-page-inl.h
new file mode 100644
index 00000000000..a416a62e492
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/heap-page-inl.h
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_HEAP_PAGE_INL_H_
+#define V8_HEAP_CPPGC_HEAP_PAGE_INL_H_
+
+#include "src/heap/cppgc/heap-page.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+BasePage* BasePage::FromPayload(void* payload) {
+ return reinterpret_cast<BasePage*>(
+ (reinterpret_cast<uintptr_t>(payload) & kPageBaseMask) + kGuardPageSize);
+}
+
+// static
+const BasePage* BasePage::FromPayload(const void* payload) {
+ return reinterpret_cast<const BasePage*>(
+ (reinterpret_cast<uintptr_t>(const_cast<void*>(payload)) &
+ kPageBaseMask) +
+ kGuardPageSize);
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_HEAP_PAGE_INL_H_
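
The lookups above rely on pages being kPageSize-aligned reservations that start with a guard page. A minimal sketch of the arithmetic, with assumed constant values:

  #include <cstdint>

  constexpr uintptr_t kPageSize = uintptr_t{1} << 17;   // assumed: 128 KiB
  constexpr uintptr_t kPageBaseMask = ~(kPageSize - 1);
  constexpr uintptr_t kGuardPageSize = 4096;            // assumed

  uintptr_t PageHeader(uintptr_t inner_address) {
    // Mask off the low bits to find the reservation base, then skip the
    // guard page to land on the BasePage object itself.
    return (inner_address & kPageBaseMask) + kGuardPageSize;
  }
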
diff --git a/chromium/v8/src/heap/cppgc/heap-page.cc b/chromium/v8/src/heap/cppgc/heap-page.cc
index e8afbafbd2a..f95f4a37eb6 100644
--- a/chromium/v8/src/heap/cppgc/heap-page.cc
+++ b/chromium/v8/src/heap/cppgc/heap-page.cc
@@ -14,7 +14,7 @@
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/object-start-bitmap-inl.h"
#include "src/heap/cppgc/object-start-bitmap.h"
-#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/page-memory-inl.h"
#include "src/heap/cppgc/raw-heap.h"
namespace cppgc {
@@ -27,63 +27,120 @@ Address AlignAddress(Address address, size_t alignment) {
RoundUp(reinterpret_cast<uintptr_t>(address), alignment));
}
-} // namespace
+const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
+ const void* address) {
+ if (page->is_large()) {
+ return LargePage::From(page)->ObjectHeader();
+ }
+ const ObjectStartBitmap& bitmap =
+ NormalPage::From(page)->object_start_bitmap();
+ const HeapObjectHeader* header =
+ bitmap.FindHeader(static_cast<ConstAddress>(address));
+ DCHECK_LT(address,
+ reinterpret_cast<ConstAddress>(header) +
+ header->GetSize<HeapObjectHeader::AccessMode::kAtomic>());
+ return header;
+}
-STATIC_ASSERT(kPageSize == api_constants::kPageAlignment);
+} // namespace
// static
-BasePage* BasePage::FromPayload(void* payload) {
- return reinterpret_cast<BasePage*>(
- (reinterpret_cast<uintptr_t>(payload) & kPageBaseMask) + kGuardPageSize);
+BasePage* BasePage::FromInnerAddress(const HeapBase* heap, void* address) {
+ return const_cast<BasePage*>(
+ FromInnerAddress(heap, const_cast<const void*>(address)));
}
// static
-const BasePage* BasePage::FromPayload(const void* payload) {
+const BasePage* BasePage::FromInnerAddress(const HeapBase* heap,
+ const void* address) {
return reinterpret_cast<const BasePage*>(
- (reinterpret_cast<uintptr_t>(const_cast<void*>(payload)) &
- kPageBaseMask) +
- kGuardPageSize);
+ heap->page_backend()->Lookup(static_cast<ConstAddress>(address)));
}
-HeapObjectHeader* BasePage::ObjectHeaderFromInnerAddress(void* address) {
- return const_cast<HeapObjectHeader*>(
+// static
+void BasePage::Destroy(BasePage* page) {
+ if (page->is_large()) {
+ LargePage::Destroy(LargePage::From(page));
+ } else {
+ NormalPage::Destroy(NormalPage::From(page));
+ }
+}
+
+Address BasePage::PayloadStart() {
+ return is_large() ? LargePage::From(this)->PayloadStart()
+ : NormalPage::From(this)->PayloadStart();
+}
+
+ConstAddress BasePage::PayloadStart() const {
+ return const_cast<BasePage*>(this)->PayloadStart();
+}
+
+Address BasePage::PayloadEnd() {
+ return is_large() ? LargePage::From(this)->PayloadEnd()
+ : NormalPage::From(this)->PayloadEnd();
+}
+
+ConstAddress BasePage::PayloadEnd() const {
+ return const_cast<BasePage*>(this)->PayloadEnd();
+}
+
+HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(void* address) const {
+ return const_cast<HeapObjectHeader&>(
ObjectHeaderFromInnerAddress(const_cast<const void*>(address)));
}
-const HeapObjectHeader* BasePage::ObjectHeaderFromInnerAddress(
- const void* address) {
+const HeapObjectHeader& BasePage::ObjectHeaderFromInnerAddress(
+ const void* address) const {
+ const HeapObjectHeader* header =
+ ObjectHeaderFromInnerAddressImpl(this, address);
+ DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex());
+ return *header;
+}
+
+HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
+ void* address) const {
+ return const_cast<HeapObjectHeader*>(
+ TryObjectHeaderFromInnerAddress(const_cast<const void*>(address)));
+}
+
+const HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
+ const void* address) const {
if (is_large()) {
- return LargePage::From(this)->ObjectHeader();
+ if (!LargePage::From(this)->PayloadContains(
+ static_cast<ConstAddress>(address)))
+ return nullptr;
+ } else {
+ const NormalPage* normal_page = NormalPage::From(this);
+ if (!normal_page->PayloadContains(static_cast<ConstAddress>(address)))
+ return nullptr;
+ // Check that the space has no linear allocation buffer.
+ DCHECK(!NormalPageSpace::From(normal_page->space())
+ ->linear_allocation_buffer()
+ .size());
}
- ObjectStartBitmap& bitmap = NormalPage::From(this)->object_start_bitmap();
- HeapObjectHeader* header =
- bitmap.FindHeader(static_cast<ConstAddress>(address));
- DCHECK_LT(address,
- reinterpret_cast<ConstAddress>(header) +
- header->GetSize<HeapObjectHeader::AccessMode::kAtomic>());
- DCHECK_NE(kFreeListGCInfoIndex,
- header->GetGCInfoIndex<HeapObjectHeader::AccessMode::kAtomic>());
+
+  // |address| is on the heap, so we can use FromInnerAddress to get the
+  // header.
+ const HeapObjectHeader* header =
+ ObjectHeaderFromInnerAddressImpl(this, address);
+ if (header->IsFree()) return nullptr;
+ DCHECK_NE(kFreeListGCInfoIndex, header->GetGCInfoIndex());
return header;
}
-BasePage::BasePage(Heap* heap, BaseSpace* space, PageType type)
+BasePage::BasePage(HeapBase* heap, BaseSpace* space, PageType type)
: heap_(heap), space_(space), type_(type) {
DCHECK_EQ(0u, (reinterpret_cast<uintptr_t>(this) - kGuardPageSize) &
kPageOffsetMask);
- DCHECK_EQ(reinterpret_cast<void*>(&heap_),
- FromPayload(this) + api_constants::kHeapOffset);
DCHECK_EQ(&heap_->raw_heap(), space_->raw_heap());
}
// static
-NormalPage* NormalPage::Create(NormalPageSpace* space) {
- DCHECK(space);
- Heap* heap = space->raw_heap()->heap();
- DCHECK(heap);
- void* memory = heap->page_backend()->AllocateNormalPageMemory(space->index());
- auto* normal_page = new (memory) NormalPage(heap, space);
- space->AddPage(normal_page);
- space->AddToFreeList(normal_page->PayloadStart(), normal_page->PayloadSize());
+NormalPage* NormalPage::Create(PageBackend* page_backend,
+ NormalPageSpace* space) {
+ DCHECK_NOT_NULL(page_backend);
+ DCHECK_NOT_NULL(space);
+ void* memory = page_backend->AllocateNormalPageMemory(space->index());
+ auto* normal_page = new (memory) NormalPage(space->raw_heap()->heap(), space);
return normal_page;
}
@@ -98,7 +155,7 @@ void NormalPage::Destroy(NormalPage* page) {
reinterpret_cast<Address>(page));
}
-NormalPage::NormalPage(Heap* heap, BaseSpace* space)
+NormalPage::NormalPage(HeapBase* heap, BaseSpace* space)
: BasePage(heap, space, PageType::kNormal),
object_start_bitmap_(PayloadStart()) {
DCHECK_LT(kLargeObjectSizeThreshold,
@@ -142,23 +199,25 @@ size_t NormalPage::PayloadSize() {
return kPageSize - 2 * kGuardPageSize - header_size;
}
-LargePage::LargePage(Heap* heap, BaseSpace* space, size_t size)
+LargePage::LargePage(HeapBase* heap, BaseSpace* space, size_t size)
: BasePage(heap, space, PageType::kLarge), payload_size_(size) {}
LargePage::~LargePage() = default;
// static
-LargePage* LargePage::Create(LargePageSpace* space, size_t size) {
- DCHECK(space);
+LargePage* LargePage::Create(PageBackend* page_backend, LargePageSpace* space,
+ size_t size) {
+ DCHECK_NOT_NULL(page_backend);
+ DCHECK_NOT_NULL(space);
DCHECK_LE(kLargeObjectSizeThreshold, size);
+
const size_t page_header_size =
RoundUp(sizeof(LargePage), kAllocationGranularity);
const size_t allocation_size = page_header_size + size;
- Heap* heap = space->raw_heap()->heap();
- void* memory = heap->page_backend()->AllocateLargePageMemory(allocation_size);
+ auto* heap = space->raw_heap()->heap();
+ void* memory = page_backend->AllocateLargePageMemory(allocation_size);
LargePage* page = new (memory) LargePage(heap, space, size);
- space->AddPage(page);
return page;
}
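
Note the contract change running through this hunk: Create() no longer attaches the page to its space. A sketch of the caller-side pattern (see AllocateLargeObject in object-allocator.cc further down for the real call site):

  NormalPage* CreateAndAttach(PageBackend* page_backend,
                              NormalPageSpace* space) {
    auto* page = NormalPage::Create(page_backend, space);  // detached state
    space->AddPage(page);  // attach explicitly; AddPage now locks pages_mutex_
    return page;
  }
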
diff --git a/chromium/v8/src/heap/cppgc/heap-page.h b/chromium/v8/src/heap/cppgc/heap-page.h
index c676bc4bde0..7559d5f1ece 100644
--- a/chromium/v8/src/heap/cppgc/heap-page.h
+++ b/chromium/v8/src/heap/cppgc/heap-page.h
@@ -17,19 +17,24 @@ namespace internal {
class BaseSpace;
class NormalPageSpace;
class LargePageSpace;
-class Heap;
+class HeapBase;
class PageBackend;
class V8_EXPORT_PRIVATE BasePage {
public:
- static BasePage* FromPayload(void*);
- static const BasePage* FromPayload(const void*);
+ static inline BasePage* FromPayload(void*);
+ static inline const BasePage* FromPayload(const void*);
+
+ static BasePage* FromInnerAddress(const HeapBase*, void*);
+ static const BasePage* FromInnerAddress(const HeapBase*, const void*);
+
+ static void Destroy(BasePage*);
BasePage(const BasePage&) = delete;
BasePage& operator=(const BasePage&) = delete;
- Heap* heap() { return heap_; }
- const Heap* heap() const { return heap_; }
+ HeapBase* heap() { return heap_; }
+ const HeapBase* heap() const { return heap_; }
BaseSpace* space() { return space_; }
const BaseSpace* space() const { return space_; }
@@ -37,16 +42,29 @@ class V8_EXPORT_PRIVATE BasePage {
bool is_large() const { return type_ == PageType::kLarge; }
+ Address PayloadStart();
+ ConstAddress PayloadStart() const;
+ Address PayloadEnd();
+ ConstAddress PayloadEnd() const;
+
  // |address| must refer to a real object.
- HeapObjectHeader* ObjectHeaderFromInnerAddress(void* address);
- const HeapObjectHeader* ObjectHeaderFromInnerAddress(const void* address);
+ HeapObjectHeader& ObjectHeaderFromInnerAddress(void* address) const;
+ const HeapObjectHeader& ObjectHeaderFromInnerAddress(
+ const void* address) const;
+
+  // |address| is guaranteed to point into the page, but not necessarily into
+  // an object's payload. Returns nullptr when pointing into free-list entries
+  // and the valid header otherwise.
+ HeapObjectHeader* TryObjectHeaderFromInnerAddress(void* address) const;
+ const HeapObjectHeader* TryObjectHeaderFromInnerAddress(
+ const void* address) const;
protected:
enum class PageType { kNormal, kLarge };
- BasePage(Heap*, BaseSpace*, PageType);
+ BasePage(HeapBase*, BaseSpace*, PageType);
private:
- Heap* heap_;
+ HeapBase* heap_;
BaseSpace* space_;
PageType type_;
};
@@ -98,8 +116,8 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
using iterator = IteratorImpl<HeapObjectHeader>;
using const_iterator = IteratorImpl<const HeapObjectHeader>;
- // Allocates a new page.
- static NormalPage* Create(NormalPageSpace*);
+ // Allocates a new page in the detached state.
+ static NormalPage* Create(PageBackend*, NormalPageSpace*);
// Destroys and frees the page. The page must be detached from the
// corresponding space (i.e. be swept when called).
static void Destroy(NormalPage*);
@@ -130,13 +148,17 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
static size_t PayloadSize();
+ bool PayloadContains(ConstAddress address) const {
+ return (PayloadStart() <= address) && (address < PayloadEnd());
+ }
+
ObjectStartBitmap& object_start_bitmap() { return object_start_bitmap_; }
const ObjectStartBitmap& object_start_bitmap() const {
return object_start_bitmap_;
}
private:
- NormalPage(Heap* heap, BaseSpace* space);
+ NormalPage(HeapBase* heap, BaseSpace* space);
~NormalPage();
ObjectStartBitmap object_start_bitmap_;
@@ -144,8 +166,8 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
class V8_EXPORT_PRIVATE LargePage final : public BasePage {
public:
- // Allocates a new page.
- static LargePage* Create(LargePageSpace*, size_t);
+ // Allocates a new page in the detached state.
+ static LargePage* Create(PageBackend*, LargePageSpace*, size_t);
// Destroys and frees the page. The page must be detached from the
// corresponding space (i.e. be swept when called).
static void Destroy(LargePage*);
@@ -168,8 +190,12 @@ class V8_EXPORT_PRIVATE LargePage final : public BasePage {
size_t PayloadSize() const { return payload_size_; }
+ bool PayloadContains(ConstAddress address) const {
+ return (PayloadStart() <= address) && (address < PayloadEnd());
+ }
+
private:
- LargePage(Heap* heap, BaseSpace* space, size_t);
+ LargePage(HeapBase* heap, BaseSpace* space, size_t);
~LargePage();
size_t payload_size_;
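
A hypothetical caller contrasting the two lookups declared above; the Try variant is the one conservative scanning needs, since an arbitrary inner address may land on a free-list entry:

  void Lookup(const HeapBase* heap, void* inner) {
    BasePage* page = BasePage::FromInnerAddress(heap, inner);
    if (HeapObjectHeader* header =
            page->TryObjectHeaderFromInnerAddress(inner)) {
      // |inner| points into a live object. With that knowledge the precise
      // variant (which DCHECKs against free-list entries) is also valid:
      HeapObjectHeader& same = page->ObjectHeaderFromInnerAddress(inner);
      (void)same;
    }
  }
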
diff --git a/chromium/v8/src/heap/cppgc/heap-space.cc b/chromium/v8/src/heap/cppgc/heap-space.cc
index 70ddb935314..3a213dc18ad 100644
--- a/chromium/v8/src/heap/cppgc/heap-space.cc
+++ b/chromium/v8/src/heap/cppgc/heap-space.cc
@@ -7,7 +7,8 @@
#include <algorithm>
#include "src/base/logging.h"
-#include "src/heap/cppgc/heap-page.h"
+#include "src/base/platform/mutex.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/object-start-bitmap-inl.h"
namespace cppgc {
@@ -17,11 +18,13 @@ BaseSpace::BaseSpace(RawHeap* heap, size_t index, PageType type)
: heap_(heap), index_(index), type_(type) {}
void BaseSpace::AddPage(BasePage* page) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
DCHECK_EQ(pages_.cend(), std::find(pages_.cbegin(), pages_.cend(), page));
pages_.push_back(page);
}
void BaseSpace::RemovePage(BasePage* page) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&pages_mutex_);
auto it = std::find(pages_.cbegin(), pages_.cend(), page);
DCHECK_NE(pages_.cend(), it);
pages_.erase(it);
@@ -36,21 +39,6 @@ BaseSpace::Pages BaseSpace::RemoveAllPages() {
NormalPageSpace::NormalPageSpace(RawHeap* heap, size_t index)
: BaseSpace(heap, index, PageType::kNormal) {}
-void NormalPageSpace::AddToFreeList(void* address, size_t size) {
- free_list_.Add({address, size});
- NormalPage::From(BasePage::FromPayload(address))
- ->object_start_bitmap()
- .SetBit(static_cast<Address>(address));
-}
-
-void NormalPageSpace::ResetLinearAllocationBuffer() {
- if (current_lab_.size()) {
- DCHECK_NOT_NULL(current_lab_.start());
- AddToFreeList(current_lab_.start(), current_lab_.size());
- current_lab_.Set(nullptr, 0);
- }
-}
-
LargePageSpace::LargePageSpace(RawHeap* heap, size_t index)
: BaseSpace(heap, index, PageType::kLarge) {}
diff --git a/chromium/v8/src/heap/cppgc/heap-space.h b/chromium/v8/src/heap/cppgc/heap-space.h
index d84207c2cd4..a7e50d4f48d 100644
--- a/chromium/v8/src/heap/cppgc/heap-space.h
+++ b/chromium/v8/src/heap/cppgc/heap-space.h
@@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/free-list.h"
namespace cppgc {
@@ -53,6 +54,7 @@ class V8_EXPORT_PRIVATE BaseSpace {
private:
RawHeap* heap_;
Pages pages_;
+ v8::base::Mutex pages_mutex_;
const size_t index_;
const PageType type_;
};
@@ -92,9 +94,6 @@ class V8_EXPORT_PRIVATE NormalPageSpace final : public BaseSpace {
NormalPageSpace(RawHeap* heap, size_t index);
- void AddToFreeList(void*, size_t);
- void ResetLinearAllocationBuffer();
-
LinearAllocationBuffer& linear_allocation_buffer() { return current_lab_; }
const LinearAllocationBuffer& linear_allocation_buffer() const {
return current_lab_;
diff --git a/chromium/v8/src/heap/cppgc/heap.cc b/chromium/v8/src/heap/cppgc/heap.cc
index ee400cee28c..431ad8df668 100644
--- a/chromium/v8/src/heap/cppgc/heap.cc
+++ b/chromium/v8/src/heap/cppgc/heap.cc
@@ -4,15 +4,13 @@
#include "src/heap/cppgc/heap.h"
-#include <memory>
-
-#include "src/base/platform/platform.h"
+#include "src/heap/base/stack.h"
+#include "src/heap/cppgc/garbage-collector.h"
+#include "src/heap/cppgc/gc-invoker.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
-#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
-#include "src/heap/cppgc/stack.h"
-#include "src/heap/cppgc/sweeper.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
namespace cppgc {
@@ -31,49 +29,49 @@ void VerifyCustomSpaces(
} // namespace
-std::unique_ptr<Heap> Heap::Create(cppgc::Heap::HeapOptions options) {
+std::unique_ptr<Heap> Heap::Create(std::shared_ptr<cppgc::Platform> platform,
+ cppgc::Heap::HeapOptions options) {
+ DCHECK(platform.get());
VerifyCustomSpaces(options.custom_spaces);
- return std::make_unique<internal::Heap>(options.custom_spaces.size());
+ return std::make_unique<internal::Heap>(std::move(platform),
+ std::move(options));
}
void Heap::ForceGarbageCollectionSlow(const char* source, const char* reason,
Heap::StackState stack_state) {
- internal::Heap::From(this)->CollectGarbage({stack_state});
+ internal::Heap::From(this)->CollectGarbage(
+ {internal::GarbageCollector::Config::CollectionType::kMajor,
+ stack_state});
+}
+
+AllocationHandle& Heap::GetAllocationHandle() {
+ return internal::Heap::From(this)->object_allocator();
}
namespace internal {
namespace {
-class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
- friend class HeapVisitor<ObjectSizeCounter>;
+class Unmarker final : private HeapVisitor<Unmarker> {
+ friend class HeapVisitor<Unmarker>;
public:
- size_t GetSize(RawHeap* heap) {
- Traverse(heap);
- return accumulated_size_;
- }
+ explicit Unmarker(RawHeap* heap) { Traverse(heap); }
private:
- static size_t ObjectSize(const HeapObjectHeader* header) {
- const size_t size =
- header->IsLargeObject()
- ? static_cast<const LargePage*>(BasePage::FromPayload(header))
- ->PayloadSize()
- : header->GetSize();
- DCHECK_GE(size, sizeof(HeapObjectHeader));
- return size - sizeof(HeapObjectHeader);
- }
-
bool VisitHeapObjectHeader(HeapObjectHeader* header) {
- if (header->IsFree()) return true;
- accumulated_size_ += ObjectSize(header);
+ if (header->IsMarked()) header->Unmark();
return true;
}
-
- size_t accumulated_size_ = 0;
};
+void CheckConfig(Heap::Config config) {
+ CHECK_WITH_MSG(
+ (config.collection_type != Heap::Config::CollectionType::kMinor) ||
+ (config.stack_state == Heap::Config::StackState::kNoHeapPointers),
+ "Minor GCs with stack is currently not supported");
+}
+
} // namespace
// static
@@ -81,56 +79,50 @@ cppgc::LivenessBroker LivenessBrokerFactory::Create() {
return cppgc::LivenessBroker();
}
-Heap::Heap(size_t custom_spaces)
- : raw_heap_(this, custom_spaces),
- page_backend_(std::make_unique<PageBackend>(&system_allocator_)),
- object_allocator_(&raw_heap_),
- sweeper_(&raw_heap_),
- stack_(std::make_unique<Stack>(v8::base::Stack::GetStackStart())),
- prefinalizer_handler_(std::make_unique<PreFinalizerHandler>()) {}
+Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
+ cppgc::Heap::HeapOptions options)
+ : HeapBase(platform, options.custom_spaces.size()),
+ gc_invoker_(this, platform_.get(), options.stack_support),
+ growing_(&gc_invoker_, stats_collector_.get(),
+ options.resource_constraints) {}
Heap::~Heap() {
- NoGCScope no_gc(this);
+ NoGCScope no_gc(*this);
// Finish already running GC if any, but don't finalize live objects.
sweeper_.Finish();
}
-void Heap::CollectGarbage(GCConfig config) {
+void Heap::CollectGarbage(Config config) {
+ CheckConfig(config);
+
if (in_no_gc_scope()) return;
epoch_++;
- // TODO(chromium:1056170): Replace with proper mark-sweep algorithm.
+#if defined(CPPGC_YOUNG_GENERATION)
+ if (config.collection_type == Config::CollectionType::kMajor)
+ Unmarker unmarker(&raw_heap());
+#endif
+
// "Marking".
- marker_ = std::make_unique<Marker>(this);
- marker_->StartMarking(Marker::MarkingConfig(config.stack_state));
- marker_->FinishMarking();
+ marker_ = std::make_unique<Marker>(AsBase());
+ const Marker::MarkingConfig marking_config{
+ config.collection_type, config.stack_state, config.marking_type};
+ marker_->StartMarking(marking_config);
+ marker_->FinishMarking(marking_config);
// "Sweeping and finalization".
{
// Pre finalizers are forbidden from allocating objects
- NoAllocationScope no_allocation_scope_(this);
+ ObjectAllocator::NoAllocationScope no_allocation_scope_(object_allocator_);
marker_->ProcessWeakness();
prefinalizer_handler_->InvokePreFinalizers();
}
marker_.reset();
{
- NoGCScope no_gc(this);
- sweeper_.Start(Sweeper::Config::kAtomic);
+ NoGCScope no_gc(*this);
+ sweeper_.Start(config.sweeping_type);
}
}
-size_t Heap::ObjectPayloadSize() const {
- return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
-}
-
-Heap::NoGCScope::NoGCScope(Heap* heap) : heap_(heap) { heap_->no_gc_scope_++; }
-
-Heap::NoGCScope::~NoGCScope() { heap_->no_gc_scope_--; }
-
-Heap::NoAllocationScope::NoAllocationScope(Heap* heap) : heap_(heap) {
- heap_->no_allocation_scope_++;
-}
-Heap::NoAllocationScope::~NoAllocationScope() { heap_->no_allocation_scope_--; }
-
} // namespace internal
} // namespace cppgc
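
Putting the pieces of CollectGarbage() together, a minimal sketch of driving it through the public API; names follow this diff, and HeapOptions::Default() follows include/cppgc/heap.h:

  #include <memory>
  #include <utility>

  #include "include/cppgc/heap.h"

  void RunExampleGC(std::shared_ptr<cppgc::Platform> platform) {
    auto heap = cppgc::Heap::Create(std::move(platform),
                                    cppgc::Heap::HeapOptions::Default());
    heap->ForceGarbageCollectionSlow(
        "example", "testing", cppgc::Heap::StackState::kNoHeapPointers);
    // Internally: CheckConfig -> Unmarker (major GC under
    // CPPGC_YOUNG_GENERATION only) -> StartMarking/FinishMarking ->
    // ProcessWeakness + prefinalizers under a NoAllocationScope ->
    // Sweeper::Start(config.sweeping_type).
  }
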
diff --git a/chromium/v8/src/heap/cppgc/heap.h b/chromium/v8/src/heap/cppgc/heap.h
index fa19b74be53..f96f81e3217 100644
--- a/chromium/v8/src/heap/cppgc/heap.h
+++ b/chromium/v8/src/heap/cppgc/heap.h
@@ -5,143 +5,47 @@
#ifndef V8_HEAP_CPPGC_HEAP_H_
#define V8_HEAP_CPPGC_HEAP_H_
-#include <memory>
-#include <vector>
-
#include "include/cppgc/heap.h"
-#include "include/cppgc/internal/gc-info.h"
-#include "include/cppgc/internal/persistent-node.h"
#include "include/cppgc/liveness-broker.h"
-#include "src/base/page-allocator.h"
-#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/marker.h"
-#include "src/heap/cppgc/object-allocator.h"
-#include "src/heap/cppgc/page-memory.h"
-#include "src/heap/cppgc/prefinalizer-handler.h"
-#include "src/heap/cppgc/raw-heap.h"
-#include "src/heap/cppgc/sweeper.h"
+#include "include/cppgc/macros.h"
+#include "src/heap/cppgc/garbage-collector.h"
+#include "src/heap/cppgc/gc-invoker.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/heap-growing.h"
namespace cppgc {
namespace internal {
-class Stack;
-
class V8_EXPORT_PRIVATE LivenessBrokerFactory {
public:
static LivenessBroker Create();
};
-class V8_EXPORT_PRIVATE Heap final : public cppgc::Heap {
+class V8_EXPORT_PRIVATE Heap final : public HeapBase,
+ public cppgc::Heap,
+ public GarbageCollector {
public:
- // NoGCScope allows going over limits and avoids triggering garbage
- // collection triggered through allocations or even explicitly.
- class V8_EXPORT_PRIVATE NoGCScope final {
- CPPGC_STACK_ALLOCATED();
-
- public:
- explicit NoGCScope(Heap* heap);
- ~NoGCScope();
-
- NoGCScope(const NoGCScope&) = delete;
- NoGCScope& operator=(const NoGCScope&) = delete;
-
- private:
- Heap* const heap_;
- };
-
- // NoAllocationScope is used in debug mode to catch unwanted allocations. E.g.
- // allocations during GC.
- class V8_EXPORT_PRIVATE NoAllocationScope final {
- CPPGC_STACK_ALLOCATED();
-
- public:
- explicit NoAllocationScope(Heap* heap);
- ~NoAllocationScope();
-
- NoAllocationScope(const NoAllocationScope&) = delete;
- NoAllocationScope& operator=(const NoAllocationScope&) = delete;
-
- private:
- Heap* const heap_;
- };
-
- struct GCConfig {
- using StackState = Heap::StackState;
-
- static GCConfig Default() { return {StackState::kMayContainHeapPointers}; }
-
- StackState stack_state = StackState::kMayContainHeapPointers;
- };
-
static Heap* From(cppgc::Heap* heap) { return static_cast<Heap*>(heap); }
-
- explicit Heap(size_t custom_spaces);
- ~Heap() final;
-
- inline void* Allocate(size_t size, GCInfoIndex index);
- inline void* Allocate(size_t size, GCInfoIndex index,
- CustomSpaceIndex space_index);
-
- void CollectGarbage(GCConfig config = GCConfig::Default());
-
- PreFinalizerHandler* prefinalizer_handler() {
- return prefinalizer_handler_.get();
- }
-
- PersistentRegion& GetStrongPersistentRegion() {
- return strong_persistent_region_;
+ static const Heap* From(const cppgc::Heap* heap) {
+ return static_cast<const Heap*>(heap);
}
- const PersistentRegion& GetStrongPersistentRegion() const {
- return strong_persistent_region_;
- }
- PersistentRegion& GetWeakPersistentRegion() {
- return weak_persistent_region_;
- }
- const PersistentRegion& GetWeakPersistentRegion() const {
- return weak_persistent_region_;
- }
-
- RawHeap& raw_heap() { return raw_heap_; }
- const RawHeap& raw_heap() const { return raw_heap_; }
- Stack* stack() { return stack_.get(); }
-
- PageBackend* page_backend() { return page_backend_.get(); }
- const PageBackend* page_backend() const { return page_backend_.get(); }
-
- Sweeper& sweeper() { return sweeper_; }
+ Heap(std::shared_ptr<cppgc::Platform> platform,
+ cppgc::Heap::HeapOptions options);
+ ~Heap() final;
- size_t epoch() const { return epoch_; }
+ HeapBase& AsBase() { return *this; }
+ const HeapBase& AsBase() const { return *this; }
- size_t ObjectPayloadSize() const;
+ void CollectGarbage(Config config) final;
- // Temporary getter until proper visitation of on-stack objects is
- // implemented.
- std::vector<HeapObjectHeader*>& objects() { return objects_; }
+ size_t epoch() const final { return epoch_; }
private:
- bool in_no_gc_scope() const { return no_gc_scope_ > 0; }
- bool is_allocation_allowed() const { return no_allocation_scope_ == 0; }
-
- RawHeap raw_heap_;
-
- v8::base::PageAllocator system_allocator_;
- std::unique_ptr<PageBackend> page_backend_;
- ObjectAllocator object_allocator_;
- Sweeper sweeper_;
-
- std::unique_ptr<Stack> stack_;
- std::unique_ptr<PreFinalizerHandler> prefinalizer_handler_;
- std::unique_ptr<Marker> marker_;
- std::vector<HeapObjectHeader*> objects_;
-
- PersistentRegion strong_persistent_region_;
- PersistentRegion weak_persistent_region_;
+ GCInvoker gc_invoker_;
+ HeapGrowing growing_;
size_t epoch_ = 0;
-
- size_t no_gc_scope_ = 0;
- size_t no_allocation_scope_ = 0;
};
} // namespace internal
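
The class now splits responsibilities across several collaborators; roughly (a sketch of the relationships, not actual declarations):

  // HeapBase    -- owns spaces, page backend, stack, stats collector, sweeper
  // Heap        -- HeapBase + cppgc::Heap + GarbageCollector (this class)
  // GCInvoker   -- decides how/when to schedule CollectGarbage()
  // HeapGrowing -- observes the StatsCollector; when the allocated size
  //                crosses limit(), asks the GCInvoker to collect
  //
  // allocation -> StatsCollector -> HeapGrowing -> GCInvoker -> CollectGarbage
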
diff --git a/chromium/v8/src/heap/cppgc/marker.cc b/chromium/v8/src/heap/cppgc/marker.cc
index 5a30c89f0dd..1ba6d766a4f 100644
--- a/chromium/v8/src/heap/cppgc/marker.cc
+++ b/chromium/v8/src/heap/cppgc/marker.cc
@@ -4,14 +4,75 @@
#include "src/heap/cppgc/marker.h"
+#include "include/cppgc/internal/process-heap.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page-inl.h"
+#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/stats-collector.h"
+
+#if defined(CPPGC_CAGED_HEAP)
+#include "include/cppgc/internal/caged-heap-local-data.h"
+#endif
namespace cppgc {
namespace internal {
namespace {
+
+void EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
+ HeapBase& heap) {
+ if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
+ config.marking_type ==
+ Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ ProcessHeap::EnterIncrementalOrConcurrentMarking();
+ }
+#if defined(CPPGC_CAGED_HEAP)
+ heap.caged_heap().local_data().is_marking_in_progress = true;
+#endif
+}
+
+void ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
+ HeapBase& heap) {
+ if (config.marking_type == Marker::MarkingConfig::MarkingType::kIncremental ||
+ config.marking_type ==
+ Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
+ ProcessHeap::ExitIncrementalOrConcurrentMarking();
+ }
+#if defined(CPPGC_CAGED_HEAP)
+ heap.caged_heap().local_data().is_marking_in_progress = false;
+#endif
+}
+
+// Visit remembered set that was recorded in the generational barrier.
+void VisitRememberedSlots(HeapBase& heap, MarkingVisitor* visitor) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ for (void* slot : heap.remembered_slots()) {
+ auto& slot_header = BasePage::FromInnerAddress(&heap, slot)
+ ->ObjectHeaderFromInnerAddress(slot);
+ if (slot_header.IsYoung()) continue;
+    // The design of the young generation requires collections to be executed
+    // at the top level (with the guarantee that no objects are currently in
+    // construction). This can be ensured by running young GCs from safe points
+ // or by reintroducing nested allocation scopes that avoid finalization.
+ DCHECK(!MarkingVisitor::IsInConstruction(slot_header));
+
+ void* value = *reinterpret_cast<void**>(slot);
+ visitor->DynamicallyMarkAddress(static_cast<Address>(value));
+ }
+#endif
+}
+
+// Assumes that all spaces have their LABs reset.
+void ResetRememberedSet(HeapBase& heap) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ auto& local_data = heap.caged_heap().local_data();
+ local_data.age_table.Reset(&heap.caged_heap().allocator());
+ heap.remembered_slots().clear();
+#endif
+}
+
template <typename Worklist, typename Callback>
bool DrainWorklistWithDeadline(v8::base::TimeTicks deadline, Worklist* worklist,
Callback callback, int task_id) {
@@ -31,11 +92,12 @@ bool DrainWorklistWithDeadline(v8::base::TimeTicks deadline, Worklist* worklist,
}
return true;
}
+
} // namespace
constexpr int Marker::kMutatorThreadId;
-Marker::Marker(Heap* heap)
+Marker::Marker(HeapBase& heap)
: heap_(heap), marking_visitor_(CreateMutatorThreadMarkingVisitor()) {}
Marker::~Marker() {
@@ -44,17 +106,15 @@ Marker::~Marker() {
// and should thus already be marked.
if (!not_fully_constructed_worklist_.IsEmpty()) {
#if DEBUG
- DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state_);
+ DCHECK_NE(MarkingConfig::StackState::kNoHeapPointers, config_.stack_state);
NotFullyConstructedItem item;
NotFullyConstructedWorklist::View view(&not_fully_constructed_worklist_,
kMutatorThreadId);
while (view.Pop(&item)) {
- // TODO(chromium:1056170): uncomment following check after implementing
- // FromInnerAddress.
- //
- // HeapObjectHeader* const header = HeapObjectHeader::FromInnerAddress(
- // reinterpret_cast<Address>(const_cast<void*>(item)));
- // DCHECK(header->IsMarked())
+ const HeapObjectHeader& header =
+ BasePage::FromPayload(item)->ObjectHeaderFromInnerAddress(
+ static_cast<ConstAddress>(item));
+ DCHECK(header.IsMarked());
}
#else
not_fully_constructed_worklist_.Clear();
@@ -63,19 +123,40 @@ Marker::~Marker() {
}
void Marker::StartMarking(MarkingConfig config) {
+ heap().stats_collector()->NotifyMarkingStarted();
+
config_ = config;
VisitRoots();
+ EnterIncrementalMarkingIfNeeded(config, heap());
}
-void Marker::FinishMarking() {
- if (config_.stack_state_ == MarkingConfig::StackState::kNoHeapPointers) {
+void Marker::EnterAtomicPause(MarkingConfig config) {
+ ExitIncrementalMarkingIfNeeded(config_, heap());
+ config_ = config;
+
+ // VisitRoots also resets the LABs.
+ VisitRoots();
+ if (config_.stack_state == MarkingConfig::StackState::kNoHeapPointers) {
FlushNotFullyConstructedObjects();
+ } else {
+ MarkNotFullyConstructedObjects();
}
+}
+
+void Marker::LeaveAtomicPause() {
+ ResetRememberedSet(heap());
+ heap().stats_collector()->NotifyMarkingCompleted(
+ marking_visitor_->marked_bytes());
+}
+
+void Marker::FinishMarking(MarkingConfig config) {
+ EnterAtomicPause(config);
AdvanceMarkingWithDeadline(v8::base::TimeDelta::Max());
+ LeaveAtomicPause();
}
void Marker::ProcessWeakness() {
- heap_->GetWeakPersistentRegion().Trace(marking_visitor_.get());
+ heap().GetWeakPersistentRegion().Trace(marking_visitor_.get());
// Call weak callbacks on objects that may now be pointing to dead objects.
WeakCallbackItem item;
@@ -89,9 +170,17 @@ void Marker::ProcessWeakness() {
}
void Marker::VisitRoots() {
- heap_->GetStrongPersistentRegion().Trace(marking_visitor_.get());
- if (config_.stack_state_ != MarkingConfig::StackState::kNoHeapPointers)
- heap_->stack()->IteratePointers(marking_visitor_.get());
+  // Reset LABs before scanning roots so that ObjectStartBitmap lookups do
+  // not have to consider LABs.
+ heap().object_allocator().ResetLinearAllocationBuffers();
+
+ heap().GetStrongPersistentRegion().Trace(marking_visitor_.get());
+ if (config_.stack_state != MarkingConfig::StackState::kNoHeapPointers) {
+ heap().stack()->IteratePointers(marking_visitor_.get());
+ }
+ if (config_.collection_type == MarkingConfig::CollectionType::kMinor) {
+ VisitRememberedSlots(heap(), marking_visitor_.get());
+ }
}
std::unique_ptr<MutatorThreadMarkingVisitor>
@@ -127,6 +216,19 @@ bool Marker::AdvanceMarkingWithDeadline(v8::base::TimeDelta duration) {
},
kMutatorThreadId))
return false;
+
+ if (!DrainWorklistWithDeadline(
+ deadline, &write_barrier_worklist_,
+ [visitor](HeapObjectHeader* header) {
+ DCHECK(header);
+ DCHECK(!MutatorThreadMarkingVisitor::IsInConstruction(*header));
+ const GCInfo& gcinfo =
+ GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex());
+ gcinfo.trace(visitor, header->Payload());
+ visitor->AccountMarkedBytes(*header);
+ },
+ kMutatorThreadId))
+ return false;
} while (!marking_worklist_.IsLocalViewEmpty(kMutatorThreadId));
return true;
@@ -141,10 +243,20 @@ void Marker::FlushNotFullyConstructedObjects() {
DCHECK(not_fully_constructed_worklist_.IsLocalViewEmpty(kMutatorThreadId));
}
+void Marker::MarkNotFullyConstructedObjects() {
+ NotFullyConstructedItem item;
+ NotFullyConstructedWorklist::View view(&not_fully_constructed_worklist_,
+ kMutatorThreadId);
+ while (view.Pop(&item)) {
+ marking_visitor_->TraceConservativelyIfNeeded(item);
+ }
+}
+
void Marker::ClearAllWorklistsForTesting() {
marking_worklist_.Clear();
not_fully_constructed_worklist_.Clear();
previously_not_fully_constructed_worklist_.Clear();
+ write_barrier_worklist_.Clear();
weak_callback_worklist_.Clear();
}
diff --git a/chromium/v8/src/heap/cppgc/marker.h b/chromium/v8/src/heap/cppgc/marker.h
index c18c23df2ca..3edba06c4b6 100644
--- a/chromium/v8/src/heap/cppgc/marker.h
+++ b/chromium/v8/src/heap/cppgc/marker.h
@@ -16,9 +16,19 @@
namespace cppgc {
namespace internal {
-class Heap;
+class HeapBase;
+class HeapObjectHeader;
class MutatorThreadMarkingVisitor;
+// Marking algorithm. Example of a valid call sequence creating the marking
+// phase:
+// 1. StartMarking()
+// 2. AdvanceMarkingWithDeadline() [Optional, depending on environment.]
+// 3. EnterAtomicPause()
+// 4. AdvanceMarkingWithDeadline()
+// 5. LeaveAtomicPause()
+//
+// Alternatively, FinishMarking combines steps 3.-5.
class V8_EXPORT_PRIVATE Marker {
static constexpr int kNumConcurrentMarkers = 0;
static constexpr int kNumMarkers = 1 + kNumConcurrentMarkers;
@@ -41,34 +51,29 @@ class V8_EXPORT_PRIVATE Marker {
Worklist<NotFullyConstructedItem, 16 /* local entries */, kNumMarkers>;
using WeakCallbackWorklist =
Worklist<WeakCallbackItem, 64 /* local entries */, kNumMarkers>;
+ using WriteBarrierWorklist =
+      Worklist<HeapObjectHeader*, 64 /* local entries */, kNumMarkers>;
struct MarkingConfig {
+ enum class CollectionType : uint8_t {
+ kMinor,
+ kMajor,
+ };
using StackState = cppgc::Heap::StackState;
- enum class IncrementalMarking : uint8_t { kDisabled };
- enum class ConcurrentMarking : uint8_t { kDisabled };
-
- static MarkingConfig Default() {
- return {StackState::kMayContainHeapPointers,
- IncrementalMarking::kDisabled, ConcurrentMarking::kDisabled};
- }
-
- explicit MarkingConfig(StackState stack_state)
- : MarkingConfig(stack_state, IncrementalMarking::kDisabled,
- ConcurrentMarking::kDisabled) {}
-
- MarkingConfig(StackState stack_state,
- IncrementalMarking incremental_marking_state,
- ConcurrentMarking concurrent_marking_state)
- : stack_state_(stack_state),
- incremental_marking_state_(incremental_marking_state),
- concurrent_marking_state_(concurrent_marking_state) {}
-
- StackState stack_state_;
- IncrementalMarking incremental_marking_state_;
- ConcurrentMarking concurrent_marking_state_;
+ enum MarkingType : uint8_t {
+ kAtomic,
+ kIncremental,
+ kIncrementalAndConcurrent
+ };
+
+ static constexpr MarkingConfig Default() { return {}; }
+
+ CollectionType collection_type = CollectionType::kMajor;
+ StackState stack_state = StackState::kMayContainHeapPointers;
+ MarkingType marking_type = MarkingType::kAtomic;
};
- explicit Marker(Heap* heap);
+ explicit Marker(HeapBase& heap);
virtual ~Marker();
Marker(const Marker&) = delete;
@@ -77,34 +82,56 @@ class V8_EXPORT_PRIVATE Marker {
// Initialize marking according to the given config. This method will
// trigger incremental/concurrent marking if needed.
void StartMarking(MarkingConfig config);
- // Finalize marking. This method stops incremental/concurrent marking
- // if exsists and performs atomic pause marking.
- void FinishMarking();
+
+ // Signals entering the atomic marking pause. The method
+ // - stops incremental/concurrent marking;
+ // - flushes back any in-construction worklists if needed;
+  // - updates the MarkingConfig if the stack state has changed.
+ void EnterAtomicPause(MarkingConfig config);
+
+ // Makes marking progress.
+ virtual bool AdvanceMarkingWithDeadline(v8::base::TimeDelta);
+
+ // Signals leaving the atomic marking pause. This method expects no more
+ // objects to be marked and merely updates marking states if needed.
+ void LeaveAtomicPause();
+
+ // Combines:
+ // - EnterAtomicPause()
+ // - AdvanceMarkingWithDeadline()
+ // - LeaveAtomicPause()
+ void FinishMarking(MarkingConfig config);
void ProcessWeakness();
- Heap* heap() { return heap_; }
+ HeapBase& heap() { return heap_; }
MarkingWorklist* marking_worklist() { return &marking_worklist_; }
NotFullyConstructedWorklist* not_fully_constructed_worklist() {
return &not_fully_constructed_worklist_;
}
+ WriteBarrierWorklist* write_barrier_worklist() {
+ return &write_barrier_worklist_;
+ }
WeakCallbackWorklist* weak_callback_worklist() {
return &weak_callback_worklist_;
}
void ClearAllWorklistsForTesting();
+ MutatorThreadMarkingVisitor* GetMarkingVisitorForTesting() {
+ return marking_visitor_.get();
+ }
+
protected:
virtual std::unique_ptr<MutatorThreadMarkingVisitor>
CreateMutatorThreadMarkingVisitor();
- private:
void VisitRoots();
- bool AdvanceMarkingWithDeadline(v8::base::TimeDelta);
void FlushNotFullyConstructedObjects();
+ void MarkNotFullyConstructedObjects();
- Heap* const heap_;
+ HeapBase& heap_;
MarkingConfig config_ = MarkingConfig::Default();
std::unique_ptr<MutatorThreadMarkingVisitor> marking_visitor_;
@@ -112,6 +139,7 @@ class V8_EXPORT_PRIVATE Marker {
MarkingWorklist marking_worklist_;
NotFullyConstructedWorklist not_fully_constructed_worklist_;
NotFullyConstructedWorklist previously_not_fully_constructed_worklist_;
+ WriteBarrierWorklist write_barrier_worklist_;
WeakCallbackWorklist weak_callback_worklist_;
};
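
A sketch of the numbered sequence from the class comment; the driver loop is hypothetical (HaveIdleTime() is an assumed predicate; inside cppgc, Heap::CollectGarbage currently runs the atomic variant via FinishMarking):

  void DriveMarking(HeapBase& heap, Marker::MarkingConfig config,
                    v8::base::TimeDelta step_budget) {
    Marker marker(heap);
    marker.StartMarking(config);                       // 1. maybe incremental
    while (HaveIdleTime())                             //    hypothetical
      marker.AdvanceMarkingWithDeadline(step_budget);  // 2. optional steps
    marker.EnterAtomicPause(config);                   // 3. stop the world
    marker.AdvanceMarkingWithDeadline(
        v8::base::TimeDelta::Max());                   // 4. drain worklists
    marker.LeaveAtomicPause();                         // 5. update state/stats
    // FinishMarking(config) is shorthand for steps 3-5.
  }
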
diff --git a/chromium/v8/src/heap/cppgc/marking-visitor.cc b/chromium/v8/src/heap/cppgc/marking-visitor.cc
index 9647f9b3ca3..37d88e65ee3 100644
--- a/chromium/v8/src/heap/cppgc/marking-visitor.cc
+++ b/chromium/v8/src/heap/cppgc/marking-visitor.cc
@@ -5,8 +5,8 @@
#include "src/heap/cppgc/marking-visitor.h"
#include "include/cppgc/garbage-collected.h"
-#include "include/cppgc/internal/accessors.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/heap.h"
namespace cppgc {
@@ -17,13 +17,14 @@ bool MarkingVisitor::IsInConstruction(const HeapObjectHeader& header) {
return header.IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>();
}
-MarkingVisitor::MarkingVisitor(Marker* marking_handler, int task_id)
- : marker_(marking_handler),
- marking_worklist_(marking_handler->marking_worklist(), task_id),
- not_fully_constructed_worklist_(
- marking_handler->not_fully_constructed_worklist(), task_id),
- weak_callback_worklist_(marking_handler->weak_callback_worklist(),
- task_id) {}
+MarkingVisitor::MarkingVisitor(
+ HeapBase& heap, Marker::MarkingWorklist* marking_worklist,
+ Marker::NotFullyConstructedWorklist* not_fully_constructed_worklist,
+ Marker::WeakCallbackWorklist* weak_callback_worklist, int task_id)
+ : ConservativeTracingVisitor(heap, *heap.page_backend()),
+ marking_worklist_(marking_worklist, task_id),
+ not_fully_constructed_worklist_(not_fully_constructed_worklist, task_id),
+ weak_callback_worklist_(weak_callback_worklist, task_id) {}
void MarkingVisitor::AccountMarkedBytes(const HeapObjectHeader& header) {
marked_bytes_ +=
@@ -74,11 +75,22 @@ void MarkingVisitor::VisitWeakRoot(const void* object, TraceDescriptor desc,
// construction, then it should be reachable from the stack.
return;
}
- // Since weak roots arev only traced at the end of marking, we can execute
+ // Since weak roots are only traced at the end of marking, we can execute
// the callback instead of registering it.
weak_callback(LivenessBrokerFactory::Create(), weak_root);
}
+void MarkingVisitor::VisitPointer(const void* address) {
+ TraceConservativelyIfNeeded(address);
+}
+
+void MarkingVisitor::VisitConservatively(HeapObjectHeader& header,
+ TraceConservativelyCallback callback) {
+ MarkHeaderNoTracing(&header);
+ callback(this, header);
+ AccountMarkedBytes(header);
+}
+
void MarkingVisitor::MarkHeader(HeapObjectHeader* header,
TraceDescriptor desc) {
DCHECK(header);
@@ -94,7 +106,7 @@ void MarkingVisitor::MarkHeader(HeapObjectHeader* header,
bool MarkingVisitor::MarkHeaderNoTracing(HeapObjectHeader* header) {
DCHECK(header);
// A GC should only mark the objects that belong in its heap.
- DCHECK_EQ(marker_->heap(), BasePage::FromPayload(header)->heap());
+ DCHECK_EQ(&heap_, BasePage::FromPayload(header)->heap());
// Never mark free space objects. This would e.g. hint to marking a promptly
// freed backing store.
DCHECK(!header->IsFree());
@@ -114,30 +126,29 @@ void MarkingVisitor::FlushWorklists() {
}
void MarkingVisitor::DynamicallyMarkAddress(ConstAddress address) {
- for (auto* header : marker_->heap()->objects()) {
- if (address >= header->Payload() &&
- address < (header->Payload() + header->GetSize())) {
- header->TryMarkAtomic();
- }
+ HeapObjectHeader& header =
+ BasePage::FromPayload(address)->ObjectHeaderFromInnerAddress(
+ const_cast<Address>(address));
+ DCHECK(!IsInConstruction(header));
+ if (MarkHeaderNoTracing(&header)) {
+ marking_worklist_.Push(
+ {reinterpret_cast<void*>(header.Payload()),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
- // TODO(chromium:1056170): Implement dynamically getting HeapObjectHeader
- // for handling previously_not_fully_constructed objects. Requires object
- // start bitmap.
}
-void MarkingVisitor::VisitPointer(const void* address) {
- for (auto* header : marker_->heap()->objects()) {
- if (address >= header->Payload() &&
- address < (header->Payload() + header->GetSize())) {
- header->TryMarkAtomic();
- }
- }
- // TODO(chromium:1056170): Implement proper conservative scanning for
- // on-stack objects. Requires page bloom filter.
+void MarkingVisitor::MarkObject(HeapObjectHeader& header) {
+ MarkHeader(
+ &header,
+ {header.Payload(),
+ GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
MutatorThreadMarkingVisitor::MutatorThreadMarkingVisitor(Marker* marker)
- : MarkingVisitor(marker, Marker::kMutatorThreadId) {}
+ : MarkingVisitor(marker->heap(), marker->marking_worklist(),
+ marker->not_fully_constructed_worklist(),
+ marker->weak_callback_worklist(),
+ Marker::kMutatorThreadId) {}
} // namespace internal
} // namespace cppgc
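
The net effect of this rewrite is that conservative and precise marking now share the page/bitmap machinery instead of the old linear objects() scan; the flow, roughly:

  // stack scan:      VisitPointer(addr)
  //                    -> TraceConservativelyIfNeeded(addr)   // page lookup
  //                    -> VisitConservatively(header, cb)     // mark + trace
  // remembered slot: DynamicallyMarkAddress(addr)
  //                    -> ObjectHeaderFromInnerAddress        // bitmap lookup
  //                    -> MarkHeaderNoTracing + push to marking worklist
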
diff --git a/chromium/v8/src/heap/cppgc/marking-visitor.h b/chromium/v8/src/heap/cppgc/marking-visitor.h
index 33616b37844..50427162a14 100644
--- a/chromium/v8/src/heap/cppgc/marking-visitor.h
+++ b/chromium/v8/src/heap/cppgc/marking-visitor.h
@@ -8,20 +8,25 @@
#include "include/cppgc/source-location.h"
#include "include/cppgc/trace-trait.h"
#include "include/v8config.h"
+#include "src/base/macros.h"
+#include "src/heap/base/stack.h"
#include "src/heap/cppgc/globals.h"
-#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marker.h"
-#include "src/heap/cppgc/stack.h"
#include "src/heap/cppgc/visitor.h"
namespace cppgc {
namespace internal {
-class MarkingVisitor : public VisitorBase, public StackVisitor {
+class BasePage;
+class HeapObjectHeader;
+
+class MarkingVisitor : public ConservativeTracingVisitor,
+ public heap::base::StackVisitor {
public:
- MarkingVisitor(Marker*, int);
+ MarkingVisitor(HeapBase&, Marker::MarkingWorklist*,
+ Marker::NotFullyConstructedWorklist*,
+ Marker::WeakCallbackWorklist*, int);
virtual ~MarkingVisitor() = default;
MarkingVisitor(const MarkingVisitor&) = delete;
@@ -30,6 +35,7 @@ class MarkingVisitor : public VisitorBase, public StackVisitor {
void FlushWorklists();
void DynamicallyMarkAddress(ConstAddress);
+ void MarkObject(HeapObjectHeader&);
void AccountMarkedBytes(const HeapObjectHeader&);
size_t marked_bytes() const { return marked_bytes_; }
@@ -43,7 +49,10 @@ class MarkingVisitor : public VisitorBase, public StackVisitor {
void VisitRoot(const void*, TraceDescriptor) override;
void VisitWeakRoot(const void*, TraceDescriptor, WeakCallback,
const void*) override;
+ void VisitConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) override;
+  // StackVisitor interface.
void VisitPointer(const void*) override;
private:
@@ -51,12 +60,11 @@ class MarkingVisitor : public VisitorBase, public StackVisitor {
bool MarkHeaderNoTracing(HeapObjectHeader*);
void RegisterWeakCallback(WeakCallback, const void*) override;
- Marker* const marker_;
Marker::MarkingWorklist::View marking_worklist_;
Marker::NotFullyConstructedWorklist::View not_fully_constructed_worklist_;
Marker::WeakCallbackWorklist::View weak_callback_worklist_;
- size_t marked_bytes_;
+ size_t marked_bytes_ = 0;
};
class V8_EXPORT_PRIVATE MutatorThreadMarkingVisitor : public MarkingVisitor {
diff --git a/chromium/v8/src/heap/cppgc/object-allocator-inl.h b/chromium/v8/src/heap/cppgc/object-allocator-inl.h
index 7d8d126d633..b75c296f51a 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator-inl.h
+++ b/chromium/v8/src/heap/cppgc/object-allocator-inl.h
@@ -10,7 +10,7 @@
#include "src/base/logging.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/object-start-bitmap-inl.h"
#include "src/heap/cppgc/object-start-bitmap.h"
@@ -20,6 +20,7 @@ namespace cppgc {
namespace internal {
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
+ DCHECK(is_allocation_allowed());
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
const RawHeap::RegularSpaceType type =
@@ -30,6 +31,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
CustomSpaceIndex space_index) {
+ DCHECK(is_allocation_allowed());
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
return AllocateObjectOnSpace(
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.cc b/chromium/v8/src/heap/cppgc/object-allocator.cc
index df83d8ee9d3..b8203a1d8a2 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.cc
+++ b/chromium/v8/src/heap/cppgc/object-allocator.cc
@@ -4,36 +4,119 @@
#include "src/heap/cppgc/object-allocator.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/object-allocator-inl.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
namespace cppgc {
namespace internal {
namespace {
-void* AllocateLargeObject(RawHeap* raw_heap, LargePageSpace* space, size_t size,
+void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ DCHECK_LT(begin, end);
+
+ static constexpr auto kEntrySize = AgeTable::kEntrySizeInBytes;
+
+ const uintptr_t offset_begin = CagedHeap::OffsetFromAddress(begin);
+ const uintptr_t offset_end = CagedHeap::OffsetFromAddress(end);
+
+ const uintptr_t young_offset_begin = (begin == page->PayloadStart())
+ ? RoundDown(offset_begin, kEntrySize)
+ : RoundUp(offset_begin, kEntrySize);
+ const uintptr_t young_offset_end = (end == page->PayloadEnd())
+ ? RoundUp(offset_end, kEntrySize)
+ : RoundDown(offset_end, kEntrySize);
+
+ auto& age_table = page->heap()->caged_heap().local_data().age_table;
+ for (auto offset = young_offset_begin; offset < young_offset_end;
+ offset += AgeTable::kEntrySizeInBytes) {
+ age_table[offset] = AgeTable::Age::kYoung;
+ }
+
+  // Set the first and the last regions of the newly allocated linear buffer
+  // to kUnknown.
+ if (begin != page->PayloadStart() && !IsAligned(offset_begin, kEntrySize))
+ age_table[offset_begin] = AgeTable::Age::kUnknown;
+ if (end != page->PayloadEnd() && !IsAligned(offset_end, kEntrySize))
+ age_table[offset_end] = AgeTable::Age::kUnknown;
+#endif
+}
+
+void AddToFreeList(NormalPageSpace* space, Address start, size_t size) {
+ auto& free_list = space->free_list();
+ free_list.Add({start, size});
+ NormalPage::From(BasePage::FromPayload(start))
+ ->object_start_bitmap()
+ .SetBit(start);
+}
+
+void ReplaceLinearAllocationBuffer(NormalPageSpace* space,
+ StatsCollector* stats_collector,
+ Address new_buffer, size_t new_size) {
+ DCHECK_NOT_NULL(space);
+ DCHECK_NOT_NULL(stats_collector);
+
+ auto& lab = space->linear_allocation_buffer();
+ if (lab.size()) {
+ AddToFreeList(space, lab.start(), lab.size());
+ stats_collector->NotifyExplicitFree(lab.size());
+ }
+
+ lab.Set(new_buffer, new_size);
+ if (new_size) {
+ DCHECK_NOT_NULL(new_buffer);
+ stats_collector->NotifyAllocation(new_size);
+ auto* page = NormalPage::From(BasePage::FromPayload(new_buffer));
+ page->object_start_bitmap().ClearBit(new_buffer);
+ MarkRangeAsYoung(page, new_buffer, new_buffer + new_size);
+ }
+}
+
+void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
+ StatsCollector* stats_collector, size_t size,
GCInfoIndex gcinfo) {
- LargePage* page = LargePage::Create(space, size);
+ LargePage* page = LargePage::Create(page_backend, space, size);
+ space->AddPage(page);
+
auto* header = new (page->ObjectHeader())
HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);
+ stats_collector->NotifyAllocation(size);
+ MarkRangeAsYoung(page, page->PayloadStart(), page->PayloadEnd());
+
return header->Payload();
}
} // namespace
-ObjectAllocator::ObjectAllocator(RawHeap* heap) : raw_heap_(heap) {}
+ObjectAllocator::ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
+ StatsCollector* stats_collector)
+ : raw_heap_(heap),
+ page_backend_(page_backend),
+ stats_collector_(stats_collector) {}
void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
GCInfoIndex gcinfo) {
+ void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
+ stats_collector_->NotifySafePointForConservativeCollection();
+ return memory;
+}
+
+void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace* space,
+ size_t size, GCInfoIndex gcinfo) {
DCHECK_EQ(0, size & kAllocationMask);
DCHECK_LE(kFreeListEntrySize, size);
@@ -41,7 +124,8 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
if (size >= kLargeObjectSizeThreshold) {
auto* large_space = LargePageSpace::From(
raw_heap_->Space(RawHeap::RegularSpaceType::kLarge));
- return AllocateLargeObject(raw_heap_, large_space, size, gcinfo);
+ return AllocateLargeObject(page_backend_, large_space, stats_collector_,
+ size, gcinfo);
}
// 2. Try to allocate from the freelist.
@@ -57,11 +141,17 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace* space, size_t size,
raw_heap_->heap()->sweeper().Finish();
// 5. Add a new page to this heap.
- NormalPage::Create(space);
+ auto* new_page = NormalPage::Create(page_backend_, space);
+ space->AddPage(new_page);
+
+ // 6. Set linear allocation buffer to new page.
+ ReplaceLinearAllocationBuffer(space, stats_collector_,
+ new_page->PayloadStart(),
+ new_page->PayloadSize());
- // 6. Try to allocate from the freelist. This allocation must succeed.
- void* result = AllocateFromFreeList(space, size, gcinfo);
- CPPGC_CHECK(result);
+ // 7. Allocate from it. The allocation must succeed.
+ void* result = AllocateObjectOnSpace(space, size, gcinfo);
+ CHECK(result);
return result;
}
@@ -71,17 +161,40 @@ void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace* space, size_t size,
const FreeList::Block entry = space->free_list().Allocate(size);
if (!entry.address) return nullptr;
- auto& current_lab = space->linear_allocation_buffer();
- if (current_lab.size()) {
- space->AddToFreeList(current_lab.start(), current_lab.size());
- }
+ ReplaceLinearAllocationBuffer(
+ space, stats_collector_, static_cast<Address>(entry.address), entry.size);
- current_lab.Set(static_cast<Address>(entry.address), entry.size);
- NormalPage::From(BasePage::FromPayload(current_lab.start()))
- ->object_start_bitmap()
- .ClearBit(current_lab.start());
return AllocateObjectOnSpace(space, size, gcinfo);
}
+void ObjectAllocator::ResetLinearAllocationBuffers() {
+ class Resetter : public HeapVisitor<Resetter> {
+ public:
+ explicit Resetter(StatsCollector* stats) : stats_collector_(stats) {}
+
+ bool VisitLargePageSpace(LargePageSpace*) { return true; }
+
+ bool VisitNormalPageSpace(NormalPageSpace* space) {
+ ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
+ return true;
+ }
+
+ private:
+ StatsCollector* stats_collector_;
+ } visitor(stats_collector_);
+
+ visitor.Traverse(raw_heap_);
+}
+
+ObjectAllocator::NoAllocationScope::NoAllocationScope(
+ ObjectAllocator& allocator)
+ : allocator_(allocator) {
+ allocator.no_allocation_scope_++;
+}
+
+ObjectAllocator::NoAllocationScope::~NoAllocationScope() {
+ allocator_.no_allocation_scope_--;
+}
+
} // namespace internal
} // namespace cppgc
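To illustrate the new NoAllocationScope, a minimal sketch of a GC-internal call site (the function name is hypothetical; the scope and is_allocation_allowed() come from the diff above):

    // Sketch: RAII guard against allocation during a GC phase. Nesting is
    // supported via the counter; allocation is allowed again once every
    // scope has been destroyed.
    void RunAtomicPause(cppgc::internal::ObjectAllocator& allocator) {
      cppgc::internal::ObjectAllocator::NoAllocationScope no_alloc(allocator);
      // Marking/sweeping happens here; debug builds can assert
      // is_allocation_allowed() inside the allocator before each allocation.
    }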
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.h b/chromium/v8/src/heap/cppgc/object-allocator.h
index 510a935f565..1536ed63730 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.h
+++ b/chromium/v8/src/heap/cppgc/object-allocator.h
@@ -5,33 +5,70 @@
#ifndef V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
#define V8_HEAP_CPPGC_OBJECT_ALLOCATOR_H_
+#include "include/cppgc/allocation.h"
#include "include/cppgc/internal/gc-info.h"
+#include "include/cppgc/macros.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/raw-heap.h"
namespace cppgc {
+
+class V8_EXPORT AllocationHandle {
+ private:
+ AllocationHandle() = default;
+ friend class internal::ObjectAllocator;
+};
+
namespace internal {
-class V8_EXPORT_PRIVATE ObjectAllocator final {
+class StatsCollector;
+class PageBackend;
+
+class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
public:
- explicit ObjectAllocator(RawHeap* heap);
+ // NoAllocationScope is used in debug mode to catch unwanted allocations,
+ // e.g. allocations during GC.
+ class V8_EXPORT_PRIVATE NoAllocationScope final {
+ CPPGC_STACK_ALLOCATED();
+
+ public:
+ explicit NoAllocationScope(ObjectAllocator&);
+ ~NoAllocationScope();
+
+ NoAllocationScope(const NoAllocationScope&) = delete;
+ NoAllocationScope& operator=(const NoAllocationScope&) = delete;
+
+ private:
+ ObjectAllocator& allocator_;
+ };
+
+ ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
+ StatsCollector* stats_collector);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo,
CustomSpaceIndex space_index);
+ void ResetLinearAllocationBuffers();
+
private:
  // Returns the space type that is tried first when allocating an object of
  // |size| bytes; large objects map to the largest regular object size bucket.
inline static RawHeap::RegularSpaceType GetInitialSpaceIndexForSize(
size_t size);
+ bool is_allocation_allowed() const { return no_allocation_scope_ == 0; }
+
inline void* AllocateObjectOnSpace(NormalPageSpace* space, size_t size,
GCInfoIndex gcinfo);
void* OutOfLineAllocate(NormalPageSpace*, size_t, GCInfoIndex);
+ void* OutOfLineAllocateImpl(NormalPageSpace*, size_t, GCInfoIndex);
void* AllocateFromFreeList(NormalPageSpace*, size_t, GCInfoIndex);
RawHeap* raw_heap_;
+ PageBackend* page_backend_;
+ StatsCollector* stats_collector_;
+ size_t no_allocation_scope_ = 0;
};
} // namespace internal
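The empty AllocationHandle base gives the public API an opaque token for allocation while keeping ObjectAllocator internal. A hedged sketch of the pattern (GetAllocationHandle() and the downcast are assumed to follow cppgc's public API conventions):

    // Sketch: the public side only ever sees the opaque handle...
    cppgc::AllocationHandle& handle = heap->GetAllocationHandle();
    // ...and the implementation downcasts it back to the allocator, which
    // is valid because ObjectAllocator is the sole class deriving from
    // AllocationHandle.
    auto& allocator = static_cast<cppgc::internal::ObjectAllocator&>(handle);
    void* memory = allocator.AllocateObject(size, gcinfo);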
diff --git a/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h b/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h
index 93243979aac..6d963cc9486 100644
--- a/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h
+++ b/chromium/v8/src/heap/cppgc/object-start-bitmap-inl.h
@@ -19,6 +19,7 @@ ObjectStartBitmap::ObjectStartBitmap(Address offset) : offset_(offset) {
HeapObjectHeader* ObjectStartBitmap::FindHeader(
ConstAddress address_maybe_pointing_to_the_middle_of_object) const {
+ DCHECK_LE(offset_, address_maybe_pointing_to_the_middle_of_object);
size_t object_offset =
address_maybe_pointing_to_the_middle_of_object - offset_;
size_t object_start_number = object_offset / kAllocationGranularity;
diff --git a/chromium/v8/src/heap/cppgc/page-memory-inl.h b/chromium/v8/src/heap/cppgc/page-memory-inl.h
index 23ce061b435..8b2022eeb26 100644
--- a/chromium/v8/src/heap/cppgc/page-memory-inl.h
+++ b/chromium/v8/src/heap/cppgc/page-memory-inl.h
@@ -16,19 +16,19 @@ inline bool SupportsCommittingGuardPages(PageAllocator* allocator) {
return kGuardPageSize % allocator->CommitPageSize() == 0;
}
-Address NormalPageMemoryRegion::Lookup(Address address) const {
+Address NormalPageMemoryRegion::Lookup(ConstAddress address) const {
size_t index = GetIndex(address);
if (!page_memories_in_use_[index]) return nullptr;
const MemoryRegion writeable_region = GetPageMemory(index).writeable_region();
return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
}
-Address LargePageMemoryRegion::Lookup(Address address) const {
+Address LargePageMemoryRegion::Lookup(ConstAddress address) const {
const MemoryRegion writeable_region = GetPageMemory().writeable_region();
return writeable_region.Contains(address) ? writeable_region.base() : nullptr;
}
-Address PageMemoryRegion::Lookup(Address address) const {
+Address PageMemoryRegion::Lookup(ConstAddress address) const {
DCHECK(reserved_region().Contains(address));
return is_large()
? static_cast<const LargePageMemoryRegion*>(this)->Lookup(address)
@@ -36,7 +36,7 @@ Address PageMemoryRegion::Lookup(Address address) const {
address);
}
-PageMemoryRegion* PageMemoryRegionTree::Lookup(Address address) const {
+PageMemoryRegion* PageMemoryRegionTree::Lookup(ConstAddress address) const {
auto it = set_.upper_bound(address);
  // This check also covers set_.size() > 0, since for empty containers it is
  // guaranteed that begin() == end().
@@ -46,7 +46,7 @@ PageMemoryRegion* PageMemoryRegionTree::Lookup(Address address) const {
return nullptr;
}
-Address PageBackend::Lookup(Address address) const {
+Address PageBackend::Lookup(ConstAddress address) const {
PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(address);
return pmr ? pmr->Lookup(address) : nullptr;
}
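The const-correct Lookup() chain above is what conservative scanning relies on; a short usage sketch (the backend reference is assumed to be the heap's PageBackend):

    // Sketch: filter an arbitrary word (e.g. from the stack) through the
    // page backend. A non-null result is the writeable base of the page
    // containing |address|; guard pages and unmanaged memory yield nullptr.
    bool IsManagedAddress(const cppgc::internal::PageBackend& backend,
                          cppgc::internal::ConstAddress address) {
      return backend.Lookup(address) != nullptr;
    }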
diff --git a/chromium/v8/src/heap/cppgc/page-memory.h b/chromium/v8/src/heap/cppgc/page-memory.h
index f3bc685fa31..b7f1917be7f 100644
--- a/chromium/v8/src/heap/cppgc/page-memory.h
+++ b/chromium/v8/src/heap/cppgc/page-memory.h
@@ -30,7 +30,7 @@ class V8_EXPORT_PRIVATE MemoryRegion final {
size_t size() const { return size_; }
Address end() const { return base_ + size_; }
- bool Contains(Address addr) const {
+ bool Contains(ConstAddress addr) const {
return (reinterpret_cast<uintptr_t>(addr) -
reinterpret_cast<uintptr_t>(base_)) < size_;
}
@@ -70,7 +70,7 @@ class V8_EXPORT_PRIVATE PageMemoryRegion {
// Lookup writeable base for an |address| that's contained in
// PageMemoryRegion. Filters out addresses that are contained in non-writeable
// regions (e.g. guard pages).
- inline Address Lookup(Address address) const;
+ inline Address Lookup(ConstAddress address) const;
// Disallow copy/move.
PageMemoryRegion(const PageMemoryRegion&) = delete;
@@ -111,7 +111,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
// protection.
void Free(Address);
- inline Address Lookup(Address) const;
+ inline Address Lookup(ConstAddress) const;
void UnprotectForTesting() final;
@@ -122,7 +122,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
page_memories_in_use_[index] = value;
}
- size_t GetIndex(Address address) const {
+ size_t GetIndex(ConstAddress address) const {
return static_cast<size_t>(address - reserved_region().base()) >>
kPageSizeLog2;
}
@@ -143,7 +143,7 @@ class V8_EXPORT_PRIVATE LargePageMemoryRegion final : public PageMemoryRegion {
reserved_region().size() - 2 * kGuardPageSize));
}
- inline Address Lookup(Address) const;
+ inline Address Lookup(ConstAddress) const;
void UnprotectForTesting() final;
};
@@ -161,10 +161,10 @@ class V8_EXPORT_PRIVATE PageMemoryRegionTree final {
void Add(PageMemoryRegion*);
void Remove(PageMemoryRegion*);
- inline PageMemoryRegion* Lookup(Address) const;
+ inline PageMemoryRegion* Lookup(ConstAddress) const;
private:
- std::map<Address, PageMemoryRegion*> set_;
+ std::map<ConstAddress, PageMemoryRegion*> set_;
};
// A pool of PageMemory objects represented by the writeable base addresses.
@@ -216,7 +216,7 @@ class V8_EXPORT_PRIVATE PageBackend final {
// Returns the writeable base if |address| is contained in a valid page
// memory.
- inline Address Lookup(Address) const;
+ inline Address Lookup(ConstAddress) const;
// Disallow copy/move.
PageBackend(const PageBackend&) = delete;
diff --git a/chromium/v8/src/heap/cppgc/persistent-node.cc b/chromium/v8/src/heap/cppgc/persistent-node.cc
index 299cefc5210..9c5113f86a2 100644
--- a/chromium/v8/src/heap/cppgc/persistent-node.cc
+++ b/chromium/v8/src/heap/cppgc/persistent-node.cc
@@ -7,9 +7,21 @@
#include <algorithm>
#include <numeric>
+#include "include/cppgc/persistent.h"
+
namespace cppgc {
namespace internal {
+PersistentRegion::~PersistentRegion() {
+ for (auto& slots : nodes_) {
+ for (auto& node : *slots) {
+ if (node.IsUsed()) {
+ static_cast<PersistentBase*>(node.owner())->ClearFromGC();
+ }
+ }
+ }
+}
+
size_t PersistentRegion::NodesInUse() const {
return std::accumulate(
nodes_.cbegin(), nodes_.cend(), 0u, [](size_t acc, const auto& slots) {
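The new ~PersistentRegion() exists so that persistents outliving the heap are nulled instead of left dangling; a minimal sketch of the case it guards against (the GCed type, platform variable, and heap setup are illustrative only):

    // Sketch: a Persistent that outlives its Heap.
    class GCed final : public cppgc::GarbageCollected<GCed> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };
    cppgc::Persistent<GCed> handle;
    {
      auto heap = cppgc::Heap::Create(platform);
      handle = cppgc::MakeGarbageCollected<GCed>(heap->GetAllocationHandle());
    }  // Heap destroyed: ~PersistentRegion() runs ClearFromGC() on every
       // used node, so |handle| is now null instead of dangling.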
diff --git a/chromium/v8/src/heap/cppgc/platform.cc b/chromium/v8/src/heap/cppgc/platform.cc
index 3b20060392d..e96d69b2257 100644
--- a/chromium/v8/src/heap/cppgc/platform.cc
+++ b/chromium/v8/src/heap/cppgc/platform.cc
@@ -8,18 +8,12 @@
#include "src/heap/cppgc/gc-info-table.h"
namespace cppgc {
-namespace internal {
-
-static PageAllocator* g_page_allocator;
-
-} // namespace internal
-void InitializePlatform(PageAllocator* page_allocator) {
- internal::g_page_allocator = page_allocator;
+void InitializeProcess(PageAllocator* page_allocator) {
internal::GlobalGCInfoTable::Create(page_allocator);
}
-void ShutdownPlatform() { internal::g_page_allocator = nullptr; }
+void ShutdownProcess() {}
namespace internal {
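With the rename, process-global setup no longer stashes the page allocator; a sketch of embedder startup and teardown using the renamed entry points (the platform object is assumed to come from the embedder):

    // Sketch: one-time process setup for cppgc with the renamed API.
    cppgc::InitializeProcess(platform->GetPageAllocator());
    // ... create heaps, allocate, run garbage collections ...
    cppgc::ShutdownProcess();  // Now a no-op, kept for API symmetry.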
diff --git a/chromium/v8/src/heap/cppgc/pointer-policies.cc b/chromium/v8/src/heap/cppgc/pointer-policies.cc
index e9dfcecdf3e..5048d1bd59f 100644
--- a/chromium/v8/src/heap/cppgc/pointer-policies.cc
+++ b/chromium/v8/src/heap/cppgc/pointer-policies.cc
@@ -3,10 +3,10 @@
// found in the LICENSE file.
#include "include/cppgc/internal/pointer-policies.h"
-#include "include/cppgc/internal/persistent-node.h"
+#include "include/cppgc/internal/persistent-node.h"
#include "src/base/macros.h"
-#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/heap.h"
namespace cppgc {
diff --git a/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc b/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
index 40107c15262..c28cedfbab9 100644
--- a/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/chromium/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -8,6 +8,7 @@
#include <memory>
#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/heap-page-inl.h"
#include "src/heap/cppgc/heap.h"
namespace cppgc {
@@ -15,14 +16,16 @@ namespace internal {
// static
void PreFinalizerRegistrationDispatcher::RegisterPrefinalizer(
- cppgc::Heap* heap, PreFinalizer prefinalzier) {
- internal::Heap::From(heap)->prefinalizer_handler()->RegisterPrefinalizer(
- prefinalzier);
+ PreFinalizer pre_finalizer) {
+ BasePage::FromPayload(pre_finalizer.object)
+ ->heap()
+ ->prefinalizer_handler()
+ ->RegisterPrefinalizer(pre_finalizer);
}
bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
const PreFinalizer& other) {
- return (object_ == other.object_) && (callback_ == other.callback_);
+ return (object == other.object) && (callback == other.callback);
}
PreFinalizerHandler::PreFinalizerHandler()
@@ -32,12 +35,12 @@ PreFinalizerHandler::PreFinalizerHandler()
{
}
-void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer prefinalizer) {
+void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer pre_finalizer) {
DCHECK(CurrentThreadIsCreationThread());
DCHECK_EQ(ordered_pre_finalizers_.end(),
std::find(ordered_pre_finalizers_.begin(),
- ordered_pre_finalizers_.end(), prefinalizer));
- ordered_pre_finalizers_.push_back(prefinalizer);
+ ordered_pre_finalizers_.end(), pre_finalizer));
+ ordered_pre_finalizers_.push_back(pre_finalizer);
}
void PreFinalizerHandler::InvokePreFinalizers() {
@@ -48,7 +51,7 @@ void PreFinalizerHandler::InvokePreFinalizers() {
std::remove_if(ordered_pre_finalizers_.rbegin(),
ordered_pre_finalizers_.rend(),
[liveness_broker](const PreFinalizer& pf) {
- return (pf.callback_)(liveness_broker, pf.object_);
+ return (pf.callback)(liveness_broker, pf.object);
})
.base());
ordered_pre_finalizers_.shrink_to_fit();
diff --git a/chromium/v8/src/heap/cppgc/prefinalizer-handler.h b/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
index a6255534710..15d24e862cf 100644
--- a/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
+++ b/chromium/v8/src/heap/cppgc/prefinalizer-handler.h
@@ -19,7 +19,7 @@ class PreFinalizerHandler final {
PreFinalizerHandler();
- void RegisterPrefinalizer(PreFinalizer prefinalzier);
+ void RegisterPrefinalizer(PreFinalizer pre_finalizer);
void InvokePreFinalizers();
diff --git a/chromium/v8/src/heap/cppgc/process-heap.cc b/chromium/v8/src/heap/cppgc/process-heap.cc
new file mode 100644
index 00000000000..14089883967
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/process-heap.cc
@@ -0,0 +1,13 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/process-heap.h"
+
+namespace cppgc {
+namespace internal {
+
+AtomicEntryFlag ProcessHeap::concurrent_marking_flag_;
+
+} // namespace internal
+} // namespace cppgc
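concurrent_marking_flag_ backs the cheap global check used by write barriers. A sketch of how the flag is consulted (the wrapper name mirrors the one declared in include/cppgc/internal/process-heap.h, assumed here):

    // Sketch: fast-path gate for a write barrier.
    void WriteBarrierSketch(const void* value) {
      // The flag may report a stale 'true' but never a stale 'false', so
      // the fast path can safely skip all further work when it is clear.
      if (!cppgc::internal::ProcessHeap::IsAnyIncrementalOrConcurrentMarking())
        return;
      // Slow path: resolve page/heap for |value| and mark it if needed.
    }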
diff --git a/chromium/v8/src/heap/cppgc/raw-heap.cc b/chromium/v8/src/heap/cppgc/raw-heap.cc
index cf7311b46f2..19200ae8a20 100644
--- a/chromium/v8/src/heap/cppgc/raw-heap.cc
+++ b/chromium/v8/src/heap/cppgc/raw-heap.cc
@@ -12,7 +12,7 @@ namespace internal {
// static
constexpr size_t RawHeap::kNumberOfRegularSpaces;
-RawHeap::RawHeap(Heap* heap, size_t custom_spaces) : main_heap_(heap) {
+RawHeap::RawHeap(HeapBase* heap, size_t custom_spaces) : main_heap_(heap) {
size_t i = 0;
for (; i < static_cast<size_t>(RegularSpaceType::kLarge); ++i) {
spaces_.push_back(std::make_unique<NormalPageSpace>(this, i));
diff --git a/chromium/v8/src/heap/cppgc/raw-heap.h b/chromium/v8/src/heap/cppgc/raw-heap.h
index 0591fa87ab7..e63fc32c439 100644
--- a/chromium/v8/src/heap/cppgc/raw-heap.h
+++ b/chromium/v8/src/heap/cppgc/raw-heap.h
@@ -16,7 +16,7 @@
namespace cppgc {
namespace internal {
-class Heap;
+class HeapBase;
class BaseSpace;
// RawHeap is responsible for space management.
@@ -47,7 +47,7 @@ class V8_EXPORT_PRIVATE RawHeap final {
using iterator = Spaces::iterator;
using const_iterator = Spaces::const_iterator;
- explicit RawHeap(Heap* heap, size_t custom_spaces);
+ explicit RawHeap(HeapBase* heap, size_t custom_spaces);
~RawHeap();
// Space iteration support.
@@ -77,8 +77,8 @@ class V8_EXPORT_PRIVATE RawHeap final {
return const_cast<RawHeap&>(*this).CustomSpace(space_index);
}
- Heap* heap() { return main_heap_; }
- const Heap* heap() const { return main_heap_; }
+ HeapBase* heap() { return main_heap_; }
+ const HeapBase* heap() const { return main_heap_; }
private:
size_t SpaceIndexForCustomSpace(CustomSpaceIndex space_index) const {
@@ -96,7 +96,7 @@ class V8_EXPORT_PRIVATE RawHeap final {
return const_cast<RawHeap&>(*this).Space(space_index);
}
- Heap* main_heap_;
+ HeapBase* main_heap_;
Spaces spaces_;
};
diff --git a/chromium/v8/src/heap/cppgc/stack.cc b/chromium/v8/src/heap/cppgc/stack.cc
deleted file mode 100644
index b99693708c6..00000000000
--- a/chromium/v8/src/heap/cppgc/stack.cc
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/cppgc/stack.h"
-
-#include <limits>
-
-#include "src/base/platform/platform.h"
-#include "src/heap/cppgc/globals.h"
-#include "src/heap/cppgc/sanitizers.h"
-
-namespace cppgc {
-namespace internal {
-
-using IterateStackCallback = void (*)(const Stack*, StackVisitor*, intptr_t*);
-extern "C" void PushAllRegistersAndIterateStack(const Stack*, StackVisitor*,
- IterateStackCallback);
-
-Stack::Stack(const void* stack_start) : stack_start_(stack_start) {}
-
-bool Stack::IsOnStack(void* slot) const {
- void* raw_slot = v8::base::Stack::GetStackSlot(slot);
- return v8::base::Stack::GetCurrentStackPosition() <= raw_slot &&
- raw_slot <= stack_start_;
-}
-
-namespace {
-
-#ifdef V8_USE_ADDRESS_SANITIZER
-
-// No ASAN support as accessing fake frames otherwise results in
-// "stack-use-after-scope" warnings.
-NO_SANITIZE_ADDRESS
-void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
- void* asan_fake_stack,
- const void* stack_start,
- const void* stack_end, void* address) {
- // When using ASAN fake stack a pointer to the fake frame is kept on the
- // native frame. In case |addr| points to a fake frame of the current stack
- // iterate the fake frame. Frame layout see
- // https://github.com/google/sanitizers/wiki/AddressSanitizerUseAfterReturn
- if (asan_fake_stack) {
- void* fake_frame_begin;
- void* fake_frame_end;
- void* real_stack_frame = __asan_addr_is_in_fake_stack(
- asan_fake_stack, address, &fake_frame_begin, &fake_frame_end);
- if (real_stack_frame) {
- // |address| points to a fake frame. Check that the fake frame is part
- // of this stack.
- if (stack_start >= real_stack_frame && real_stack_frame >= stack_end) {
- // Iterate the fake frame.
- for (void** current = reinterpret_cast<void**>(fake_frame_begin);
- current < fake_frame_end; ++current) {
- void* addr = *current;
- if (addr == nullptr) continue;
- visitor->VisitPointer(addr);
- }
- }
- }
- }
-}
-
-#endif // V8_USE_ADDRESS_SANITIZER
-
-void IterateSafeStackIfNecessary(StackVisitor* visitor) {
-#if defined(__has_feature)
-#if __has_feature(safe_stack)
- // Source:
- // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/safestack/safestack.cpp
- constexpr size_t kSafeStackAlignmentBytes = 16;
- void* stack_end = __builtin___get_unsafe_stack_ptr();
- void* stack_start = __builtin___get_unsafe_stack_top();
- CHECK_GT(stack_start, stack_end);
- CHECK_EQ(0u, reinterpret_cast<uintptr_t>(stack_end) &
- (kSafeStackAlignmentBytes - 1));
- CHECK_EQ(0u, reinterpret_cast<uintptr_t>(stack_start) &
- (kSafeStackAlignmentBytes - 1));
- void** current = reinterpret_cast<void**>(stack_end);
- for (; current < stack_start; ++current) {
- void* address = *current;
- if (address == nullptr) continue;
- visitor->VisitPointer(address);
- }
-#endif // __has_feature(safe_stack)
-#endif // defined(__has_feature)
-}
-
-// Called by the trampoline that pushes registers on the stack. This method
-// should never be inlined to ensure that a possible redzone cannot contain
-// any data that needs to be scanned.
-V8_NOINLINE
-// No ASAN support as method accesses redzones while walking the stack.
-NO_SANITIZE_ADDRESS
-void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
- intptr_t* stack_end) {
-#ifdef V8_USE_ADDRESS_SANITIZER
- void* asan_fake_stack = __asan_get_current_fake_stack();
-#endif // V8_USE_ADDRESS_SANITIZER
- // All supported platforms should have their stack aligned to at least
- // sizeof(void*).
- constexpr size_t kMinStackAlignment = sizeof(void*);
- void** current = reinterpret_cast<void**>(stack_end);
- CHECK_EQ(0u, reinterpret_cast<uintptr_t>(current) & (kMinStackAlignment - 1));
- for (; current < stack->stack_start(); ++current) {
- // MSAN: Instead of unpoisoning the whole stack, the slot's value is copied
- // into a local which is unpoisoned.
- void* address = *current;
- MSAN_UNPOISON(&address, sizeof(address));
- if (address == nullptr) continue;
- visitor->VisitPointer(address);
-#ifdef V8_USE_ADDRESS_SANITIZER
- IterateAsanFakeFrameIfNecessary(visitor, asan_fake_stack,
- stack->stack_start(), stack_end, address);
-#endif // V8_USE_ADDRESS_SANITIZER
- }
-}
-
-} // namespace
-
-void Stack::IteratePointers(StackVisitor* visitor) const {
- PushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
- // No need to deal with callee-saved registers as they will be kept alive by
- // the regular conservative stack iteration.
- IterateSafeStackIfNecessary(visitor);
-}
-
-} // namespace internal
-} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/stack.h b/chromium/v8/src/heap/cppgc/stack.h
deleted file mode 100644
index 3f561aed08e..00000000000
--- a/chromium/v8/src/heap/cppgc/stack.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_CPPGC_STACK_H_
-#define V8_HEAP_CPPGC_STACK_H_
-
-#include "src/base/macros.h"
-
-namespace cppgc {
-namespace internal {
-
-class StackVisitor {
- public:
- virtual void VisitPointer(const void* address) = 0;
-};
-
-// Abstraction over the stack. Supports handling of:
-// - native stack;
-// - ASAN/MSAN;
-// - SafeStack: https://releases.llvm.org/10.0.0/tools/clang/docs/SafeStack.html
-class V8_EXPORT_PRIVATE Stack final {
- public:
- explicit Stack(const void* stack_start);
-
- // Returns true if |slot| is part of the stack and false otherwise.
- bool IsOnStack(void* slot) const;
-
- // Word-aligned iteration of the stack. Slot values are passed on to
- // |visitor|.
- void IteratePointers(StackVisitor* visitor) const;
-
- // Returns the start of the stack.
- const void* stack_start() const { return stack_start_; }
-
- private:
- const void* stack_start_;
-};
-
-} // namespace internal
-} // namespace cppgc
-
-#endif // V8_HEAP_CPPGC_STACK_H_
diff --git a/chromium/v8/src/heap/cppgc/stats-collector.cc b/chromium/v8/src/heap/cppgc/stats-collector.cc
new file mode 100644
index 00000000000..a92aba021d7
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/stats-collector.cc
@@ -0,0 +1,114 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/stats-collector.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "src/base/logging.h"
+
+namespace cppgc {
+namespace internal {
+
+// static
+constexpr size_t StatsCollector::kAllocationThresholdBytes;
+
+void StatsCollector::RegisterObserver(AllocationObserver* observer) {
+ DCHECK_EQ(allocation_observers_.end(),
+ std::find(allocation_observers_.begin(),
+ allocation_observers_.end(), observer));
+ allocation_observers_.push_back(observer);
+}
+
+void StatsCollector::UnregisterObserver(AllocationObserver* observer) {
+ auto it = std::find(allocation_observers_.begin(),
+ allocation_observers_.end(), observer);
+ DCHECK_NE(allocation_observers_.end(), it);
+ allocation_observers_.erase(it);
+}
+
+void StatsCollector::NotifyAllocation(size_t bytes) {
+ // The current GC may not have been started. This is ok as recording considers
+ // the whole time range between garbage collections.
+ allocated_bytes_since_safepoint_ += bytes;
+}
+
+void StatsCollector::NotifyExplicitFree(size_t bytes) {
+ // See IncreaseAllocatedObjectSize for lifetime of the counter.
+ explicitly_freed_bytes_since_safepoint_ += bytes;
+}
+
+void StatsCollector::NotifySafePointForConservativeCollection() {
+ if (std::abs(allocated_bytes_since_safepoint_ -
+ explicitly_freed_bytes_since_safepoint_) >=
+ static_cast<int64_t>(kAllocationThresholdBytes)) {
+ AllocatedObjectSizeSafepointImpl();
+ }
+}
+
+void StatsCollector::AllocatedObjectSizeSafepointImpl() {
+ allocated_bytes_since_end_of_marking_ +=
+ static_cast<int64_t>(allocated_bytes_since_safepoint_) -
+ static_cast<int64_t>(explicitly_freed_bytes_since_safepoint_);
+
+  // These observer methods may start or finalize a GC. In case they trigger a
+  // final GC pause, the delta counters are reset there and subsequent
+  // observers are invoked with '0' updates.
+ ForAllAllocationObservers([this](AllocationObserver* observer) {
+ // Recompute delta here so that a GC finalization is able to clear the
+ // delta for other observer calls.
+ int64_t delta = allocated_bytes_since_safepoint_ -
+ explicitly_freed_bytes_since_safepoint_;
+ if (delta < 0) {
+ observer->AllocatedObjectSizeDecreased(static_cast<size_t>(-delta));
+ } else {
+ observer->AllocatedObjectSizeIncreased(static_cast<size_t>(delta));
+ }
+ });
+ allocated_bytes_since_safepoint_ = 0;
+ explicitly_freed_bytes_since_safepoint_ = 0;
+}
+
+void StatsCollector::NotifyMarkingStarted() {
+ DCHECK_EQ(GarbageCollectionState::kNotRunning, gc_state_);
+ gc_state_ = GarbageCollectionState::kMarking;
+}
+
+void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
+ DCHECK_EQ(GarbageCollectionState::kMarking, gc_state_);
+ gc_state_ = GarbageCollectionState::kSweeping;
+ current_.marked_bytes = marked_bytes;
+ allocated_bytes_since_end_of_marking_ = 0;
+ allocated_bytes_since_safepoint_ = 0;
+ explicitly_freed_bytes_since_safepoint_ = 0;
+
+ ForAllAllocationObservers([marked_bytes](AllocationObserver* observer) {
+ observer->ResetAllocatedObjectSize(marked_bytes);
+ });
+}
+
+const StatsCollector::Event& StatsCollector::NotifySweepingCompleted() {
+ DCHECK_EQ(GarbageCollectionState::kSweeping, gc_state_);
+ gc_state_ = GarbageCollectionState::kNotRunning;
+ previous_ = std::move(current_);
+ current_ = Event();
+ return previous_;
+}
+
+size_t StatsCollector::allocated_object_size() const {
+ // During sweeping we refer to the current Event as that already holds the
+ // correct marking information. In all other phases, the previous event holds
+ // the most up-to-date marking information.
+ const Event& event =
+ gc_state_ == GarbageCollectionState::kSweeping ? current_ : previous_;
+ DCHECK_GE(static_cast<int64_t>(event.marked_bytes) +
+ allocated_bytes_since_end_of_marking_,
+ 0);
+ return static_cast<size_t>(static_cast<int64_t>(event.marked_bytes) +
+ allocated_bytes_since_end_of_marking_);
+}
+
+} // namespace internal
+} // namespace cppgc
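A worked example of the safepoint arithmetic above (numbers are illustrative):

    // Sketch: deltas crossing kAllocationThresholdBytes (1024).
    cppgc::internal::StatsCollector stats;
    stats.NotifyAllocation(800);    // allocated_since_safepoint = 800.
    stats.NotifyExplicitFree(100);  // freed_since_safepoint = 100.
    stats.NotifyAllocation(500);    // allocated_since_safepoint = 1300.
    // |1300 - 100| = 1200 >= kAllocationThresholdBytes, so the next
    // safepoint notifies observers:
    stats.NotifySafePointForConservativeCollection();
    // Observers receive AllocatedObjectSizeIncreased(1200); both
    // since-safepoint counters are reset to zero.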
diff --git a/chromium/v8/src/heap/cppgc/stats-collector.h b/chromium/v8/src/heap/cppgc/stats-collector.h
new file mode 100644
index 00000000000..cc122a17dd5
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/stats-collector.h
@@ -0,0 +1,130 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_STATS_COLLECTOR_H_
+#define V8_HEAP_CPPGC_STATS_COLLECTOR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+// Sink for various time and memory statistics.
+class V8_EXPORT_PRIVATE StatsCollector final {
+ public:
+ // POD to hold interesting data accumulated during a garbage collection cycle.
+ //
+ // The event is always fully populated when looking at previous events but
+ // may only be partially populated when looking at the current event.
+ struct Event final {
+ // Marked bytes collected during marking.
+ size_t marked_bytes = 0;
+ };
+
+ // Observer for allocated object size. May be used to implement heap growing
+ // heuristics.
+ class AllocationObserver {
+ public:
+    // Called after observing at least
+    // StatsCollector::kAllocationThresholdBytes changed bytes through
+    // allocation or explicit free. Reports both negative and positive
+    // increments, allowing the observer to decide whether absolute values or
+    // only the deltas are interesting.
+ //
+ // May trigger GC.
+ virtual void AllocatedObjectSizeIncreased(size_t) = 0;
+ virtual void AllocatedObjectSizeDecreased(size_t) = 0;
+
+    // Called when the exact allocated object size is known. In practice,
+    // this is after marking, when marked bytes == allocated bytes.
+ //
+ // Must not trigger GC synchronously.
+ virtual void ResetAllocatedObjectSize(size_t) = 0;
+ };
+
+ // Observers are implemented using virtual calls. Avoid notifications below
+ // reasonably interesting sizes.
+ static constexpr size_t kAllocationThresholdBytes = 1024;
+
+ StatsCollector() = default;
+ StatsCollector(const StatsCollector&) = delete;
+ StatsCollector& operator=(const StatsCollector&) = delete;
+
+ void RegisterObserver(AllocationObserver*);
+ void UnregisterObserver(AllocationObserver*);
+
+ void NotifyAllocation(size_t);
+ void NotifyExplicitFree(size_t);
+  // Safepoints should only be invoked when garbage collections are possible.
+ // This is necessary as increments and decrements are reported as close to
+ // their actual allocation/reclamation as possible.
+ void NotifySafePointForConservativeCollection();
+
+ // Indicates a new garbage collection cycle.
+ void NotifyMarkingStarted();
+ // Indicates that marking of the current garbage collection cycle is
+ // completed.
+ void NotifyMarkingCompleted(size_t marked_bytes);
+ // Indicates the end of a garbage collection cycle. This means that sweeping
+ // is finished at this point.
+ const Event& NotifySweepingCompleted();
+
+ // Size of live objects in bytes on the heap. Based on the most recent marked
+ // bytes and the bytes allocated since last marking.
+ size_t allocated_object_size() const;
+
+ private:
+ enum class GarbageCollectionState : uint8_t {
+ kNotRunning,
+ kMarking,
+ kSweeping
+ };
+
+ // Invokes |callback| for all registered observers.
+ template <typename Callback>
+ void ForAllAllocationObservers(Callback callback);
+
+ void AllocatedObjectSizeSafepointImpl();
+
+  // Allocated bytes since the end of marking. Reset after marking, as those
+  // bytes are then accounted for in marked_bytes. May be negative if an
+  // object that was marked as live in the previous cycle is explicitly freed.
+ int64_t allocated_bytes_since_end_of_marking_ = 0;
+  // Counters for allocation and free. The individual values are never
+  // negative, but their delta may be, for the same reason that
+  // allocated_bytes_since_end_of_marking_ may be negative. Signed integer
+  // arithmetic is kept for simplicity.
+ int64_t allocated_bytes_since_safepoint_ = 0;
+ int64_t explicitly_freed_bytes_since_safepoint_ = 0;
+
+  // Vector for fast iteration of observers. Registration and unregistration
+  // only happen on startup/teardown.
+ std::vector<AllocationObserver*> allocation_observers_;
+
+ GarbageCollectionState gc_state_ = GarbageCollectionState::kNotRunning;
+
+  // The event being filled by the current GC cycle between
+  // NotifyMarkingStarted and NotifySweepingCompleted.
+  Event current_;
+  // The previous GC event, populated at NotifySweepingCompleted.
+ Event previous_;
+};
+
+template <typename Callback>
+void StatsCollector::ForAllAllocationObservers(Callback callback) {
+ for (AllocationObserver* observer : allocation_observers_) {
+ callback(observer);
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_STATS_COLLECTOR_H_
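A minimal observer sketch, in the style a heap-growing heuristic might use (the class and its bookkeeping are illustrative, not part of the diff):

    // Sketch: tracking a live-size estimate from observer callbacks.
    class LiveSizeEstimator final
        : public cppgc::internal::StatsCollector::AllocationObserver {
     public:
      void AllocatedObjectSizeIncreased(size_t delta) final {
        estimate_ += delta;
      }
      void AllocatedObjectSizeDecreased(size_t delta) final {
        estimate_ -= delta;
      }
      void ResetAllocatedObjectSize(size_t marked_bytes) final {
        estimate_ = marked_bytes;  // Exact value, known after marking.
      }
      size_t estimate() const { return estimate_; }

     private:
      size_t estimate_ = 0;
    };
    // Registered once at startup: stats_collector->RegisterObserver(&obs);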
diff --git a/chromium/v8/src/heap/cppgc/sweeper.cc b/chromium/v8/src/heap/cppgc/sweeper.cc
index 77d2d3c33e7..98a3117a2d4 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.cc
+++ b/chromium/v8/src/heap/cppgc/sweeper.cc
@@ -4,8 +4,13 @@
#include "src/heap/cppgc/sweeper.h"
+#include <atomic>
+#include <memory>
#include <vector>
+#include "include/cppgc/platform.h"
+#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/free-list.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header-inl.h"
@@ -17,12 +22,16 @@
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sanitizers.h"
+#include "src/heap/cppgc/stats-collector.h"
+#include "src/heap/cppgc/task-handle.h"
namespace cppgc {
namespace internal {
namespace {
+using v8::base::Optional;
+
class ObjectStartBitmapVerifier
: private HeapVisitor<ObjectStartBitmapVerifier> {
friend class HeapVisitor<ObjectStartBitmapVerifier>;
@@ -54,15 +63,126 @@ class ObjectStartBitmapVerifier
HeapObjectHeader* prev_ = nullptr;
};
+template <typename T>
+class ThreadSafeStack {
+ public:
+ ThreadSafeStack() = default;
+
+ void Push(T t) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ vector_.push_back(std::move(t));
+ }
+
+ Optional<T> Pop() {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ if (vector_.empty()) return v8::base::nullopt;
+ T top = std::move(vector_.back());
+ vector_.pop_back();
+    // std::move is redundant here but works around a bug in GCC 7.
+ return std::move(top);
+ }
+
+ template <typename It>
+ void Insert(It begin, It end) {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ vector_.insert(vector_.end(), begin, end);
+ }
+
+ bool IsEmpty() const {
+ v8::base::LockGuard<v8::base::Mutex> lock(&mutex_);
+ return vector_.empty();
+ }
+
+ private:
+ std::vector<T> vector_;
+ mutable v8::base::Mutex mutex_;
+};
+
struct SpaceState {
- BaseSpace::Pages unswept_pages;
+ struct SweptPageState {
+ BasePage* page = nullptr;
+ std::vector<HeapObjectHeader*> unfinalized_objects;
+ FreeList cached_free_list;
+ std::vector<FreeList::Block> unfinalized_free_list;
+ bool is_empty = false;
+ };
+
+ ThreadSafeStack<BasePage*> unswept_pages;
+ ThreadSafeStack<SweptPageState> swept_unfinalized_pages;
};
+
using SpaceStates = std::vector<SpaceState>;
-bool SweepNormalPage(NormalPage* page) {
+void StickyUnmark(HeapObjectHeader* header) {
+ // Young generation in Oilpan uses sticky mark bits.
+#if !defined(CPPGC_YOUNG_GENERATION)
+ header->Unmark<HeapObjectHeader::AccessMode::kAtomic>();
+#endif
+}
+
+// Builder that finalizes objects and adds freelist entries right away.
+class InlinedFinalizationBuilder final {
+ public:
+ using ResultType = bool;
+
+ explicit InlinedFinalizationBuilder(BasePage* page) : page_(page) {}
+
+ void AddFinalizer(HeapObjectHeader* header, size_t size) {
+ header->Finalize();
+ SET_MEMORY_INACCESIBLE(header, size);
+ }
+
+ void AddFreeListEntry(Address start, size_t size) {
+ auto* space = NormalPageSpace::From(page_->space());
+ space->free_list().Add({start, size});
+ }
+
+ ResultType GetResult(bool is_empty) { return is_empty; }
+
+ private:
+ BasePage* page_;
+};
+
+// Builder that produces results for deferred processing.
+class DeferredFinalizationBuilder final {
+ public:
+ using ResultType = SpaceState::SweptPageState;
+
+ explicit DeferredFinalizationBuilder(BasePage* page) { result_.page = page; }
+
+ void AddFinalizer(HeapObjectHeader* header, size_t size) {
+ if (header->IsFinalizable()) {
+ result_.unfinalized_objects.push_back({header});
+ found_finalizer_ = true;
+ } else {
+ SET_MEMORY_INACCESIBLE(header, size);
+ }
+ }
+
+ void AddFreeListEntry(Address start, size_t size) {
+ if (found_finalizer_) {
+ result_.unfinalized_free_list.push_back({start, size});
+ } else {
+ result_.cached_free_list.Add({start, size});
+ }
+ found_finalizer_ = false;
+ }
+
+ ResultType&& GetResult(bool is_empty) {
+ result_.is_empty = is_empty;
+ return std::move(result_);
+ }
+
+ private:
+ ResultType result_;
+ bool found_finalizer_ = false;
+};
+
+template <typename FinalizationBuilder>
+typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
constexpr auto kAtomicAccess = HeapObjectHeader::AccessMode::kAtomic;
+ FinalizationBuilder builder(page);
- auto* space = NormalPageSpace::From(page->space());
ObjectStartBitmap& bitmap = page->object_start_bitmap();
bitmap.Clear();
@@ -79,18 +199,18 @@ bool SweepNormalPage(NormalPage* page) {
}
// Check if object is not marked (not reachable).
if (!header->IsMarked<kAtomicAccess>()) {
- header->Finalize();
- SET_MEMORY_INACCESIBLE(header, size);
+ builder.AddFinalizer(header, size);
begin += size;
continue;
}
// The object is alive.
const Address header_address = reinterpret_cast<Address>(header);
if (start_of_gap != header_address) {
- space->AddToFreeList(start_of_gap,
- static_cast<size_t>(header_address - start_of_gap));
+ builder.AddFreeListEntry(
+ start_of_gap, static_cast<size_t>(header_address - start_of_gap));
+ bitmap.SetBit(start_of_gap);
}
- header->Unmark<kAtomicAccess>();
+ StickyUnmark(header);
bitmap.SetBit(begin);
begin += size;
start_of_gap = begin;
@@ -98,56 +218,150 @@ bool SweepNormalPage(NormalPage* page) {
if (start_of_gap != page->PayloadStart() &&
start_of_gap != page->PayloadEnd()) {
- space->AddToFreeList(
+ builder.AddFreeListEntry(
start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
+ bitmap.SetBit(start_of_gap);
}
const bool is_empty = (start_of_gap == page->PayloadStart());
- return is_empty;
+ return builder.GetResult(is_empty);
}
-// This visitor:
-// - resets linear allocation buffers and clears free lists for all spaces;
-// - moves all Heap pages to local Sweeper's state (SpaceStates).
-class PrepareForSweepVisitor final
- : public HeapVisitor<PrepareForSweepVisitor> {
+// SweepFinalizer is responsible for heap/space/page finalization. Finalization
+// is defined as a step following concurrent sweeping which:
+// - calls finalizers;
+// - returns (unmaps) empty pages;
+// - merges freelists into the space's freelist.
+class SweepFinalizer final {
public:
- explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+ explicit SweepFinalizer(cppgc::Platform* platform) : platform_(platform) {}
- bool VisitNormalPageSpace(NormalPageSpace* space) {
- space->ResetLinearAllocationBuffer();
- space->free_list().Clear();
- (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
- return true;
+ void FinalizeHeap(SpaceStates* space_states) {
+ for (SpaceState& space_state : *space_states) {
+ FinalizeSpace(&space_state);
+ }
}
- bool VisitLargePageSpace(LargePageSpace* space) {
- (*states_)[space->index()].unswept_pages = space->RemoveAllPages();
+ void FinalizeSpace(SpaceState* space_state) {
+ while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
+ FinalizePage(&*page_state);
+ }
+ }
+
+ bool FinalizeSpaceWithDeadline(SpaceState* space_state,
+ double deadline_in_seconds) {
+ DCHECK(platform_);
+ static constexpr size_t kDeadlineCheckInterval = 8;
+ size_t page_count = 1;
+
+ while (auto page_state = space_state->swept_unfinalized_pages.Pop()) {
+ FinalizePage(&*page_state);
+
+ if (page_count % kDeadlineCheckInterval == 0 &&
+ deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
+ return false;
+ }
+
+ page_count++;
+ }
return true;
}
+ void FinalizePage(SpaceState::SweptPageState* page_state) {
+ DCHECK(page_state);
+ DCHECK(page_state->page);
+ BasePage* page = page_state->page;
+
+ // Call finalizers.
+ for (HeapObjectHeader* object : page_state->unfinalized_objects) {
+ object->Finalize();
+ }
+
+ // Unmap page if empty.
+ if (page_state->is_empty) {
+ BasePage::Destroy(page);
+ return;
+ }
+
+ DCHECK(!page->is_large());
+
+ // Merge freelists without finalizers.
+ FreeList& space_freelist =
+ NormalPageSpace::From(page->space())->free_list();
+ space_freelist.Append(std::move(page_state->cached_free_list));
+
+ // Merge freelist with finalizers.
+ for (auto entry : page_state->unfinalized_free_list) {
+ space_freelist.Add(std::move(entry));
+ }
+
+ // Add the page to the space.
+ page->space()->AddPage(page);
+ }
+
private:
- SpaceStates* states_;
+ cppgc::Platform* platform_;
};
-class MutatorThreadSweepVisitor final
- : private HeapVisitor<MutatorThreadSweepVisitor> {
- friend class HeapVisitor<MutatorThreadSweepVisitor>;
+class MutatorThreadSweeper final : private HeapVisitor<MutatorThreadSweeper> {
+ friend class HeapVisitor<MutatorThreadSweeper>;
public:
- explicit MutatorThreadSweepVisitor(SpaceStates* space_states) {
- for (SpaceState& state : *space_states) {
- for (BasePage* page : state.unswept_pages) {
- Traverse(page);
+ explicit MutatorThreadSweeper(SpaceStates* states, cppgc::Platform* platform)
+ : states_(states), platform_(platform) {}
+
+ void Sweep() {
+ for (SpaceState& state : *states_) {
+ while (auto page = state.unswept_pages.Pop()) {
+ Traverse(*page);
+ }
+ }
+ }
+
+ bool SweepWithDeadline(double deadline_in_seconds) {
+ DCHECK(platform_);
+ static constexpr double kSlackInSeconds = 0.001;
+ for (SpaceState& state : *states_) {
+      // FinalizeSpaceWithDeadline() and SweepSpaceWithDeadline() only check
+      // the deadline every kDeadlineCheckInterval pages, so we leave a small
+      // slack for safety.
+ const double remaining_budget = deadline_in_seconds - kSlackInSeconds -
+ platform_->MonotonicallyIncreasingTime();
+ if (remaining_budget <= 0.) return false;
+
+ // First, prioritize finalization of pages that were swept concurrently.
+ SweepFinalizer finalizer(platform_);
+ if (!finalizer.FinalizeSpaceWithDeadline(&state, deadline_in_seconds)) {
+ return false;
+ }
+
+ // Help out the concurrent sweeper.
+ if (!SweepSpaceWithDeadline(&state, deadline_in_seconds)) {
+ return false;
}
- state.unswept_pages.clear();
}
+ return true;
}
private:
+ bool SweepSpaceWithDeadline(SpaceState* state, double deadline_in_seconds) {
+ static constexpr size_t kDeadlineCheckInterval = 8;
+ size_t page_count = 1;
+ while (auto page = state->unswept_pages.Pop()) {
+ Traverse(*page);
+ if (page_count % kDeadlineCheckInterval == 0 &&
+ deadline_in_seconds <= platform_->MonotonicallyIncreasingTime()) {
+ return false;
+ }
+ page_count++;
+ }
+
+ return true;
+ }
+
bool VisitNormalPage(NormalPage* page) {
- const bool is_empty = SweepNormalPage(page);
+ const bool is_empty = SweepNormalPage<InlinedFinalizationBuilder>(page);
if (is_empty) {
NormalPage::Destroy(page);
} else {
@@ -157,23 +371,119 @@ class MutatorThreadSweepVisitor final
}
bool VisitLargePage(LargePage* page) {
- if (page->ObjectHeader()->IsMarked()) {
+ HeapObjectHeader* header = page->ObjectHeader();
+ if (header->IsMarked()) {
+ StickyUnmark(header);
page->space()->AddPage(page);
} else {
- page->ObjectHeader()->Finalize();
+ header->Finalize();
LargePage::Destroy(page);
}
return true;
}
+
+ SpaceStates* states_;
+ cppgc::Platform* platform_;
+};
+
+class ConcurrentSweepTask final : public v8::JobTask,
+ private HeapVisitor<ConcurrentSweepTask> {
+ friend class HeapVisitor<ConcurrentSweepTask>;
+
+ public:
+ explicit ConcurrentSweepTask(SpaceStates* states) : states_(states) {}
+
+ void Run(v8::JobDelegate* delegate) final {
+ for (SpaceState& state : *states_) {
+ while (auto page = state.unswept_pages.Pop()) {
+ Traverse(*page);
+ if (delegate->ShouldYield()) return;
+ }
+ }
+ is_completed_.store(true, std::memory_order_relaxed);
+ }
+
+ size_t GetMaxConcurrency() const final {
+ return is_completed_.load(std::memory_order_relaxed) ? 0 : 1;
+ }
+
+ private:
+ bool VisitNormalPage(NormalPage* page) {
+ SpaceState::SweptPageState sweep_result =
+ SweepNormalPage<DeferredFinalizationBuilder>(page);
+ const size_t space_index = page->space()->index();
+ DCHECK_GT(states_->size(), space_index);
+ SpaceState& space_state = (*states_)[space_index];
+ space_state.swept_unfinalized_pages.Push(std::move(sweep_result));
+ return true;
+ }
+
+ bool VisitLargePage(LargePage* page) {
+ HeapObjectHeader* header = page->ObjectHeader();
+ if (header->IsMarked()) {
+ StickyUnmark(header);
+ page->space()->AddPage(page);
+ return true;
+ }
+ if (!header->IsFinalizable()) {
+ LargePage::Destroy(page);
+ return true;
+ }
+ const size_t space_index = page->space()->index();
+ DCHECK_GT(states_->size(), space_index);
+ SpaceState& state = (*states_)[space_index];
+ state.swept_unfinalized_pages.Push(
+ {page, {page->ObjectHeader()}, {}, {}, true});
+ return true;
+ }
+
+ SpaceStates* states_;
+ std::atomic_bool is_completed_{false};
+};
+
+// This visitor:
+// - clears free lists for all spaces (linear allocation buffers are expected
+//   to be empty already, see Sweeper::Start);
+// - moves all Heap pages to local Sweeper's state (SpaceStates).
+class PrepareForSweepVisitor final
+ : public HeapVisitor<PrepareForSweepVisitor> {
+ public:
+ explicit PrepareForSweepVisitor(SpaceStates* states) : states_(states) {}
+
+ bool VisitNormalPageSpace(NormalPageSpace* space) {
+ DCHECK(!space->linear_allocation_buffer().size());
+ space->free_list().Clear();
+ ExtractPages(space);
+ return true;
+ }
+
+ bool VisitLargePageSpace(LargePageSpace* space) {
+ ExtractPages(space);
+ return true;
+ }
+
+ private:
+ void ExtractPages(BaseSpace* space) {
+ BaseSpace::Pages space_pages = space->RemoveAllPages();
+ (*states_)[space->index()].unswept_pages.Insert(space_pages.begin(),
+ space_pages.end());
+ }
+
+ SpaceStates* states_;
};
} // namespace
class Sweeper::SweeperImpl final {
public:
- explicit SweeperImpl(RawHeap* heap) : heap_(heap) {
- space_states_.resize(heap_->size());
- }
+ SweeperImpl(RawHeap* heap, cppgc::Platform* platform,
+ StatsCollector* stats_collector)
+ : heap_(heap),
+ stats_collector_(stats_collector),
+ space_states_(heap->size()),
+ platform_(platform),
+ foreground_task_runner_(platform_->GetForegroundTaskRunner()) {}
+
+ ~SweeperImpl() { CancelSweepers(); }
void Start(Config config) {
is_in_progress_ = true;
@@ -181,29 +491,114 @@ class Sweeper::SweeperImpl final {
ObjectStartBitmapVerifier().Verify(heap_);
#endif
PrepareForSweepVisitor(&space_states_).Traverse(heap_);
+
if (config == Config::kAtomic) {
Finish();
} else {
DCHECK_EQ(Config::kIncrementalAndConcurrent, config);
- // TODO(chromium:1056170): Schedule concurrent sweeping.
+ ScheduleIncrementalSweeping();
+ ScheduleConcurrentSweeping();
}
}
void Finish() {
if (!is_in_progress_) return;
- MutatorThreadSweepVisitor s(&space_states_);
+ // First, call finalizers on the mutator thread.
+ SweepFinalizer finalizer(platform_);
+ finalizer.FinalizeHeap(&space_states_);
+
+ // Then, help out the concurrent thread.
+ MutatorThreadSweeper sweeper(&space_states_, platform_);
+ sweeper.Sweep();
+
+ // Synchronize with the concurrent sweeper and call remaining finalizers.
+ SynchronizeAndFinalizeConcurrentSweeping();
is_in_progress_ = false;
+
+ stats_collector_->NotifySweepingCompleted();
}
private:
- SpaceStates space_states_;
+ class IncrementalSweepTask : public v8::IdleTask {
+ public:
+ using Handle = SingleThreadedHandle;
+
+ explicit IncrementalSweepTask(SweeperImpl* sweeper)
+ : sweeper_(sweeper), handle_(Handle::NonEmptyTag{}) {}
+
+ static Handle Post(SweeperImpl* sweeper, v8::TaskRunner* runner) {
+ auto task = std::make_unique<IncrementalSweepTask>(sweeper);
+ auto handle = task->GetHandle();
+ runner->PostIdleTask(std::move(task));
+ return handle;
+ }
+
+ private:
+ void Run(double deadline_in_seconds) override {
+ if (handle_.IsCanceled() || !sweeper_->is_in_progress_) return;
+
+ MutatorThreadSweeper sweeper(&sweeper_->space_states_,
+ sweeper_->platform_);
+ const bool sweep_complete =
+ sweeper.SweepWithDeadline(deadline_in_seconds);
+
+ if (sweep_complete) {
+ sweeper_->SynchronizeAndFinalizeConcurrentSweeping();
+ } else {
+ sweeper_->ScheduleIncrementalSweeping();
+ }
+ }
+
+ Handle GetHandle() const { return handle_; }
+
+ SweeperImpl* sweeper_;
+ // TODO(chromium:1056170): Change to CancelableTask.
+ Handle handle_;
+ };
+
+ void ScheduleIncrementalSweeping() {
+ if (!platform_ || !foreground_task_runner_) return;
+
+ incremental_sweeper_handle_ =
+ IncrementalSweepTask::Post(this, foreground_task_runner_.get());
+ }
+
+ void ScheduleConcurrentSweeping() {
+ if (!platform_) return;
+
+ concurrent_sweeper_handle_ = platform_->PostJob(
+ v8::TaskPriority::kUserVisible,
+ std::make_unique<ConcurrentSweepTask>(&space_states_));
+ }
+
+ void CancelSweepers() {
+ if (incremental_sweeper_handle_) incremental_sweeper_handle_.Cancel();
+ if (concurrent_sweeper_handle_) concurrent_sweeper_handle_->Cancel();
+ }
+
+ void SynchronizeAndFinalizeConcurrentSweeping() {
+ CancelSweepers();
+
+ SweepFinalizer finalizer(platform_);
+ finalizer.FinalizeHeap(&space_states_);
+ }
+
RawHeap* heap_;
+ StatsCollector* stats_collector_;
+ SpaceStates space_states_;
+ cppgc::Platform* platform_;
+ std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
+ IncrementalSweepTask::Handle incremental_sweeper_handle_;
+ std::unique_ptr<v8::JobHandle> concurrent_sweeper_handle_;
bool is_in_progress_ = false;
};
-Sweeper::Sweeper(RawHeap* heap) : impl_(std::make_unique<SweeperImpl>(heap)) {}
+Sweeper::Sweeper(RawHeap* heap, cppgc::Platform* platform,
+ StatsCollector* stats_collector)
+ : impl_(std::make_unique<SweeperImpl>(heap, platform, stats_collector)) {}
+
Sweeper::~Sweeper() = default;
void Sweeper::Start(Config config) { impl_->Start(config); }
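The incremental and finalization paths both use the same amortized deadline pattern (check the clock only every kDeadlineCheckInterval items), distilled here into a standalone sketch:

    // Sketch: drain a work source under a deadline, checking the clock
    // only every Nth item. This bounds timer overhead while limiting
    // deadline overshoot to at most N items of work.
    template <typename WorkSource, typename ProcessFn>
    bool DrainWithDeadline(WorkSource& source, ProcessFn process,
                           cppgc::Platform* platform,
                           double deadline_in_seconds) {
      constexpr size_t kDeadlineCheckInterval = 8;
      size_t count = 1;
      while (auto item = source.Pop()) {
        process(*item);
        if (count % kDeadlineCheckInterval == 0 &&
            deadline_in_seconds <= platform->MonotonicallyIncreasingTime()) {
          return false;  // Budget exhausted; caller re-posts the task.
        }
        ++count;
      }
      return true;  // Source fully drained within the deadline.
    }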
diff --git a/chromium/v8/src/heap/cppgc/sweeper.h b/chromium/v8/src/heap/cppgc/sweeper.h
index 3e387731686..6ce17ea8fc8 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.h
+++ b/chromium/v8/src/heap/cppgc/sweeper.h
@@ -10,20 +10,25 @@
#include "src/base/macros.h"
namespace cppgc {
+
+class Platform;
+
namespace internal {
+class StatsCollector;
class RawHeap;
class V8_EXPORT_PRIVATE Sweeper final {
public:
enum class Config { kAtomic, kIncrementalAndConcurrent };
- explicit Sweeper(RawHeap*);
+ Sweeper(RawHeap*, cppgc::Platform*, StatsCollector*);
~Sweeper();
Sweeper(const Sweeper&) = delete;
Sweeper& operator=(const Sweeper&) = delete;
+ // Sweeper::Start assumes the heap holds no linear allocation buffers.
void Start(Config);
void Finish();
diff --git a/chromium/v8/src/heap/cppgc/task-handle.h b/chromium/v8/src/heap/cppgc/task-handle.h
new file mode 100644
index 00000000000..cbd8cc4a61f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/task-handle.h
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_TASK_HANDLE_H_
+#define V8_HEAP_CPPGC_TASK_HANDLE_H_
+
+#include <memory>
+
+#include "src/base/logging.h"
+
+namespace cppgc {
+namespace internal {
+
+// A handle that is used for cancelling individual tasks.
+struct SingleThreadedHandle {
+ struct NonEmptyTag {};
+
+  // Default construction results in an empty handle.
+ SingleThreadedHandle() = default;
+
+ explicit SingleThreadedHandle(NonEmptyTag)
+ : is_cancelled_(std::make_shared<bool>(false)) {}
+
+ void Cancel() {
+ DCHECK(is_cancelled_);
+ *is_cancelled_ = true;
+ }
+
+ bool IsCanceled() const {
+ DCHECK(is_cancelled_);
+ return *is_cancelled_;
+ }
+
+ // A handle is active if it is non-empty and not cancelled.
+ explicit operator bool() const {
+ return is_cancelled_.get() && !*is_cancelled_.get();
+ }
+
+ private:
+ std::shared_ptr<bool> is_cancelled_;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_TASK_HANDLE_H_
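A sketch of SingleThreadedHandle's lifecycle as IncrementalSweepTask uses it (single-threaded by design; the shared_ptr only shares the cancellation bit between task and poster):

    // Sketch: poster and task share one cancellation flag.
    using Handle = cppgc::internal::SingleThreadedHandle;
    Handle handle{Handle::NonEmptyTag{}};  // Active (non-empty) handle.
    // Poster side, when the work becomes obsolete:
    if (handle) handle.Cancel();
    // Task side, first thing in Run():
    //   if (handle_.IsCanceled()) return;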
diff --git a/chromium/v8/src/heap/cppgc/virtual-memory.cc b/chromium/v8/src/heap/cppgc/virtual-memory.cc
new file mode 100644
index 00000000000..070baa71192
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/virtual-memory.cc
@@ -0,0 +1,56 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/virtual-memory.h"
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+VirtualMemory::VirtualMemory(PageAllocator* page_allocator, size_t size,
+ size_t alignment, void* hint)
+ : page_allocator_(page_allocator) {
+ DCHECK_NOT_NULL(page_allocator);
+ DCHECK(IsAligned(size, page_allocator->CommitPageSize()));
+
+ const size_t page_size = page_allocator_->AllocatePageSize();
+ start_ = page_allocator->AllocatePages(hint, RoundUp(size, page_size),
+ RoundUp(alignment, page_size),
+ PageAllocator::kNoAccess);
+ if (start_) {
+ size_ = RoundUp(size, page_size);
+ }
+}
+
+VirtualMemory::~VirtualMemory() V8_NOEXCEPT {
+ if (IsReserved()) {
+ page_allocator_->FreePages(start_, size_);
+ }
+}
+
+VirtualMemory::VirtualMemory(VirtualMemory&& other) V8_NOEXCEPT
+ : page_allocator_(std::move(other.page_allocator_)),
+ start_(std::move(other.start_)),
+ size_(std::move(other.size_)) {
+ other.Reset();
+}
+
+VirtualMemory& VirtualMemory::operator=(VirtualMemory&& other) V8_NOEXCEPT {
+ DCHECK(!IsReserved());
+ page_allocator_ = std::move(other.page_allocator_);
+ start_ = std::move(other.start_);
+ size_ = std::move(other.size_);
+ other.Reset();
+ return *this;
+}
+
+void VirtualMemory::Reset() {
+ start_ = nullptr;
+ size_ = 0;
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/virtual-memory.h b/chromium/v8/src/heap/cppgc/virtual-memory.h
new file mode 100644
index 00000000000..1489abb9dea
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/virtual-memory.h
@@ -0,0 +1,60 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_VIRTUAL_MEMORY_H_
+#define V8_HEAP_CPPGC_VIRTUAL_MEMORY_H_
+
+#include <cstdint>
+
+#include "include/cppgc/platform.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+// Represents and controls an area of reserved memory.
+class V8_EXPORT_PRIVATE VirtualMemory {
+ public:
+ // Empty VirtualMemory object, controlling no reserved memory.
+ VirtualMemory() = default;
+
+  // Reserves virtual memory containing an area of the given size, aligned to
+  // |alignment| rounded up to the |page_allocator|'s allocate page size. The
+  // |size| must be a multiple of the |page_allocator|'s commit page size.
+ VirtualMemory(PageAllocator*, size_t size, size_t alignment,
+ void* hint = nullptr);
+
+ // Releases the reserved memory, if any, controlled by this VirtualMemory
+ // object.
+ ~VirtualMemory() V8_NOEXCEPT;
+
+ VirtualMemory(VirtualMemory&&) V8_NOEXCEPT;
+ VirtualMemory& operator=(VirtualMemory&&) V8_NOEXCEPT;
+
+ // Returns whether the memory has been reserved.
+ bool IsReserved() const { return start_ != nullptr; }
+
+ void* address() const {
+ DCHECK(IsReserved());
+ return start_;
+ }
+
+ size_t size() const {
+ DCHECK(IsReserved());
+ return size_;
+ }
+
+ private:
+ // Resets to the default state.
+ void Reset();
+
+ PageAllocator* page_allocator_ = nullptr;
+ void* start_ = nullptr;
+ size_t size_ = 0;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_VIRTUAL_MEMORY_H_
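A minimal usage sketch of the RAII reservation (sizes and the page_allocator are illustrative; per the DCHECK in the constructor, |size| must be commit-page aligned):

    // Sketch: reserve 4 MiB of inaccessible address space, aligned to
    // 4 MiB; the reservation is released when |reservation| goes out of
    // scope.
    cppgc::internal::VirtualMemory reservation(
        page_allocator, /*size=*/4u * 1024 * 1024,
        /*alignment=*/4u * 1024 * 1024);
    if (reservation.IsReserved()) {
      void* base = reservation.address();  // Pages start as kNoAccess.
      // Commit subranges later through the PageAllocator as needed.
    }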
diff --git a/chromium/v8/src/heap/cppgc/visitor.cc b/chromium/v8/src/heap/cppgc/visitor.cc
new file mode 100644
index 00000000000..74cab257b6e
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/visitor.cc
@@ -0,0 +1,76 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/visitor.h"
+
+#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/page-memory-inl.h"
+#include "src/heap/cppgc/sanitizers.h"
+
+namespace cppgc {
+
+#ifdef V8_ENABLE_CHECKS
+void Visitor::CheckObjectNotInConstruction(const void* address) {
+ // TODO(chromium:1056170): |address| is an inner pointer of an object. Check
+ // that the object is not in construction.
+}
+#endif // V8_ENABLE_CHECKS
+
+namespace internal {
+
+ConservativeTracingVisitor::ConservativeTracingVisitor(
+ HeapBase& heap, PageBackend& page_backend)
+ : heap_(heap), page_backend_(page_backend) {}
+
+namespace {
+
+void TraceConservatively(ConservativeTracingVisitor* visitor,
+ const HeapObjectHeader& header) {
+ Address* payload = reinterpret_cast<Address*>(header.Payload());
+ const size_t payload_size = header.GetSize();
+ for (size_t i = 0; i < (payload_size / sizeof(Address)); ++i) {
+ Address maybe_ptr = payload[i];
+#if defined(MEMORY_SANITIZER)
+ // |payload| may be uninitialized by design or just contain padding bytes.
+ // Copy into a local variable that is not poisoned for conservative marking.
+ // Copy into a temporary variable to maintain the original MSAN state.
+ MSAN_UNPOISON(&maybe_ptr, sizeof(maybe_ptr));
+#endif
+ if (maybe_ptr) {
+ visitor->TraceConservativelyIfNeeded(maybe_ptr);
+ }
+ }
+}
+
+} // namespace
+
+void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
+ const void* address) {
+ // TODO(chromium:1056170): Add page bloom filter
+
+ const BasePage* page = reinterpret_cast<const BasePage*>(
+ page_backend_.Lookup(static_cast<ConstAddress>(address)));
+
+ if (!page) return;
+
+ DCHECK_EQ(&heap_, page->heap());
+
+ auto* header = page->TryObjectHeaderFromInnerAddress(
+ const_cast<Address>(reinterpret_cast<ConstAddress>(address)));
+
+ if (!header) return;
+
+ if (!header->IsInConstruction<HeapObjectHeader::AccessMode::kNonAtomic>()) {
+ Visit(header->Payload(),
+ {header->Payload(),
+ GlobalGCInfoTable::GCInfoFromIndex(header->GetGCInfoIndex()).trace});
+ } else {
+ VisitConservatively(*header, TraceConservatively);
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/visitor.h b/chromium/v8/src/heap/cppgc/visitor.h
index caa840b4dc3..5003e31f8f4 100644
--- a/chromium/v8/src/heap/cppgc/visitor.h
+++ b/chromium/v8/src/heap/cppgc/visitor.h
@@ -5,16 +5,50 @@
#ifndef V8_HEAP_CPPGC_VISITOR_H_
#define V8_HEAP_CPPGC_VISITOR_H_
+#include "include/cppgc/persistent.h"
#include "include/cppgc/visitor.h"
+#include "src/heap/cppgc/heap-object-header.h"
namespace cppgc {
namespace internal {
+class HeapBase;
+class HeapObjectHeader;
+class PageBackend;
+
// Base visitor that is allowed to create a public cppgc::Visitor object and
// use its internals.
class VisitorBase : public cppgc::Visitor {
public:
VisitorBase() = default;
+
+ template <typename T>
+ void TraceRootForTesting(const Persistent<T>& p, const SourceLocation& loc) {
+ TraceRoot(p, loc);
+ }
+
+ template <typename T>
+ void TraceRootForTesting(const WeakPersistent<T>& p,
+ const SourceLocation& loc) {
+ TraceRoot(p, loc);
+ }
+};
+
+// Regular visitor that additionally allows for conservative tracing.
+class ConservativeTracingVisitor : public VisitorBase {
+ public:
+ ConservativeTracingVisitor(HeapBase&, PageBackend&);
+
+ void TraceConservativelyIfNeeded(const void*);
+
+ protected:
+ using TraceConservativelyCallback = void(ConservativeTracingVisitor*,
+ const HeapObjectHeader&);
+ virtual void VisitConservatively(HeapObjectHeader&,
+ TraceConservativelyCallback) {}
+
+ HeapBase& heap_;
+ PageBackend& page_backend_;
};
} // namespace internal
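The header above splits responsibilities: VisitorBase only exposes root tracing for tests, while ConservativeTracingVisitor adds TraceConservativelyIfNeeded and leaves VisitConservatively as a do-nothing virtual hook for marking visitors to override. Below is a small standalone model of that hook pattern; every type here is an illustrative stand-in for the real cppgc classes, not their actual definitions.

#include <cstdio>

struct FakeHeader {};  // stand-in for HeapObjectHeader

class BaseVisitorModel {
 public:
  virtual ~BaseVisitorModel() = default;

  void TraceIfNeeded(FakeHeader& header, bool in_construction) {
    if (in_construction) {
      // In-construction objects have no trustworthy trace callback yet, so
      // defer to the subclass hook, passing the conservative word scan.
      VisitConservatively(header, ScanAllWords);
    }
  }

 protected:
  using Callback = void (*)(BaseVisitorModel*, FakeHeader&);
  // Default is a no-op: visitors that cannot handle in-construction objects
  // simply skip them, matching the empty virtual in the header above.
  virtual void VisitConservatively(FakeHeader&, Callback) {}

 private:
  static void ScanAllWords(BaseVisitorModel*, FakeHeader&) {
    std::puts("scanning payload word by word");
  }
};

class MarkingVisitorModel : public BaseVisitorModel {
 protected:
  void VisitConservatively(FakeHeader& header, Callback callback) override {
    callback(this, header);  // marking visitors opt in to the scan
  }
};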
diff --git a/chromium/v8/src/heap/cppgc/write-barrier.cc b/chromium/v8/src/heap/cppgc/write-barrier.cc
new file mode 100644
index 00000000000..683a3fc091f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/write-barrier.cc
@@ -0,0 +1,84 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/cppgc/internal/write-barrier.h"
+
+#include "include/cppgc/internal/pointer-policies.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-object-header-inl.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page-inl.h"
+#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/marker.h"
+#include "src/heap/cppgc/marking-visitor.h"
+
+#if defined(CPPGC_CAGED_HEAP)
+#include "include/cppgc/internal/caged-heap-local-data.h"
+#endif
+
+namespace cppgc {
+namespace internal {
+
+namespace {
+
+void MarkValue(const BasePage* page, Marker* marker, const void* value) {
+#if defined(CPPGC_CAGED_HEAP)
+ DCHECK(reinterpret_cast<CagedHeapLocalData*>(
+ reinterpret_cast<uintptr_t>(value) &
+ ~(kCagedHeapReservationAlignment - 1))
+ ->is_marking_in_progress);
+#endif
+ auto& header =
+ const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
+ if (!header.TryMarkAtomic()) return;
+
+ DCHECK(marker);
+
+ if (V8_UNLIKELY(MutatorThreadMarkingVisitor::IsInConstruction(header))) {
+ // It is assumed that objects on not_fully_constructed_worklist_ are not
+ // marked.
+ header.Unmark();
+ Marker::NotFullyConstructedWorklist::View not_fully_constructed_worklist(
+ marker->not_fully_constructed_worklist(), Marker::kMutatorThreadId);
+ not_fully_constructed_worklist.Push(header.Payload());
+ return;
+ }
+
+ Marker::WriteBarrierWorklist::View write_barrier_worklist(
+ marker->write_barrier_worklist(), Marker::kMutatorThreadId);
+ write_barrier_worklist.Push(&header);
+}
+
+} // namespace
+
+void WriteBarrier::MarkingBarrierSlowWithSentinelCheck(const void* value) {
+ if (!value || value == kSentinelPointer) return;
+
+ MarkingBarrierSlow(value);
+}
+
+void WriteBarrier::MarkingBarrierSlow(const void* value) {
+ const BasePage* page = BasePage::FromPayload(value);
+ const auto* heap = page->heap();
+
+  // If the marker is not set up, no incremental/concurrent marking is in
+  // progress.
+ if (!heap->marker()) return;
+
+ MarkValue(page, heap->marker(), value);
+}
+
+#if defined(CPPGC_YOUNG_GENERATION)
+void WriteBarrier::GenerationalBarrierSlow(CagedHeapLocalData* local_data,
+ const AgeTable& age_table,
+ const void* slot,
+ uintptr_t value_offset) {
+ if (age_table[value_offset] == AgeTable::Age::kOld) return;
+ // Record slot.
+ local_data->heap_base->remembered_slots().insert(const_cast<void*>(slot));
+}
+#endif
+
+} // namespace internal
+} // namespace cppgc
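MarkValue together with MarkingBarrierSlow implements a Dijkstra-style marking barrier: once marking is in progress, any pointer that gets written is marked immediately and queued so the marker later traces through it; in-construction objects are instead unmarked and routed to the not-fully-constructed worklist for conservative treatment. A minimal standalone model of the common path follows; ObjectModel, MarkerModel, and WriteBarrierSketch are hypothetical names, not cppgc's types.

#include <atomic>
#include <deque>

struct ObjectModel {
  std::atomic<bool> marked{false};
  // First caller wins, so each object is queued at most once.
  bool TryMarkAtomic() {
    bool expected = false;
    return marked.compare_exchange_strong(expected, true);
  }
};

struct MarkerModel {
  // Objects whose fields the marker still has to trace through.
  std::deque<ObjectModel*> worklist;
};

void WriteBarrierSketch(MarkerModel* marker, ObjectModel** slot,
                        ObjectModel* value) {
  *slot = value;  // the actual pointer store
  // No marker set up means no incremental/concurrent marking is running,
  // mirroring the early return in MarkingBarrierSlow.
  if (!marker || !value) return;
  // Dijkstra-style barrier: the freshly referenced object is marked and
  // queued so concurrent marking cannot miss it.
  if (value->TryMarkAtomic()) marker->worklist.push_back(value);
}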