Diffstat (limited to 'chromium/third_party/blink/renderer/platform/heap')
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/BUILD.gn | 1
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/BlinkGCDesign.md | 194
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/DEPS | 3
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/address_cache.cc | 10
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/address_cache.h | 19
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/address_cache_test.cc | 6
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/garbage_collected.h | 2
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/gc_info.cc | 7
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/gc_info.h | 34
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/gc_info_test.cc | 4
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/heap.cc | 50
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/heap.h | 47
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/heap_allocator.cc | 6
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/heap_allocator.h | 49
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/heap_compact.cc | 170
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/heap_compact.h | 18
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/heap_compact_test.cc | 32
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/heap_page.cc | 18
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/heap_page.h | 2
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/heap_stats_collector.h | 19
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/heap_test.cc | 67
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/incremental_marking_test.cc | 144
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/marking_verifier.h | 3
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/marking_visitor.cc | 9
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/marking_visitor.h | 10
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/persistent.h | 27
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/persistent_node.h | 4
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/process_heap.cc | 12
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/safe_point.h | 39
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/thread_state.cc | 276
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/thread_state.h | 105
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/threading_traits.h | 4
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/trace_traits.h | 4
-rw-r--r--  chromium/third_party/blink/renderer/platform/heap/visitor.h | 6
34 files changed, 779 insertions, 622 deletions
diff --git a/chromium/third_party/blink/renderer/platform/heap/BUILD.gn b/chromium/third_party/blink/renderer/platform/heap/BUILD.gn
index 3859f8c0600..337d4cbfe9e 100644
--- a/chromium/third_party/blink/renderer/platform/heap/BUILD.gn
+++ b/chromium/third_party/blink/renderer/platform/heap/BUILD.gn
@@ -69,7 +69,6 @@ blink_platform_sources("heap") {
"persistent_node.h",
"process_heap.cc",
"process_heap.h",
- "safe_point.h",
"self_keep_alive.h",
"sparse_heap_bitmap.cc",
"sparse_heap_bitmap.h",
diff --git a/chromium/third_party/blink/renderer/platform/heap/BlinkGCDesign.md b/chromium/third_party/blink/renderer/platform/heap/BlinkGCDesign.md
index 9748c4d3691..cbac6aa5766 100644
--- a/chromium/third_party/blink/renderer/platform/heap/BlinkGCDesign.md
+++ b/chromium/third_party/blink/renderer/platform/heap/BlinkGCDesign.md
@@ -1,3 +1,4 @@
+
# Blink GC Design
Oilpan is a garbage collection system for Blink objects.
@@ -9,159 +10,117 @@ see [BlinkGCAPIReference](BlinkGCAPIReference.md).
## Overview
-Oilpan is a single-threaded mark-and-sweep GC.
-It doesn't (yet) implement a generational or incremental GC.
-
-Blink has multiple threads including the main thread, HTML parser threads,
-database threads and worker threads. Threads that touch Oilpan's heap need
-to be attached to Oilpan. These threads can have cross-thread pointers.
-Oilpan scans the object graph spanning these threads and collects
-unreachable objects.
+Oilpan implements a mark-and-sweep GC. It features thread-local garbage
+collection with incremental marking and lazy sweeping. It can also do
+compaction for a subset of objects (collection backings).
## Threading model
-Oilpan runs a GC in the following steps:
-
-Step 1. A thread decides to trigger a GC. The thread can be any thread
-(it is likely to be the main thread because most allocations take place
-on the main thread). The thread is called a GCing thread.
-
-Step 2. The GCing thread waits for all other threads to enter safe points.
-A safe point is a place where it's guaranteed that the thread does not
-mutate the object graph of objects in Oilpan's heap. In common cases the thread
-stops at the safe point but doesn't necessarily need to stop. For example,
-the thread is allowed to execute a synchronous IO at the safe point as
-long as it doesn't mutate the object graph. Safe points are inserted into
-many places so that the thread can enter the safe point as soon as possible
-when the GCing thread requests the thread to do so. For example, safe points
-are inserted into V8 interruptors, at the end of event loops,
-before acquiring a mutex, before starting a synchronous IO operation etc.
-
-Step 3. Once all the threads enter the safe points, the GCing thread starts
-a marking phase. The GCing thread marks all objects reachable from the root
-set by calling trace() methods defined on each object. This means that the
-GCing thread marks objects owned by all the threads. This doesn't cause any
-threading race because all the other threads are at the safe points.
-
-Step 4. Once the marking is complete, the GCing thread resumes executions of
-all the other threads. Each thread starts a sweeping phase. Each thread is
-responsible for destructing objects that the thread has allocated.
-That way objects are guaranteed to get destructed on the thread that has
-allocated the objects. The sweeping is done by each thread lazily.
-Instead of completing the sweeping phase in one go, the thread sweeps
-objects incrementally as much as it allocates. Lazy sweeping is helpful
-to distribute a long pause time of the sweeping phase into small chunks.
-
-The step 2 and 3 is the pause time of the GC.
-The pause time is proportional to the number of objects marked
-in the marking phase, meaning that it is proportional to the number of
-live objects.
-
-Notes:
-
-* If the GCing thread fails at stopping all the other threads in a
-certain period of time, it gives up triggering a GC. That way we avoid
-introducing an unacceptably long pause time. (This will rarely happen
-because each thread enters safe points very frequently.)
-
-* It is not really nice that the GCing thread has to stop all the other threads.
-For example, a worker thread has to get involved in a GC
-caused by a lot of allocations happening on the main thread.
-To resolve the issue, we have a plan to split the Oilpan's heap
-into per-thread heaps. Once it's implemented, each thread can run
-GCs independently.
+Oilpan creates a different heap and root set for each thread. This allows Oilpan
+to run garbage collection in parallel with mutators running in other threads.
+
+Any object or `Persistent` that is allocated on a thread automatically belongs
+to that thread's heap or root set. References to objects belonging to another
+thread's heap must use the `CrossThreadPersistent` handle. This is true even
+for on-heap to on-heap references.
+
+Assigning to a `CrossThreadPersistent` requires a global lock, meaning it might
+block waiting for garbage collection to end on all other threads.
+
+Threads that want to allocate Oilpan objects must be "attached" to Oilpan
+(typically through `WebThreadSupportingGC`).
+
+## Heap partitioning
+
+As mentioned earlier, we have separate heaps for each thread. This `ThreadHeap`
+is further partitioned into "Arenas". The Arena for an object is chosen
+depending on a number of criteria.
+
+For example:
+- objects over 64KiB go into `kLargeObjectArenaIndex`
+- objects that have an eager finalizer go into `kEagerSweepArenaIndex`
+- objects that are collection backings go into one of the collection backing
+arenas
+- objects that are Nodes or CSSValues go into one of the typed arenas
+- other objects go into one of the normal page arenas, bucketed depending on
+their size
## Precise GC and conservative GC
-Oilpan has two kinds of GCs.
+Oilpan has three kinds of GCs.
-When all threads are stopped at the safe points at the end of event loops,
-a precise GC is triggered. At this point it is guaranteed that
-there are no on-stack pointers pointing to Oilpan's heap.
-Thus Oilpan runs a precise GC. The root set of a precise GC is
-persistent handles.
+Precise GC is triggered at the end of an event loop. At this point, it is
+guaranteed that there are no on-stack pointers pointing to Oilpan's heap. Oilpan
+can just trace from the `Persistent` handles and collect all garbage precisely.
-Otherwise, a conservative GC is triggered. In this case, the GC scans
-a native stack of the threads (which are not stopped at the safe points
-at the end of event loops) and push the pointers discovered via the native
-stacks into the root set. (That's why you can use raw pointers on the
-native stack.) The root set of a conservative GC is persistent handles
-and the native stacks of the threads.
+Conservative GC runs when we are under memory pressure, and a GC cannot wait
+until we go back to an event loop. In this case, the GC scans the native stack
+and treats the pointers discovered via the native stacks as part of the root
+set. (That's why raw pointers are used instead of handles on the native stack.)
-A conservative GC is more expensive than a precise GC because
-the conservative GC needs to scan the native stacks.
-Thus Oilpan tries its best to trigger GCs at the end of an event loop.
-In particular, Oilpan tries its best to trigger GCs in idle tasks.
+Incremental GC is the most common type of GC. It splits the marking phase into
+small chunks and runs them between tasks. The smaller pause times help with
+reducing jank.
## Marking phase
-The marking phase (the step 3 in the above description) consists of
-the following steps. The marking phase is executed in a stop-the-world manner.
+The marking phase consists of the following steps and is executed in a
+stop-the-world manner.
-Step 3-1. The GCing thread marks all objects reachable from the root set
-by calling trace() methods defined on each object.
+Step 1. Mark all objects reachable from the root set by calling `Trace()`
+methods defined on each object.
-Step 3-2. The GCing thread clears out all trivial WeakMembers.
+Step 2. Clear out all weak handles and run weak callbacks.
To prevent a use-after-free from happening, it is very important to
make sure that Oilpan doesn't mis-trace any edge of the object graph.
This means that all pointers except on-stack pointers must be wrapped
with Oilpan's handles (i.e., Persistent<>, Member<>, WeakMember<> etc).
Raw pointers to on-heap objects have a risk of creating an edge Oilpan
-cannot understand and causing a use-after-free. You should not use raw pointers
-to on-heap objects (except raw pointers on native stacks) unless you're pretty
-sure that the target objects are guaranteed to be kept alive in other ways.
+cannot understand and causing a use-after-free. Raw pointers shall not be used
+to reference on-heap objects (except raw pointers on native stacks). Exceptions
+can be made if the target object is guaranteed to be kept alive in other ways.
## Sweeping phase
-The sweeping phase (the step 4 in the above description) consists of
-the following steps. The sweeping phase is executed by each thread in parallel.
-
-Step 4-1. The thread clears out all non-trivial WeakMembers.
-Non-trivial WeakMembers are the ones that have manual weak processing
-(registered by registerWeakMembers()) and the ones embedded in HeapHashMap etc.
-The reason we don't run non-trivial WeakMembers in the marking phase is that
-clearing out the non-trivial WeakMembers can invoke some destructors
-(e.g., if you have HeapHashMap<WeakMember<X>, OwnPtr<Y>>, Y's destructor
-is invoked when the weak processing removes the key).
-The destructors must run in the same thread that has allocated the objects.
+The sweeping phase consists of the following steps.
-Step 4-2. The thread invokes pre-finalizers.
+Step 1. Invoke pre-finalizers.
At this point, no destructors have been invoked.
Thus the pre-finalizers are allowed to touch any other on-heap objects
(which may get destructed in this sweeping phase).
-Step 4-3. The thread invokes destructors of dead objects that are marked
-as eagerly-finalized. See the following notes for more details about the
+Step 2. Invoke destructors of dead objects that are marked as
+eagerly-finalized. See the following notes for more details about the
eagerly-finalized objects.
-Step 4-4. The thread resumes mutator's execution. (A mutator means user code.)
+Step 3. The thread resumes mutator's execution. (A mutator means user code.)
-Step 4-5. As the mutator allocates new objects, lazy sweeping invokes
+Step 4. As the mutator allocates new objects, lazy sweeping invokes
destructors of the remaining dead objects incrementally.
There is no guarantee of the order in which the destructors are invoked.
That's why destructors must not touch any other on-heap objects
(which might have already been destructed). If some destructor unavoidably
-needs to touch other on-heap objects, you need to use a pre-finalizer.
-The pre-finalizer is allowed to touch other on-heap objects.
+needs to touch other on-heap objects, it will have to be converted to a
+pre-finalizer. The pre-finalizer is allowed to touch other on-heap objects.
The mutator is resumed before all the destructors have run.
For example, imagine a case where X is a client of Y, and Y holds
-a list of clients. If you rely on X's destructor removing X from the list,
+a list of clients. If the code relies on X's destructor removing X from the list,
there is a risk that Y iterates the list and calls some method of X
which may touch other on-heap objects. This causes a use-after-free.
-You need to make sure that X is explicitly removed from the list
+Care must be taken to make sure that X is explicitly removed from the list
before the mutator resumes its execution in a way that doesn't rely on
X's destructor.
Either way, the most important thing is that there is no guarantee of
-when destructors run. You shouldn't put any assumption about
-the order and the timing.
+when destructors run. Assumptions should not be made about the order and the
+timing of their execution.
(In general, it's dangerous to do something complicated in a destructor.)
-Notes (The followings are features you'll need only when you have
-unusual destruction requirements):
+Notes (the following are features that shall be reserved for unusual
+destruction requirements):
* Weak processing runs only when the holder object of the WeakMember
outlives the pointed object. If the holder object and the pointed object die
@@ -170,8 +129,8 @@ assuming that the weak processing always runs.
* Pre-finalizers are heavy because the thread needs to scan all pre-finalizers
at each sweeping phase to determine which pre-finalizers to be invoked
-(the thread needs to invoke pre-finalizers of dead objects).
-You should avoid adding pre-finalizers to frequently created objects.
+(the thread needs to invoke pre-finalizers of dead objects). Adding
+pre-finalizers to frequently created objects should be avoided.
* Eagerly-finalized objects are guaranteed to get destructed before the
mutator resumes its execution. This means that a destructor of
@@ -179,22 +138,3 @@ an eagerly-finalized object is allowed to touch other not-eagerly-finalized
objects whereas it's not allowed to touch other eagerly-finalized objects.
This notion is useful for some objects, but nasty.
We're planning to replace most eagerly-finalized objects with pre-finalizers.
-
-* There is a subtle scenario where a next GC is triggered before
-the thread finishes lazy sweeping. In that case, the not-yet-swept objects
-are marked as dead and the next GC starts. The objects marked as dead are
-swept in the sweeping phase of the next GC. This means that you cannot assume
-that some two objects get destructed in the same GC cycle.
-
-## Heap structures
-
-Each thread has its dedicated heap so that the thread can allocate an object
-without acquiring a lock. For example, an object allocated on thread 1 goes
-to a different heap than an object allocated on thread 2.
-
-In addition, each thread provides multiple arenas to group objects by their type
-and thus improves locality.
-For example, a Node object allocated on thread 1 goes to a different heap than
-a CSSValue object allocated on thread 1. (See BlinkGC.h to get the list of
-the typed arenas.)
-
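To make the handle types and tracing protocol described in the design doc above concrete, here is a minimal sketch of an Oilpan-managed class. It is illustrative only and not part of this patch; the class names, the `Dispose` pre-finalizer, the header paths, and the allocation idiom are assumptions that may differ between Blink revisions.

```cpp
#include "third_party/blink/renderer/platform/heap/garbage_collected.h"
#include "third_party/blink/renderer/platform/heap/handle.h"

namespace blink {

// Hypothetical on-heap class. It is allocated on the Oilpan heap of the
// thread that creates it and traced via Member<>/WeakMember<> edges.
class Child : public GarbageCollected<Child> {
 public:
  void Trace(Visitor*) {}  // No on-heap fields to trace.
};

class Parent : public GarbageCollectedFinalized<Parent> {
  USING_PRE_FINALIZER(Parent, Dispose);

 public:
  void Trace(Visitor* visitor) {
    visitor->Trace(child_);       // Strong edge: keeps |child_| alive.
    visitor->Trace(weak_child_);  // Weak edge: cleared if otherwise unreachable.
  }

  // Pre-finalizer: runs before any destructor of the GC cycle, so it may
  // still touch other on-heap objects (unlike ~Parent(), which runs during
  // lazy sweeping in no particular order).
  void Dispose() { weak_child_ = nullptr; }

 private:
  Member<Child> child_;
  WeakMember<Child> weak_child_;
};

// Off-heap code roots objects with Persistent<> (same thread) or
// CrossThreadPersistent<> (other threads), e.g.:
//   Persistent<Parent> root = new Parent;

}  // namespace blink
```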
diff --git a/chromium/third_party/blink/renderer/platform/heap/DEPS b/chromium/third_party/blink/renderer/platform/heap/DEPS
index c5aa107a763..4d6fe98210a 100644
--- a/chromium/third_party/blink/renderer/platform/heap/DEPS
+++ b/chromium/third_party/blink/renderer/platform/heap/DEPS
@@ -8,9 +8,8 @@ include_rules = [
# Dependencies.
"+base/atomicops.h",
"+base/bits.h",
- "+base/compiler_specific.h",
+ "+base/sampling_heap_profiler/poisson_allocation_sampler.h",
"+base/synchronization/lock.h",
- "+base/sys_info.h",
"+third_party/blink/renderer/platform/bindings",
"+third_party/blink/renderer/platform/cross_thread_functional.h",
diff --git a/chromium/third_party/blink/renderer/platform/heap/address_cache.cc b/chromium/third_party/blink/renderer/platform/heap/address_cache.cc
index 06c24e011f5..fb93fbcbe22 100644
--- a/chromium/third_party/blink/renderer/platform/heap/address_cache.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/address_cache.cc
@@ -8,6 +8,16 @@
namespace blink {
+AddressCache::EnabledScope::EnabledScope(AddressCache* address_cache)
+ : address_cache_(address_cache) {
+ address_cache_->FlushIfDirty();
+ address_cache_->EnableLookup();
+}
+
+AddressCache::EnabledScope::~EnabledScope() {
+ address_cache_->DisableLookup();
+}
+
void AddressCache::Flush() {
if (has_entries_) {
for (size_t i = 0; i < kNumberOfEntries; ++i)
diff --git a/chromium/third_party/blink/renderer/platform/heap/address_cache.h b/chromium/third_party/blink/renderer/platform/heap/address_cache.h
index 85c676cd6b2..526594f9562 100644
--- a/chromium/third_party/blink/renderer/platform/heap/address_cache.h
+++ b/chromium/third_party/blink/renderer/platform/heap/address_cache.h
@@ -16,12 +16,17 @@ class PLATFORM_EXPORT AddressCache {
USING_FAST_MALLOC(AddressCache);
public:
- AddressCache() : enabled_(false), has_entries_(false), dirty_(false) {
- // Start by flushing the cache in a non-empty state to initialize all the
- // cache entries.
- for (size_t i = 0; i < kNumberOfEntries; ++i)
- entries_[i] = nullptr;
- }
+ class PLATFORM_EXPORT EnabledScope {
+ public:
+ explicit EnabledScope(AddressCache*);
+ ~EnabledScope();
+
+ private:
+ AddressCache* const address_cache_;
+ };
+
+ AddressCache()
+ : entries_{}, enabled_(false), has_entries_(false), dirty_(false) {}
void EnableLookup() { enabled_ = true; }
void DisableLookup() { enabled_ = false; }
@@ -29,7 +34,7 @@ class PLATFORM_EXPORT AddressCache {
void MarkDirty() { dirty_ = true; }
void Flush();
void FlushIfDirty();
- bool IsEmpty() { return !has_entries_; }
+ bool IsEmpty() const { return !has_entries_; }
// Perform a lookup in the cache. Returns true if the address is guaranteed
// to not be in Blink's heap and false otherwise.
diff --git a/chromium/third_party/blink/renderer/platform/heap/address_cache_test.cc b/chromium/third_party/blink/renderer/platform/heap/address_cache_test.cc
index 001b32ce553..280920dd1c4 100644
--- a/chromium/third_party/blink/renderer/platform/heap/address_cache_test.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/address_cache_test.cc
@@ -15,6 +15,12 @@ const Address kObjectAddress = reinterpret_cast<Address>(kBlinkPageSize);
} // namespace
+TEST(AddressCacheTest, Scope) {
+ AddressCache cache;
+ AddressCache::EnabledScope scope(&cache);
+ EXPECT_FALSE(cache.Lookup(kObjectAddress));
+}
+
TEST(AddressCacheTest, InitialIsEmpty) {
AddressCache cache;
cache.EnableLookup();
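As a usage note, the new `AddressCache::EnabledScope` lets callers bracket conservative stack scanning with the flush/enable/disable sequence that heap.cc previously spelled out by hand (see the removed `VisitStackRoots` further below). A hypothetical caller-side sketch, where the helper function name is an assumption:

```cpp
// Hypothetical helper (not part of this patch): the scope flushes a dirty
// cache and enables lookups on construction, and disables lookups again on
// destruction, even on early return.
void ScanStackWithFilterCache(ThreadState* thread_state,
                              MarkingVisitor* visitor,
                              AddressCache* address_cache) {
  AddressCache::EnabledScope enabled_scope(address_cache);
  thread_state->VisitStack(visitor);
}
```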
diff --git a/chromium/third_party/blink/renderer/platform/heap/garbage_collected.h b/chromium/third_party/blink/renderer/platform/heap/garbage_collected.h
index 3111f5c7f4a..d07e1e4cf23 100644
--- a/chromium/third_party/blink/renderer/platform/heap/garbage_collected.h
+++ b/chromium/third_party/blink/renderer/platform/heap/garbage_collected.h
@@ -49,6 +49,8 @@ struct IsGarbageCollectedMixin {
struct TraceDescriptor {
STACK_ALLOCATED();
+
+ public:
void* base_object_payload;
TraceCallback callback;
bool can_trace_eagerly;
diff --git a/chromium/third_party/blink/renderer/platform/heap/gc_info.cc b/chromium/third_party/blink/renderer/platform/heap/gc_info.cc
index 1033c658f78..6c515bc5c9f 100644
--- a/chromium/third_party/blink/renderer/platform/heap/gc_info.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/gc_info.cc
@@ -49,7 +49,7 @@ void GCInfoTable::CreateGlobalTable() {
}
void GCInfoTable::EnsureGCInfoIndex(const GCInfo* gc_info,
- size_t* gc_info_index_slot) {
+ uint32_t* gc_info_index_slot) {
DCHECK(gc_info);
DCHECK(gc_info_index_slot);
@@ -64,14 +64,13 @@ void GCInfoTable::EnsureGCInfoIndex(const GCInfo* gc_info,
if (*gc_info_index_slot)
return;
- int index = ++current_index_;
- size_t gc_info_index = static_cast<size_t>(index);
+ uint32_t gc_info_index = ++current_index_;
CHECK(gc_info_index < GCInfoTable::kMaxIndex);
if (current_index_ >= limit_)
Resize();
table_[gc_info_index] = gc_info;
- ReleaseStore(reinterpret_cast<int*>(gc_info_index_slot), index);
+ ReleaseStore(gc_info_index_slot, gc_info_index);
}
void GCInfoTable::Resize() {
diff --git a/chromium/third_party/blink/renderer/platform/heap/gc_info.h b/chromium/third_party/blink/renderer/platform/heap/gc_info.h
index b6e7e42f512..46eb0fdbb51 100644
--- a/chromium/third_party/blink/renderer/platform/heap/gc_info.h
+++ b/chromium/third_party/blink/renderer/platform/heap/gc_info.h
@@ -44,7 +44,7 @@ struct GCInfo {
};
#if DCHECK_IS_ON()
-PLATFORM_EXPORT void AssertObjectHasGCInfo(const void*, size_t gc_info_index);
+PLATFORM_EXPORT void AssertObjectHasGCInfo(const void*, uint32_t gc_info_index);
#endif
class PLATFORM_EXPORT GCInfoTable {
@@ -56,14 +56,14 @@ class PLATFORM_EXPORT GCInfoTable {
// of the Oilpan GC Clang plugin, there appear to be at most about 6,000
// types. Thus 14 bits should be more than twice as many bits as we will ever
// need.
- static constexpr size_t kMaxIndex = 1 << 14;
+ static constexpr uint32_t kMaxIndex = 1 << 14;
// Sets up a singleton table that can be acquired using Get().
static void CreateGlobalTable();
static GCInfoTable& Get() { return *global_table_; }
- inline const GCInfo* GCInfoFromIndex(size_t index) {
+ inline const GCInfo* GCInfoFromIndex(uint32_t index) {
DCHECK_GE(index, 1u);
DCHECK(index < kMaxIndex);
DCHECK(table_);
@@ -72,9 +72,9 @@ class PLATFORM_EXPORT GCInfoTable {
return info;
}
- void EnsureGCInfoIndex(const GCInfo*, size_t*);
+ void EnsureGCInfoIndex(const GCInfo*, uint32_t*);
- size_t GcInfoIndex() { return current_index_; }
+ uint32_t GcInfoIndex() { return current_index_; }
private:
FRIEND_TEST_ALL_PREFIXES(GCInfoTest, InitialEmpty);
@@ -95,10 +95,10 @@ class PLATFORM_EXPORT GCInfoTable {
// GCInfo indices start from 1 for heap objects, with 0 being treated
// specially as the index for freelist entries and large heap objects.
- size_t current_index_ = 0;
+ uint32_t current_index_ = 0;
// The limit (exclusive) of the currently allocated table.
- size_t limit_ = 0;
+ uint32_t limit_ = 0;
Mutex table_mutex_;
};
@@ -108,14 +108,14 @@ class PLATFORM_EXPORT GCInfoTable {
template <typename T>
struct GCInfoAtBaseType {
STATIC_ONLY(GCInfoAtBaseType);
- static size_t Index() {
+ static uint32_t Index() {
static_assert(sizeof(T), "T must be fully defined");
static const GCInfo kGcInfo = {
TraceTrait<T>::Trace, FinalizerTrait<T>::Finalize,
NameTrait<T>::GetName, FinalizerTrait<T>::kNonTrivialFinalizer,
std::is_polymorphic<T>::value,
};
- static size_t gc_info_index = 0;
+ static uint32_t gc_info_index = 0;
if (!AcquireLoad(&gc_info_index))
GCInfoTable::Get().EnsureGCInfoIndex(&kGcInfo, &gc_info_index);
DCHECK_GE(gc_info_index, 1u);
@@ -144,7 +144,7 @@ struct GetGarbageCollectedType<T, false> {
template <typename T>
struct GCInfoTrait {
STATIC_ONLY(GCInfoTrait);
- static size_t Index() {
+ static uint32_t Index() {
return GCInfoAtBaseType<typename GetGarbageCollectedType<T>::type>::Index();
}
};
@@ -158,13 +158,13 @@ template <typename T, typename U, typename V>
class HeapHashSet;
template <typename T, typename U, typename V>
class HeapLinkedHashSet;
-template <typename T, size_t inlineCapacity, typename U>
+template <typename T, wtf_size_t inlineCapacity, typename U>
class HeapListHashSet;
-template <typename ValueArg, size_t inlineCapacity>
+template <typename ValueArg, wtf_size_t inlineCapacity>
class HeapListHashSetAllocator;
-template <typename T, size_t inlineCapacity>
+template <typename T, wtf_size_t inlineCapacity>
class HeapVector;
-template <typename T, size_t inlineCapacity>
+template <typename T, wtf_size_t inlineCapacity>
class HeapDeque;
template <typename T, typename U, typename V>
class HeapHashCountedSet;
@@ -178,17 +178,17 @@ struct GCInfoTrait<HeapHashSet<T, U, V>>
template <typename T, typename U, typename V>
struct GCInfoTrait<HeapLinkedHashSet<T, U, V>>
: public GCInfoTrait<LinkedHashSet<T, U, V, HeapAllocator>> {};
-template <typename T, size_t inlineCapacity, typename U>
+template <typename T, wtf_size_t inlineCapacity, typename U>
struct GCInfoTrait<HeapListHashSet<T, inlineCapacity, U>>
: public GCInfoTrait<
ListHashSet<T,
inlineCapacity,
U,
HeapListHashSetAllocator<T, inlineCapacity>>> {};
-template <typename T, size_t inlineCapacity>
+template <typename T, wtf_size_t inlineCapacity>
struct GCInfoTrait<HeapVector<T, inlineCapacity>>
: public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> {};
-template <typename T, size_t inlineCapacity>
+template <typename T, wtf_size_t inlineCapacity>
struct GCInfoTrait<HeapDeque<T, inlineCapacity>>
: public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> {};
template <typename T, typename U, typename V>
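The `uint32_t` migration above keeps the lazily assigned GCInfo index pattern intact: `GCInfoAtBaseType<T>::Index()` publishes the index with release semantics only after the table entry is written, so a reader that observes a non-zero index via the acquire load also observes the entry. Below is a standalone sketch of that pattern using standard `std::atomic`; it is illustrative only, whereas the real code uses WTF atomics plus a mutex and table resizing.

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

struct FakeGCInfo {};  // Stand-in for blink::GCInfo.

constexpr uint32_t kMaxIndex = 1 << 14;
const FakeGCInfo* g_table[kMaxIndex];
std::atomic<uint32_t> g_next_index{0};

uint32_t EnsureIndex(const FakeGCInfo* info, std::atomic<uint32_t>* slot) {
  // Fast path: a previously published index is visible together with its
  // table entry because of the release store below.
  uint32_t index = slot->load(std::memory_order_acquire);
  if (index)
    return index;
  // Slow path. (The real implementation holds a mutex here, re-checks the
  // slot, and resizes the table on demand.)
  index = g_next_index.fetch_add(1, std::memory_order_relaxed) + 1;
  assert(index < kMaxIndex);
  g_table[index] = info;                          // Write the entry first...
  slot->store(index, std::memory_order_release);  // ...then publish the index.
  return index;
}
```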
diff --git a/chromium/third_party/blink/renderer/platform/heap/gc_info_test.cc b/chromium/third_party/blink/renderer/platform/heap/gc_info_test.cc
index d651be09c53..6447fc31900 100644
--- a/chromium/third_party/blink/renderer/platform/heap/gc_info_test.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/gc_info_test.cc
@@ -16,8 +16,8 @@ TEST(GCInfoTest, InitialEmpty) {
TEST(GCInfoTest, ResizeToMaxIndex) {
GCInfoTable table;
GCInfo info = {nullptr, nullptr, nullptr, false, false};
- size_t slot = 0;
- for (size_t i = 0; i < (GCInfoTable::kMaxIndex - 1); i++) {
+ uint32_t slot = 0;
+ for (uint32_t i = 0; i < (GCInfoTable::kMaxIndex - 1); i++) {
slot = 0;
table.EnsureGCInfoIndex(&info, &slot);
EXPECT_LT(0u, slot);
diff --git a/chromium/third_party/blink/renderer/platform/heap/heap.cc b/chromium/third_party/blink/renderer/platform/heap/heap.cc
index 7688dfb5f79..8a1dc1bd0d8 100644
--- a/chromium/third_party/blink/renderer/platform/heap/heap.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/heap.cc
@@ -44,7 +44,6 @@
#include "third_party/blink/renderer/platform/heap/marking_visitor.h"
#include "third_party/blink/renderer/platform/heap/page_memory.h"
#include "third_party/blink/renderer/platform/heap/page_pool.h"
-#include "third_party/blink/renderer/platform/heap/safe_point.h"
#include "third_party/blink/renderer/platform/heap/thread_state.h"
#include "third_party/blink/renderer/platform/histogram.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
@@ -199,25 +198,16 @@ HeapCompact* ThreadHeap::Compaction() {
}
void ThreadHeap::RegisterMovingObjectReference(MovableReference* slot) {
- DCHECK(slot);
- DCHECK(*slot);
Compaction()->RegisterMovingObjectReference(slot);
}
-void ThreadHeap::RegisterMovingObjectCallback(MovableReference reference,
+void ThreadHeap::RegisterMovingObjectCallback(MovableReference* slot,
MovingObjectCallback callback,
void* callback_data) {
- DCHECK(reference);
- Compaction()->RegisterMovingObjectCallback(reference, callback,
- callback_data);
+ Compaction()->RegisterMovingObjectCallback(slot, callback, callback_data);
}
-void ThreadHeap::ProcessMarkingStack(Visitor* visitor) {
- bool complete = AdvanceMarkingStackProcessing(visitor, TimeTicks::Max());
- CHECK(complete);
-}
-
-void ThreadHeap::MarkNotFullyConstructedObjects(Visitor* visitor) {
+void ThreadHeap::MarkNotFullyConstructedObjects(MarkingVisitor* visitor) {
DCHECK(!thread_state_->IsIncrementalMarking());
ThreadHeapStatsCollector::Scope stats_scope(
stats_collector(),
@@ -227,8 +217,7 @@ void ThreadHeap::MarkNotFullyConstructedObjects(Visitor* visitor) {
while (
not_fully_constructed_worklist_->Pop(WorklistTaskId::MainThread, &item)) {
BasePage* const page = PageFromObject(item);
- reinterpret_cast<MarkingVisitor*>(visitor)->ConservativelyMarkAddress(
- page, reinterpret_cast<Address>(item));
+ visitor->ConservativelyMarkAddress(page, reinterpret_cast<Address>(item));
}
}
@@ -256,8 +245,7 @@ void ThreadHeap::InvokeEphemeronCallbacks(Visitor* visitor) {
ephemeron_callbacks_ = std::move(final_set);
}
-bool ThreadHeap::AdvanceMarkingStackProcessing(Visitor* visitor,
- TimeTicks deadline) {
+bool ThreadHeap::AdvanceMarking(MarkingVisitor* visitor, TimeTicks deadline) {
const size_t kDeadlineCheckInterval = 2500;
size_t processed_callback_count = 0;
// Ephemeron fixed point loop.
@@ -324,25 +312,7 @@ size_t ThreadHeap::ObjectPayloadSizeForTesting() {
return object_payload_size;
}
-void ThreadHeap::VisitPersistentRoots(Visitor* visitor) {
- ThreadHeapStatsCollector::Scope stats_scope(
- stats_collector(), ThreadHeapStatsCollector::kVisitPersistentRoots);
- DCHECK(thread_state_->InAtomicMarkingPause());
- thread_state_->VisitPersistents(visitor);
-}
-
-void ThreadHeap::VisitStackRoots(MarkingVisitor* visitor) {
- ThreadHeapStatsCollector::Scope stats_scope(
- stats_collector(), ThreadHeapStatsCollector::kVisitStackRoots);
- DCHECK(thread_state_->InAtomicMarkingPause());
- address_cache_->FlushIfDirty();
- address_cache_->EnableLookup();
- thread_state_->VisitStack(visitor);
- address_cache_->DisableLookup();
-}
-
BasePage* ThreadHeap::LookupPageForAddress(Address address) {
- DCHECK(thread_state_->InAtomicMarkingPause());
if (PageMemoryRegion* region = region_tree_->Lookup(address)) {
return region->PageFromAddress(address);
}
@@ -430,8 +400,8 @@ int ThreadHeap::ArenaIndexOfVectorArenaLeastRecentlyExpanded(
return arena_index_with_min_arena_age;
}
-BaseArena* ThreadHeap::ExpandedVectorBackingArena(size_t gc_info_index) {
- size_t entry_index = gc_info_index & kLikelyToBePromptlyFreedArrayMask;
+BaseArena* ThreadHeap::ExpandedVectorBackingArena(uint32_t gc_info_index) {
+ uint32_t entry_index = gc_info_index & kLikelyToBePromptlyFreedArrayMask;
--likely_to_be_promptly_freed_[entry_index];
int arena_index = vector_backing_arena_index_;
arena_ages_[arena_index] = ++current_arena_ages_;
@@ -448,9 +418,9 @@ void ThreadHeap::AllocationPointAdjusted(int arena_index) {
}
}
-void ThreadHeap::PromptlyFreed(size_t gc_info_index) {
+void ThreadHeap::PromptlyFreed(uint32_t gc_info_index) {
DCHECK(thread_state_->CheckThread());
- size_t entry_index = gc_info_index & kLikelyToBePromptlyFreedArrayMask;
+ uint32_t entry_index = gc_info_index & kLikelyToBePromptlyFreedArrayMask;
// See the comment in vectorBackingArena() for why this is +3.
likely_to_be_promptly_freed_[entry_index] += 3;
}
@@ -550,7 +520,7 @@ void ThreadHeap::TakeSnapshot(SnapshotType type) {
size_t total_dead_count = 0;
size_t total_live_size = 0;
size_t total_dead_size = 0;
- for (size_t gc_info_index = 1;
+ for (uint32_t gc_info_index = 1;
gc_info_index <= GCInfoTable::Get().GcInfoIndex(); ++gc_info_index) {
total_live_count += info.live_count[gc_info_index];
total_dead_count += info.dead_count[gc_info_index];
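The renamed `AdvanceMarking()` above drains the marking worklist in bounded steps, checking the clock only every `kDeadlineCheckInterval` items so that time reads stay cheap. A minimal generic sketch of that deadline pattern follows; the names and the worklist interface are assumptions, not Blink's actual types.

```cpp
#include <cstddef>
#include <functional>

// Drains |worklist| until it is empty or the deadline passes. Returns true
// when fully drained; false means marking must resume in a later step.
template <typename Worklist, typename Item>
bool DrainWithDeadline(Worklist* worklist,
                       const std::function<void(const Item&)>& mark,
                       const std::function<bool()>& past_deadline) {
  constexpr size_t kDeadlineCheckInterval = 2500;
  size_t processed = 0;
  Item item;
  while (worklist->Pop(&item)) {
    mark(item);  // May push newly discovered objects back onto the worklist.
    if (++processed % kDeadlineCheckInterval == 0 && past_deadline())
      return false;
  }
  return true;
}
```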
diff --git a/chromium/third_party/blink/renderer/platform/heap/heap.h b/chromium/third_party/blink/renderer/platform/heap/heap.h
index 1623c4188ce..499750ce30a 100644
--- a/chromium/third_party/blink/renderer/platform/heap/heap.h
+++ b/chromium/third_party/blink/renderer/platform/heap/heap.h
@@ -203,11 +203,6 @@ class PLATFORM_EXPORT ThreadHeap {
return weak_callback_worklist_.get();
}
- void VisitPersistentRoots(Visitor*);
- void VisitStackRoots(MarkingVisitor*);
- void EnterSafePoint(ThreadState*);
- void LeaveSafePoint();
-
// Is the finalizable GC object still alive, but slated for lazy sweeping?
// If a lazy sweep is in progress, returns true if the object was found
// to be not reachable during the marking phase, but it has yet to be swept
@@ -262,7 +257,7 @@ class PLATFORM_EXPORT ThreadHeap {
//
// For Blink, |HeapLinkedHashSet<>| is currently the only abstraction which
// relies on this feature.
- void RegisterMovingObjectCallback(MovableReference,
+ void RegisterMovingObjectCallback(MovableReference*,
MovingObjectCallback,
void* callback_data);
@@ -280,17 +275,19 @@ class PLATFORM_EXPORT ThreadHeap {
Address AllocateOnArenaIndex(ThreadState*,
size_t,
int arena_index,
- size_t gc_info_index,
+ uint32_t gc_info_index,
const char* type_name);
template <typename T>
static Address Allocate(size_t, bool eagerly_sweep = false);
template <typename T>
static Address Reallocate(void* previous, size_t);
- void ProcessMarkingStack(Visitor*);
void WeakProcessing(Visitor*);
- void MarkNotFullyConstructedObjects(Visitor*);
- bool AdvanceMarkingStackProcessing(Visitor*, TimeTicks deadline);
+
+ // Marks not fully constructed objects.
+ void MarkNotFullyConstructedObjects(MarkingVisitor*);
+ // Marks the transitive closure including ephemerons.
+ bool AdvanceMarking(MarkingVisitor*, TimeTicks deadline);
void VerifyMarking();
// Conservatively checks whether an address is a pointer in any of the
@@ -304,7 +301,7 @@ class PLATFORM_EXPORT ThreadHeap {
size_t ObjectPayloadSizeForTesting();
- AddressCache* address_cache() { return address_cache_.get(); }
+ AddressCache* address_cache() const { return address_cache_.get(); }
PagePool* GetFreePagePool() { return free_page_pool_.get(); }
@@ -350,9 +347,9 @@ class PLATFORM_EXPORT ThreadHeap {
// (*) More than 33% of the same type of vectors have been promptly
// freed since the last GC.
//
- BaseArena* VectorBackingArena(size_t gc_info_index) {
+ BaseArena* VectorBackingArena(uint32_t gc_info_index) {
DCHECK(thread_state_->CheckThread());
- size_t entry_index = gc_info_index & kLikelyToBePromptlyFreedArrayMask;
+ uint32_t entry_index = gc_info_index & kLikelyToBePromptlyFreedArrayMask;
--likely_to_be_promptly_freed_[entry_index];
int arena_index = vector_backing_arena_index_;
// If likely_to_be_promptly_freed_[entryIndex] > 0, that means that
@@ -367,14 +364,14 @@ class PLATFORM_EXPORT ThreadHeap {
DCHECK(IsVectorArenaIndex(arena_index));
return arenas_[arena_index];
}
- BaseArena* ExpandedVectorBackingArena(size_t gc_info_index);
+ BaseArena* ExpandedVectorBackingArena(uint32_t gc_info_index);
static bool IsVectorArenaIndex(int arena_index) {
return BlinkGC::kVector1ArenaIndex <= arena_index &&
arena_index <= BlinkGC::kVector4ArenaIndex;
}
static bool IsNormalArenaIndex(int);
void AllocationPointAdjusted(int arena_index);
- void PromptlyFreed(size_t gc_info_index);
+ void PromptlyFreed(uint32_t gc_info_index);
void ClearArenaAges();
int ArenaIndexOfVectorArenaLeastRecentlyExpanded(int begin_arena_index,
int end_arena_index);
@@ -602,7 +599,7 @@ class VerifyEagerFinalization {
inline Address ThreadHeap::AllocateOnArenaIndex(ThreadState* state,
size_t size,
int arena_index,
- size_t gc_info_index,
+ uint32_t gc_info_index,
const char* type_name) {
DCHECK(state->IsAllocationAllowed());
DCHECK_NE(arena_index, BlinkGC::kLargeObjectArenaIndex);
@@ -651,7 +648,7 @@ Address ThreadHeap::Reallocate(void* previous, size_t size) {
arena_index = ArenaIndexForObjectSize(size);
}
- size_t gc_info_index = GCInfoTrait<T>::Index();
+ uint32_t gc_info_index = GCInfoTrait<T>::Index();
// TODO(haraken): We don't support reallocate() for finalizable objects.
DCHECK(!GCInfoTable::Get()
.GCInfoFromIndex(previous_header->GcInfoIndex())
@@ -677,11 +674,17 @@ Address ThreadHeap::Reallocate(void* previous, size_t size) {
template <typename T>
void Visitor::HandleWeakCell(Visitor* self, void* object) {
T** cell = reinterpret_cast<T**>(object);
- // '-1' means deleted value. This can happen when weak fields are deleted
- // while incremental marking is running.
- if (*cell && (*cell == reinterpret_cast<T*>(-1) ||
- !ObjectAliveTrait<T>::IsHeapObjectAlive(*cell)))
- *cell = nullptr;
+ T* contents = *cell;
+ if (contents) {
+ if (contents == reinterpret_cast<T*>(-1)) {
+ // '-1' means deleted value. This can happen when weak fields are deleted
+ // while incremental marking is running. Deleted values need to be
+ // preserved to avoid reviving objects in containers.
+ return;
+ }
+ if (!ObjectAliveTrait<T>::IsHeapObjectAlive(contents))
+ *cell = nullptr;
+ }
}
} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/heap_allocator.cc b/chromium/third_party/blink/renderer/platform/heap/heap_allocator.cc
index 7791505bbcd..411f19a7878 100644
--- a/chromium/third_party/blink/renderer/platform/heap/heap_allocator.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/heap_allocator.cc
@@ -40,8 +40,10 @@ void HeapAllocator::FreeInlineVectorBacking(void* address) {
BackingFree(address);
}
-void HeapAllocator::FreeHashTableBacking(void* address, bool is_weak_table) {
- if (!ThreadState::Current()->IsMarkingInProgress() || !is_weak_table)
+void HeapAllocator::FreeHashTableBacking(void* address) {
+ // When incremental marking is enabled, weak callbacks may have been
+ // registered.
+ if (!ThreadState::Current()->IsMarkingInProgress())
BackingFree(address);
}
diff --git a/chromium/third_party/blink/renderer/platform/heap/heap_allocator.h b/chromium/third_party/blink/renderer/platform/heap/heap_allocator.h
index ab841704e2e..ec3b12684c6 100644
--- a/chromium/third_party/blink/renderer/platform/heap/heap_allocator.h
+++ b/chromium/third_party/blink/renderer/platform/heap/heap_allocator.h
@@ -72,7 +72,7 @@ class PLATFORM_EXPORT HeapAllocator {
ThreadState* state =
ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
DCHECK(state->IsAllocationAllowed());
- size_t gc_info_index = GCInfoTrait<HeapVectorBacking<T>>::Index();
+ uint32_t gc_info_index = GCInfoTrait<HeapVectorBacking<T>>::Index();
NormalPageArena* arena = static_cast<NormalPageArena*>(
state->Heap().VectorBackingArena(gc_info_index));
return reinterpret_cast<T*>(arena->AllocateObject(
@@ -83,7 +83,7 @@ class PLATFORM_EXPORT HeapAllocator {
ThreadState* state =
ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
DCHECK(state->IsAllocationAllowed());
- size_t gc_info_index = GCInfoTrait<HeapVectorBacking<T>>::Index();
+ uint32_t gc_info_index = GCInfoTrait<HeapVectorBacking<T>>::Index();
NormalPageArena* arena = static_cast<NormalPageArena*>(
state->Heap().ExpandedVectorBackingArena(gc_info_index));
return reinterpret_cast<T*>(arena->AllocateObject(
@@ -96,7 +96,7 @@ class PLATFORM_EXPORT HeapAllocator {
size_t quantized_shrunk_size);
template <typename T>
static T* AllocateInlineVectorBacking(size_t size) {
- size_t gc_info_index = GCInfoTrait<HeapVectorBacking<T>>::Index();
+ uint32_t gc_info_index = GCInfoTrait<HeapVectorBacking<T>>::Index();
ThreadState* state =
ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
const char* type_name = WTF_HEAP_PROFILER_TYPE_NAME(HeapVectorBacking<T>);
@@ -112,7 +112,7 @@ class PLATFORM_EXPORT HeapAllocator {
template <typename T, typename HashTable>
static T* AllocateHashTableBacking(size_t size) {
- size_t gc_info_index =
+ uint32_t gc_info_index =
GCInfoTrait<HeapHashTableBacking<HashTable>>::Index();
ThreadState* state =
ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
@@ -125,7 +125,7 @@ class PLATFORM_EXPORT HeapAllocator {
static T* AllocateZeroedHashTableBacking(size_t size) {
return AllocateHashTableBacking<T, HashTable>(size);
}
- static void FreeHashTableBacking(void* address, bool is_weak_table);
+ static void FreeHashTableBacking(void* address);
static bool ExpandHashTableBacking(void*, size_t);
static void TraceMarkedBackingStore(void* address) {
@@ -192,11 +192,11 @@ class PLATFORM_EXPORT HeapAllocator {
template <typename T, typename VisitorDispatcher>
static void RegisterBackingStoreCallback(VisitorDispatcher visitor,
- T* backing_store,
+ T** backing_store_slot,
MovingObjectCallback callback,
void* callback_data) {
- visitor->RegisterBackingStoreCallback(backing_store, callback,
- callback_data);
+ visitor->RegisterBackingStoreCallback(
+ reinterpret_cast<void**>(backing_store_slot), callback, callback_data);
}
static void EnterGCForbiddenScope() {
@@ -309,7 +309,7 @@ class PLATFORM_EXPORT HeapAllocator {
size_t quantized_current_size,
size_t quantized_shrunk_size);
- template <typename T, size_t u, typename V>
+ template <typename T, wtf_size_t u, typename V>
friend class WTF::Vector;
template <typename T, typename U, typename V, typename W>
friend class WTF::HashSet;
@@ -339,7 +339,7 @@ static void TraceListHashSetValue(VisitorDispatcher visitor, Value& value) {
// This inherits from the static-only HeapAllocator trait class, but we do
// declare pointers to instances. These pointers are always null, and no
// objects are instantiated.
-template <typename ValueArg, size_t inlineCapacity>
+template <typename ValueArg, wtf_size_t inlineCapacity>
class HeapListHashSetAllocator : public HeapAllocator {
DISALLOW_NEW();
@@ -477,8 +477,9 @@ class HeapLinkedHashSet
};
template <typename ValueArg,
- size_t inlineCapacity = 0, // The inlineCapacity is just a dummy to
- // match ListHashSet (off-heap).
+ wtf_size_t inlineCapacity =
+ 0, // The inlineCapacity is just a dummy to
+ // match ListHashSet (off-heap).
typename HashArg = typename DefaultHash<ValueArg>::Hash>
class HeapListHashSet
: public ListHashSet<ValueArg,
@@ -502,7 +503,7 @@ class HeapHashCountedSet
"HashCountedSet<> instead of HeapHashCountedSet<>");
};
-template <typename T, size_t inlineCapacity = 0>
+template <typename T, wtf_size_t inlineCapacity = 0>
class HeapVector : public Vector<T, inlineCapacity, HeapAllocator> {
IS_GARBAGE_COLLECTED_TYPE();
using Base = Vector<T, inlineCapacity, HeapAllocator>;
@@ -538,18 +539,18 @@ class HeapVector : public Vector<T, inlineCapacity, HeapAllocator> {
return Base::operator new(size, location);
}
- explicit HeapVector(size_t size)
+ explicit HeapVector(wtf_size_t size)
: Vector<T, inlineCapacity, HeapAllocator>(size) {}
- HeapVector(size_t size, const T& val)
+ HeapVector(wtf_size_t size, const T& val)
: Vector<T, inlineCapacity, HeapAllocator>(size, val) {}
- template <size_t otherCapacity>
+ template <wtf_size_t otherCapacity>
HeapVector(const HeapVector<T, otherCapacity>& other)
: Vector<T, inlineCapacity, HeapAllocator>(other) {}
};
-template <typename T, size_t inlineCapacity = 0>
+template <typename T, wtf_size_t inlineCapacity = 0>
class HeapDeque : public Deque<T, inlineCapacity, HeapAllocator> {
IS_GARBAGE_COLLECTED_TYPE();
using Base = Deque<T, inlineCapacity, HeapAllocator>;
@@ -585,10 +586,10 @@ class HeapDeque : public Deque<T, inlineCapacity, HeapAllocator> {
return Base::operator new(size, location);
}
- explicit HeapDeque(size_t size)
+ explicit HeapDeque(wtf_size_t size)
: Deque<T, inlineCapacity, HeapAllocator>(size) {}
- HeapDeque(size_t size, const T& val)
+ HeapDeque(wtf_size_t size, const T& val)
: Deque<T, inlineCapacity, HeapAllocator>(size, val) {}
HeapDeque& operator=(const HeapDeque& other) {
@@ -597,7 +598,7 @@ class HeapDeque : public Deque<T, inlineCapacity, HeapAllocator> {
return *this;
}
- template <size_t otherCapacity>
+ template <wtf_size_t otherCapacity>
HeapDeque(const HeapDeque<T, otherCapacity>& other)
: Deque<T, inlineCapacity, HeapAllocator>(other) {}
};
@@ -712,7 +713,7 @@ struct VectorTraits<blink::HeapDeque<T, 0>>
static const bool kCanMoveWithMemcpy = true;
};
-template <typename T, size_t inlineCapacity>
+template <typename T, wtf_size_t inlineCapacity>
struct VectorTraits<blink::HeapVector<T, inlineCapacity>>
: VectorTraitsBase<blink::HeapVector<T, inlineCapacity>> {
STATIC_ONLY(VectorTraits);
@@ -724,7 +725,7 @@ struct VectorTraits<blink::HeapVector<T, inlineCapacity>>
static const bool kCanMoveWithMemcpy = VectorTraits<T>::kCanMoveWithMemcpy;
};
-template <typename T, size_t inlineCapacity>
+template <typename T, wtf_size_t inlineCapacity>
struct VectorTraits<blink::HeapDeque<T, inlineCapacity>>
: VectorTraitsBase<blink::HeapDeque<T, inlineCapacity>> {
STATIC_ONLY(VectorTraits);
@@ -915,7 +916,7 @@ struct HashTraits<blink::UntracedMember<T>>
}
};
-template <typename T, size_t inlineCapacity>
+template <typename T, wtf_size_t inlineCapacity>
struct IsTraceable<
ListHashSetNode<T, blink::HeapListHashSetAllocator<T, inlineCapacity>>*> {
STATIC_ONLY(IsTraceable);
@@ -926,7 +927,7 @@ struct IsTraceable<
static const bool value = true;
};
-template <typename T, size_t inlineCapacity>
+template <typename T, wtf_size_t inlineCapacity>
struct IsGarbageCollectedType<
ListHashSetNode<T, blink::HeapListHashSetAllocator<T, inlineCapacity>>> {
static const bool value = true;
diff --git a/chromium/third_party/blink/renderer/platform/heap/heap_compact.cc b/chromium/third_party/blink/renderer/platform/heap/heap_compact.cc
index 4594fbfd18a..4b6e34ce42b 100644
--- a/chromium/third_party/blink/renderer/platform/heap/heap_compact.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/heap_compact.cc
@@ -13,7 +13,6 @@
#include "third_party/blink/renderer/platform/histogram.h"
#include "third_party/blink/renderer/platform/runtime_enabled_features.h"
#include "third_party/blink/renderer/platform/wtf/hash_map.h"
-#include "third_party/blink/renderer/platform/wtf/hash_set.h"
#include "third_party/blink/renderer/platform/wtf/time.h"
namespace blink {
@@ -29,8 +28,8 @@ bool HeapCompact::force_compaction_gc_ = false;
// heap compaction-enhanced GC.
class HeapCompact::MovableObjectFixups final {
public:
- static std::unique_ptr<MovableObjectFixups> Create() {
- return base::WrapUnique(new MovableObjectFixups);
+ static std::unique_ptr<MovableObjectFixups> Create(ThreadHeap* heap) {
+ return base::WrapUnique(new MovableObjectFixups(heap));
}
~MovableObjectFixups() = default;
@@ -61,14 +60,23 @@ class HeapCompact::MovableObjectFixups final {
}
void Add(MovableReference* slot) {
+ DCHECK(*slot);
MovableReference reference = *slot;
- BasePage* ref_page = PageFromObject(reference);
+ BasePage* ref_page =
+ heap_->LookupPageForAddress(reinterpret_cast<Address>(reference));
+
+ // ref_page is null if *slot is pointing to an off-heap region. This may
+ // happen if *slot is pointing to an inline buffer of HeapVector with inline
+ // capacity.
+ if (!ref_page)
+ return;
// Nothing to compact on a large object's page.
if (ref_page->IsLargeObjectPage())
return;
+ if (!HeapCompact::IsCompactableArena(ref_page->Arena()->ArenaIndex()))
+ return;
#if DCHECK_IS_ON()
- DCHECK(HeapCompact::IsCompactableArena(ref_page->Arena()->ArenaIndex()));
auto it = fixups_.find(reference);
DCHECK(it == fixups_.end() || it->value == slot);
#endif
@@ -107,12 +115,18 @@ class HeapCompact::MovableObjectFixups final {
AddInteriorFixup(slot);
}
- void AddFixupCallback(MovableReference reference,
+ void AddFixupCallback(MovableReference* slot,
MovingObjectCallback callback,
void* callback_data) {
- DCHECK(!fixup_callbacks_.Contains(reference));
- fixup_callbacks_.insert(reference, std::pair<void*, MovingObjectCallback>(
- callback_data, callback));
+ DCHECK(!fixup_callbacks_.Contains(slot));
+ fixup_callbacks_.insert(
+ slot, std::pair<void*, MovingObjectCallback>(callback_data, callback));
+ }
+
+ void RemoveFixupCallback(MovableReference* slot) {
+ auto it = fixup_callbacks_.find(slot);
+ if (it != fixup_callbacks_.end())
+ fixup_callbacks_.erase(it);
}
void RelocateInteriorFixups(Address from, Address to, size_t size) {
@@ -124,25 +138,22 @@ class HeapCompact::MovableObjectFixups final {
// to adjust. If the backing store of such an interior slot hasn't
// been moved already, update the slot -> real location mapping.
// When the backing store is eventually moved, it'll use that location.
- //
for (size_t offset = 0; offset < size; offset += sizeof(void*)) {
- if (!range->IsSet(from + offset))
- continue;
MovableReference* slot =
reinterpret_cast<MovableReference*>(from + offset);
+
+ // Early bailout.
+ if (!range->IsSet(reinterpret_cast<Address>(slot)))
+ continue;
+
auto it = interior_fixups_.find(slot);
if (it == interior_fixups_.end())
continue;
- // TODO: with the right sparse bitmap representation, it could be possible
- // to quickly determine if we've now stepped past the last address
- // that needed fixup in [address, address + size). Breaking out of this
- // loop might be worth doing for hash table backing stores with a very
- // low load factor. But interior fixups are rare.
-
// If |slot|'s mapping is set, then the slot has been adjusted already.
if (it->value)
continue;
+
Address fixup = to + offset;
LOG_HEAP_COMPACTION() << "Range interior fixup: " << (from + offset)
<< " " << it->value << " " << fixup;
@@ -151,16 +162,37 @@ class HeapCompact::MovableObjectFixups final {
// moved/compacted, it'll update |to + offset| with a pointer to the
// moved backing store.
interior_fixups_.Set(slot, fixup);
+
+ // If the |slot|'s content is pointing into the region [from, from + size),
+ // we are dealing with an interior pointer that does not point to a valid
+ // HeapObjectHeader. Such references need to be fixed up immediately.
+ Address fixup_contents = *reinterpret_cast<Address*>(fixup);
+ if (fixup_contents > from && fixup_contents < (from + size)) {
+ *reinterpret_cast<Address*>(fixup) = fixup_contents - from + to;
+ continue;
+ }
}
}
void Relocate(Address from, Address to) {
auto it = fixups_.find(from);
- DCHECK(it != fixups_.end());
+ // This means that there is no corresponding slot for a live backing store.
+ // This may happen because a mutator may change the slot to point to a
+ // different backing store, e.g. because:
+ // - Incremental marking marked a backing store as live that was later on
+ // replaced.
+ // - Backings were changed when being processed in
+ // EagerSweep/PreFinalizer/WeakProcessing.
+ if (it == fixups_.end())
+ return;
+
#if DCHECK_IS_ON()
BasePage* from_page = PageFromObject(from);
DCHECK(relocatable_pages_.Contains(from_page));
#endif
+
+ // If the object is referenced by a slot that is itself contained in a
+ // compacted area, check whether it can be updated already.
MovableReference* slot = reinterpret_cast<MovableReference*>(it->value);
auto interior = interior_fixups_.find(slot);
if (interior != interior_fixups_.end()) {
@@ -174,6 +206,7 @@ class HeapCompact::MovableObjectFixups final {
slot = slot_location;
}
}
+
// If the slot has subsequently been updated, a prefinalizer or
// a destructor having mutated and expanded/shrunk the collection,
// do not update and relocate the slot -- |from| is no longer valid
@@ -185,27 +218,19 @@ class HeapCompact::MovableObjectFixups final {
LOG_HEAP_COMPACTION()
<< "No relocation: slot = " << slot << ", *slot = " << *slot
<< ", from = " << from << ", to = " << to;
-#if DCHECK_IS_ON()
- // Verify that the already updated slot is valid, meaning:
- // - has been cleared.
- // - has been updated & expanded with a large object backing store.
- // - has been updated with a larger, freshly allocated backing store.
- // (on a fresh page in a compactable arena that is not being
- // compacted.)
- if (!*slot)
- return;
- BasePage* slot_page = PageFromObject(*slot);
- DCHECK(
- slot_page->IsLargeObjectPage() ||
- (HeapCompact::IsCompactableArena(slot_page->Arena()->ArenaIndex()) &&
- !relocatable_pages_.Contains(slot_page)));
-#endif
+ VerifyUpdatedSlot(slot);
return;
}
+
+ // Update the slot with the new value.
*slot = to;
size_t size = 0;
- auto callback = fixup_callbacks_.find(from);
+
+ // Execute potential fixup callbacks.
+ MovableReference* callback_slot =
+ reinterpret_cast<MovableReference*>(it->value);
+ auto callback = fixup_callbacks_.find(callback_slot);
if (UNLIKELY(callback != fixup_callbacks_.end())) {
size = HeapObjectHeader::FromPayload(to)->PayloadSize();
callback->value.second(callback->value.first, from, to, size);
@@ -231,7 +256,11 @@ class HeapCompact::MovableObjectFixups final {
#endif
private:
- MovableObjectFixups() = default;
+ MovableObjectFixups(ThreadHeap* heap) : heap_(heap) {}
+
+ void VerifyUpdatedSlot(MovableReference* slot);
+
+ ThreadHeap* heap_;
// Tracking movable and updatable references. For now, we keep a
// map which for each movable object, recording the slot that
@@ -243,7 +272,7 @@ class HeapCompact::MovableObjectFixups final {
// Map from movable reference to callbacks that need to be invoked
// when the object moves.
- HashMap<MovableReference, std::pair<void*, MovingObjectCallback>>
+ HashMap<MovableReference*, std::pair<void*, MovingObjectCallback>>
fixup_callbacks_;
// Slot => relocated slot/final location.
@@ -257,6 +286,30 @@ class HeapCompact::MovableObjectFixups final {
std::unique_ptr<SparseHeapBitmap> interiors_;
};
+void HeapCompact::MovableObjectFixups::VerifyUpdatedSlot(
+ MovableReference* slot) {
+// Verify that the already updated slot is valid, meaning:
+// - has been cleared.
+// - has been updated & expanded with a large object backing store.
+// - has been updated with a larger, freshly allocated backing store.
+// (on a fresh page in a compactable arena that is not being
+// compacted.)
+#if DCHECK_IS_ON()
+ if (!*slot)
+ return;
+ BasePage* slot_page =
+ heap_->LookupPageForAddress(reinterpret_cast<Address>(*slot));
+ // slot_page is null if *slot is pointing to an off-heap region. This may
+ // happen if *slot is pointing to an inline buffer of HeapVector with
+ // inline capacity.
+ if (!slot_page)
+ return;
+ DCHECK(slot_page->IsLargeObjectPage() ||
+ (HeapCompact::IsCompactableArena(slot_page->Arena()->ArenaIndex()) &&
+ !relocatable_pages_.Contains(slot_page)));
+#endif // DCHECK_IS_ON()
+}
+
HeapCompact::HeapCompact(ThreadHeap* heap)
: heap_(heap),
do_compact_(false),
@@ -283,7 +336,7 @@ HeapCompact::~HeapCompact() = default;
HeapCompact::MovableObjectFixups& HeapCompact::Fixups() {
if (!fixups_)
- fixups_ = MovableObjectFixups::Create();
+ fixups_ = MovableObjectFixups::Create(heap_);
return *fixups_;
}
@@ -301,12 +354,6 @@ bool HeapCompact::ShouldCompact(ThreadHeap* heap,
<< " count=" << gc_count_since_last_compaction_
<< " free=" << free_list_size_;
gc_count_since_last_compaction_++;
- // It is only safe to compact during non-conservative GCs.
- // TODO: for the main thread, limit this further to only idle GCs.
- if (reason != BlinkGC::GCReason::kIdleGC &&
- reason != BlinkGC::GCReason::kPreciseGC &&
- reason != BlinkGC::GCReason::kForcedGC)
- return false;
// If the GCing thread requires a stack scan, do not compact.
// Why? Should the stack contain an iterator pointing into its
@@ -315,10 +362,22 @@ bool HeapCompact::ShouldCompact(ThreadHeap* heap,
if (stack_state == BlinkGC::kHeapPointersOnStack)
return false;
+ if (reason == BlinkGC::GCReason::kTesting) {
+ UpdateHeapResidency();
+ return force_compaction_gc_;
+ }
+
// TODO(keishi): Should be enabled after fixing the crashes.
if (marking_type == BlinkGC::kIncrementalMarking)
return false;
+ // TODO(harukamt): Add kIncrementalIdleGC and kIncrementalV8FollowupGC when we
+ // enable heap compaction for incremental marking.
+ if (reason != BlinkGC::GCReason::kIdleGC &&
+ reason != BlinkGC::GCReason::kPreciseGC &&
+ reason != BlinkGC::GCReason::kForcedGC)
+ return false;
+
// Compaction enable rules:
// - It's been a while since the last time.
// - "Considerable" amount of heap memory is bound up in freelist
@@ -353,20 +412,27 @@ void HeapCompact::Initialize(ThreadState* state) {
force_compaction_gc_ = false;
}
+void HeapCompact::RemoveSlot(MovableReference* slot) {
+ auto it = traced_slots_.find(slot);
+ if (it != traced_slots_.end())
+ traced_slots_.erase(it);
+ Fixups().RemoveFixupCallback(slot);
+}
+
void HeapCompact::RegisterMovingObjectReference(MovableReference* slot) {
if (!do_compact_)
return;
- Fixups().Add(slot);
+ traced_slots_.insert(slot);
}
-void HeapCompact::RegisterMovingObjectCallback(MovableReference reference,
+void HeapCompact::RegisterMovingObjectCallback(MovableReference* slot,
MovingObjectCallback callback,
void* callback_data) {
if (!do_compact_)
return;
- Fixups().AddFixupCallback(reference, callback, callback_data);
+ Fixups().AddFixupCallback(slot, callback, callback_data);
}
void HeapCompact::UpdateHeapResidency() {
@@ -415,13 +481,19 @@ void HeapCompact::FinishedArenaCompaction(NormalPageArena* arena,
}
void HeapCompact::Relocate(Address from, Address to) {
- DCHECK(fixups_);
- fixups_->Relocate(from, to);
+ Fixups().Relocate(from, to);
}
void HeapCompact::StartThreadCompaction() {
if (!do_compact_)
return;
+
+ // The mapping between slots and backing stores is created here, at the
+ // atomic pause.
+ for (auto** slot : traced_slots_) {
+ if (*slot)
+ Fixups().Add(slot);
+ }
+ traced_slots_.clear();
}
void HeapCompact::FinishThreadCompaction() {
diff --git a/chromium/third_party/blink/renderer/platform/heap/heap_compact.h b/chromium/third_party/blink/renderer/platform/heap/heap_compact.h
index dc32e77b229..780e72f56a4 100644
--- a/chromium/third_party/blink/renderer/platform/heap/heap_compact.h
+++ b/chromium/third_party/blink/renderer/platform/heap/heap_compact.h
@@ -10,6 +10,7 @@
#include "base/memory/ptr_util.h"
#include "third_party/blink/renderer/platform/heap/blink_gc.h"
#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/wtf/hash_set.h"
#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
#include <bitset>
@@ -35,12 +36,18 @@
namespace blink {
+namespace incremental_marking_test {
+class IncrementalMarkingTestDriver;
+}
+
class NormalPageArena;
class BasePage;
class ThreadState;
class ThreadHeap;
class PLATFORM_EXPORT HeapCompact final {
+ friend class incremental_marking_test::IncrementalMarkingTestDriver;
+
public:
static std::unique_ptr<HeapCompact> Create(ThreadHeap* heap) {
return base::WrapUnique(new HeapCompact(heap));
@@ -48,6 +55,10 @@ class PLATFORM_EXPORT HeapCompact final {
~HeapCompact();
+ // Removes the slot from traced_slots_ when a registered slot is destructed
+ // by the mutator.
+ void RemoveSlot(MovableReference* slot);
+
// Determine if a GC for the given type and reason should also perform
// additional heap compaction.
//
@@ -80,7 +91,7 @@ class PLATFORM_EXPORT HeapCompact final {
void RegisterMovingObjectReference(MovableReference* slot);
// See |Heap::registerMovingObjectCallback()| documentation.
- void RegisterMovingObjectCallback(MovableReference,
+ void RegisterMovingObjectCallback(MovableReference*,
MovingObjectCallback,
void* callback_data);
@@ -164,6 +175,11 @@ class PLATFORM_EXPORT HeapCompact final {
// the range of BlinkGC::ArenaIndices.
unsigned compactable_arenas_;
+ // The set remembers the slots traced during the marking phase. The mapping
+ // between slots and backing stores is created during the atomic pause.
+ HashSet<MovableReference*> traced_slots_;
+
static bool force_compaction_gc_;
};
diff --git a/chromium/third_party/blink/renderer/platform/heap/heap_compact_test.cc b/chromium/third_party/blink/renderer/platform/heap/heap_compact_test.cc
index 2dcb6ab1e90..7ecb8bfe27a 100644
--- a/chromium/third_party/blink/renderer/platform/heap/heap_compact_test.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/heap_compact_test.cc
@@ -491,6 +491,38 @@ TEST(HeapCompactTest, CompactLinkedHashSetNested) {
}
}
+TEST(HeapCompactTest, CompactInlinedBackingStore) {
+ // Regression test: https://crbug.com/875044
+ //
+ // This test checks that compaction properly updates pointers to statically
+ // allocated inline backings, see e.g. Vector::inline_buffer_.
+
+ // Use a Key with pre-defined hash traits.
+ using Key = Member<IntWrapper>;
+ // Value uses a statically allocated inline backing of size 64. As long as
+ // no more than 64 elements are added, no out-of-line allocation is
+ // triggered.
+ // The internal forwarding pointer to the inlined storage needs to be handled
+ // by compaction.
+ using Value = HeapVector<Member<IntWrapper>, 64>;
+ using MapWithInlinedBacking = HeapHashMap<Key, Value>;
+
+ Persistent<MapWithInlinedBacking> map = new MapWithInlinedBacking;
+ {
+ // Create a map that is reclaimed during compaction.
+ (new MapWithInlinedBacking)
+ ->insert(IntWrapper::Create(1, HashTablesAreCompacted), Value());
+
+ IntWrapper* wrapper = IntWrapper::Create(1, HashTablesAreCompacted);
+ Value storage;
+ storage.push_front(wrapper);
+ map->insert(wrapper, std::move(storage));
+ }
+ PerformHeapCompaction();
+ // The first GC should update the pointer accordingly and thus not crash on
+ // the second GC.
+ PerformHeapCompaction();
+}
+
} // namespace blink
#endif // ENABLE_HEAP_COMPACTION
diff --git a/chromium/third_party/blink/renderer/platform/heap/heap_page.cc b/chromium/third_party/blink/renderer/platform/heap/heap_page.cc
index 47b4c3d7fed..c5552d65116 100644
--- a/chromium/third_party/blink/renderer/platform/heap/heap_page.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/heap_page.cc
@@ -42,7 +42,6 @@
#include "third_party/blink/renderer/platform/heap/marking_verifier.h"
#include "third_party/blink/renderer/platform/heap/page_memory.h"
#include "third_party/blink/renderer/platform/heap/page_pool.h"
-#include "third_party/blink/renderer/platform/heap/safe_point.h"
#include "third_party/blink/renderer/platform/heap/thread_state.h"
#include "third_party/blink/renderer/platform/histogram.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
@@ -1496,6 +1495,7 @@ void NormalPage::SweepAndCompact(CompactionContext& context) {
void NormalPage::MakeConsistentForMutator() {
object_start_bit_map()->Clear();
+ size_t marked_object_size = 0;
Address start_of_gap = Payload();
NormalPageArena* normal_arena = ArenaForNormalPage();
for (Address header_address = Payload(); header_address < PayloadEnd();) {
@@ -1519,6 +1519,7 @@ void NormalPage::MakeConsistentForMutator() {
normal_arena->AddToFreeList(start_of_gap, header_address - start_of_gap);
if (header->IsMarked()) {
header->Unmark();
+ marked_object_size += size;
}
object_start_bit_map()->SetBit(header_address);
header_address += size;
@@ -1528,6 +1529,11 @@ void NormalPage::MakeConsistentForMutator() {
if (start_of_gap != PayloadEnd())
normal_arena->AddToFreeList(start_of_gap, PayloadEnd() - start_of_gap);
+ if (marked_object_size) {
+ ArenaForNormalPage()->GetThreadState()->Heap().IncreaseMarkedObjectSize(
+ marked_object_size);
+ }
+
VerifyObjectStartBitmapIsConsistentWithPayload();
}
@@ -1646,14 +1652,14 @@ void NormalPage::TakeSnapshot(base::trace_event::MemoryAllocatorDump* page_dump,
live_count++;
live_size += header->size();
- size_t gc_info_index = header->GcInfoIndex();
+ uint32_t gc_info_index = header->GcInfoIndex();
info.live_count[gc_info_index]++;
info.live_size[gc_info_index] += header->size();
} else {
dead_count++;
dead_size += header->size();
- size_t gc_info_index = header->GcInfoIndex();
+ uint32_t gc_info_index = header->GcInfoIndex();
info.dead_count[gc_info_index]++;
info.dead_size[gc_info_index] += header->size();
}
@@ -1709,8 +1715,10 @@ bool LargeObjectPage::Sweep() {
void LargeObjectPage::MakeConsistentForMutator() {
HeapObjectHeader* header = ObjectHeader();
- if (header->IsMarked())
+ if (header->IsMarked()) {
header->Unmark();
+ Arena()->GetThreadState()->Heap().IncreaseMarkedObjectSize(size());
+ }
}
#if defined(ADDRESS_SANITIZER)
@@ -1730,7 +1738,7 @@ void LargeObjectPage::TakeSnapshot(
size_t live_count = 0;
size_t dead_count = 0;
HeapObjectHeader* header = ObjectHeader();
- size_t gc_info_index = header->GcInfoIndex();
+ uint32_t gc_info_index = header->GcInfoIndex();
size_t payload_size = header->PayloadSize();
if (header->IsMarked()) {
live_count = 1;
diff --git a/chromium/third_party/blink/renderer/platform/heap/heap_page.h b/chromium/third_party/blink/renderer/platform/heap/heap_page.h
index b697773664e..2bc6d7cc77f 100644
--- a/chromium/third_party/blink/renderer/platform/heap/heap_page.h
+++ b/chromium/third_party/blink/renderer/platform/heap/heap_page.h
@@ -194,7 +194,7 @@ class PLATFORM_EXPORT HeapObjectHeader {
size_t size() const;
- NO_SANITIZE_ADDRESS size_t GcInfoIndex() const {
+ NO_SANITIZE_ADDRESS uint32_t GcInfoIndex() const {
return (encoded_ & kHeaderGCInfoIndexMask) >> kHeaderGCInfoIndexShift;
}
diff --git a/chromium/third_party/blink/renderer/platform/heap/heap_stats_collector.h b/chromium/third_party/blink/renderer/platform/heap/heap_stats_collector.h
index 30ee866cc63..05680bbd4d9 100644
--- a/chromium/third_party/blink/renderer/platform/heap/heap_stats_collector.h
+++ b/chromium/third_party/blink/renderer/platform/heap/heap_stats_collector.h
@@ -99,14 +99,11 @@ class PLATFORM_EXPORT ThreadHeapStatsCollector {
static constexpr int kNumScopeIds = kLastScopeId + 1;
- enum TraceDefaultBehavior {
- kEnabled,
- kDisabled,
- };
+ enum TraceCategory { kEnabled, kDisabled, kDevTools };
// Trace a particular scope. Will emit a trace event and record the time in
// the corresponding ThreadHeapStatsCollector.
- template <TraceDefaultBehavior default_behavior = kDisabled>
+ template <TraceCategory trace_category = kDisabled>
class PLATFORM_EXPORT InternalScope {
DISALLOW_NEW();
DISALLOW_COPY_AND_ASSIGN(InternalScope);
@@ -125,9 +122,14 @@ class PLATFORM_EXPORT ThreadHeapStatsCollector {
private:
constexpr static const char* TraceCategory() {
- return default_behavior == kEnabled
- ? "blink_gc"
- : TRACE_DISABLED_BY_DEFAULT("blink_gc");
+ switch (trace_category) {
+ case kEnabled:
+ return "blink_gc";
+ case kDisabled:
+ return TRACE_DISABLED_BY_DEFAULT("blink_gc");
+ case kDevTools:
+ return "blink_gc,devtools.timeline";
+ }
}
void StartTrace() { TRACE_EVENT_BEGIN0(TraceCategory(), ToString(id_)); }
@@ -149,6 +151,7 @@ class PLATFORM_EXPORT ThreadHeapStatsCollector {
using Scope = InternalScope<kDisabled>;
using EnabledScope = InternalScope<kEnabled>;
+ using DevToolsScope = InternalScope<kDevTools>;
// POD to hold interesting data accumulated during a garbage collection cycle.
// The event is always fully populated when looking at previous events but
diff --git a/chromium/third_party/blink/renderer/platform/heap/heap_test.cc b/chromium/third_party/blink/renderer/platform/heap/heap_test.cc
index 031cfc109a6..4e7a71d4d3c 100644
--- a/chromium/third_party/blink/renderer/platform/heap/heap_test.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/heap_test.cc
@@ -47,7 +47,6 @@
#include "third_party/blink/renderer/platform/heap/heap_terminated_array_builder.h"
#include "third_party/blink/renderer/platform/heap/heap_test_utilities.h"
#include "third_party/blink/renderer/platform/heap/marking_visitor.h"
-#include "third_party/blink/renderer/platform/heap/safe_point.h"
#include "third_party/blink/renderer/platform/heap/self_keep_alive.h"
#include "third_party/blink/renderer/platform/heap/stack_frame_depth.h"
#include "third_party/blink/renderer/platform/heap/thread_state.h"
@@ -361,9 +360,9 @@ class TestGCCollectGarbageScope {
~TestGCCollectGarbageScope() { ThreadState::Current()->CompleteSweep(); }
};
-class TestGCMarkingScope : public TestGCCollectGarbageScope {
+class TestGCScope : public TestGCCollectGarbageScope {
public:
- explicit TestGCMarkingScope(BlinkGC::StackState state)
+ explicit TestGCScope(BlinkGC::StackState state)
: TestGCCollectGarbageScope(state),
atomic_pause_scope_(ThreadState::Current()) {
ThreadState::Current()->Heap().stats_collector()->NotifyMarkingStarted(
@@ -371,7 +370,7 @@ class TestGCMarkingScope : public TestGCCollectGarbageScope {
ThreadState::Current()->AtomicPausePrologue(state, BlinkGC::kAtomicMarking,
BlinkGC::GCReason::kPreciseGC);
}
- ~TestGCMarkingScope() {
+ ~TestGCScope() {
ThreadState::Current()->MarkPhaseEpilogue(BlinkGC::kAtomicMarking);
ThreadState::Current()->AtomicPauseEpilogue(BlinkGC::kAtomicMarking,
BlinkGC::kEagerSweeping);
@@ -381,16 +380,6 @@ class TestGCMarkingScope : public TestGCCollectGarbageScope {
ThreadState::AtomicPauseScope atomic_pause_scope_;
};
-class TestGCScope : public TestGCMarkingScope {
- public:
- explicit TestGCScope(BlinkGC::StackState state)
- : TestGCMarkingScope(state), safe_point_scope_(state) {}
- ~TestGCScope() {}
-
- private:
- SafePointScope safe_point_scope_;
-};
-
class SimpleObject : public GarbageCollected<SimpleObject> {
public:
static SimpleObject* Create() { return new SimpleObject(); }
@@ -1734,7 +1723,7 @@ TEST(HeapTest, BasicFunctionality) {
ClearOutOldGarbage();
size_t initial_object_payload_size = heap.ObjectPayloadSizeForTesting();
{
- size_t slack = 0;
+ wtf_size_t slack = 0;
// When the test starts there may already have been leaked some memory
// on the heap, so we establish a base line.
@@ -1776,7 +1765,7 @@ TEST(HeapTest, BasicFunctionality) {
ClearOutOldGarbage();
size_t total = 0;
- size_t slack = 0;
+ wtf_size_t slack = 0;
size_t base_level = heap.ObjectPayloadSizeForTesting();
bool test_pages_allocated = !base_level;
if (test_pages_allocated)
@@ -2396,8 +2385,8 @@ TEST(HeapTest, MAYBE_LargeHashMap) {
// Try to allocate a HashTable larger than kMaxHeapObjectSize
// (crbug.com/597953).
- size_t size = kMaxHeapObjectSize /
- sizeof(HeapHashMap<int, Member<IntWrapper>>::ValueType);
+ wtf_size_t size = kMaxHeapObjectSize /
+ sizeof(HeapHashMap<int, Member<IntWrapper>>::ValueType);
Persistent<HeapHashMap<int, Member<IntWrapper>>> map =
new HeapHashMap<int, Member<IntWrapper>>();
map->ReserveCapacityForSize(size);
@@ -2409,7 +2398,7 @@ TEST(HeapTest, LargeVector) {
// Try to allocate a HeapVectors larger than kMaxHeapObjectSize
// (crbug.com/597953).
- size_t size = kMaxHeapObjectSize / sizeof(int);
+ wtf_size_t size = kMaxHeapObjectSize / sizeof(int);
Persistent<HeapVector<int>> vector = new HeapVector<int>(size);
EXPECT_LE(size, vector->capacity());
}
@@ -2458,7 +2447,7 @@ TEST(HeapTest, HeapVectorFilledWithValue) {
IntWrapper* val = IntWrapper::Create(1);
HeapVector<Member<IntWrapper>> vector(10, val);
EXPECT_EQ(10u, vector.size());
- for (size_t i = 0; i < vector.size(); i++)
+ for (wtf_size_t i = 0; i < vector.size(); i++)
EXPECT_EQ(val, vector[i]);
}
@@ -2584,16 +2573,18 @@ TEST(HeapTest, HeapVectorOnStackLargeObjectPageSized) {
// LargeObjectPage ends.
using Container = HeapVector<Member<IntWrapper>>;
Container vector;
- size_t size = (kLargeObjectSizeThreshold + kBlinkGuardPageSize -
- LargeObjectPage::PageHeaderSize() - sizeof(HeapObjectHeader)) /
- sizeof(Container::ValueType);
+ wtf_size_t size =
+ (kLargeObjectSizeThreshold + kBlinkGuardPageSize -
+ static_cast<wtf_size_t>(LargeObjectPage::PageHeaderSize()) -
+ sizeof(HeapObjectHeader)) /
+ sizeof(Container::ValueType);
vector.ReserveCapacity(size);
for (unsigned i = 0; i < size; ++i)
vector.push_back(IntWrapper::Create(i));
ConservativelyCollectGarbage();
}
-template <typename T, size_t inlineCapacity, typename U>
+template <typename T, wtf_size_t inlineCapacity, typename U>
bool DequeContains(HeapDeque<T, inlineCapacity>& deque, U u) {
typedef typename HeapDeque<T, inlineCapacity>::iterator iterator;
for (iterator it = deque.begin(); it != deque.end(); ++it) {
@@ -4042,7 +4033,7 @@ TEST(HeapTest, CheckAndMarkPointer) {
MarkingVisitor::kGlobalMarking);
heap.address_cache()->EnableLookup();
heap.address_cache()->Flush();
- for (size_t i = 0; i < object_addresses.size(); i++) {
+ for (wtf_size_t i = 0; i < object_addresses.size(); i++) {
EXPECT_TRUE(heap.CheckAndMarkPointer(&visitor, object_addresses[i],
ReportMarkedPointer));
EXPECT_TRUE(heap.CheckAndMarkPointer(&visitor, end_addresses[i],
@@ -4067,7 +4058,7 @@ TEST(HeapTest, CheckAndMarkPointer) {
MarkingVisitor::kGlobalMarking);
heap.address_cache()->EnableLookup();
heap.address_cache()->Flush();
- for (size_t i = 0; i < object_addresses.size(); i++) {
+ for (wtf_size_t i = 0; i < object_addresses.size(); i++) {
// We would like to assert that checkAndMarkPointer returned false
// here because the pointers no longer point into a valid object
// (it's been freed by the GCs. But checkAndMarkPointer will return
@@ -4548,14 +4539,14 @@ TEST(HeapTest, HeapTerminatedArray) {
HeapTerminatedArray<TerminatedArrayItem>* arr = nullptr;
- const size_t kPrefixSize = 4;
- const size_t kSuffixSize = 4;
+ const wtf_size_t kPrefixSize = 4;
+ const wtf_size_t kSuffixSize = 4;
{
HeapTerminatedArrayBuilder<TerminatedArrayItem> builder(arr);
builder.Grow(kPrefixSize);
ConservativelyCollectGarbage();
- for (size_t i = 0; i < kPrefixSize; i++)
+ for (wtf_size_t i = 0; i < kPrefixSize; i++)
builder.Append(TerminatedArrayItem(IntWrapper::Create(i)));
arr = builder.Release();
}
@@ -4563,13 +4554,13 @@ TEST(HeapTest, HeapTerminatedArray) {
ConservativelyCollectGarbage();
EXPECT_EQ(0, IntWrapper::destructor_calls_);
EXPECT_EQ(kPrefixSize, arr->size());
- for (size_t i = 0; i < kPrefixSize; i++)
- EXPECT_EQ(i, static_cast<size_t>(arr->at(i).Payload()->Value()));
+ for (wtf_size_t i = 0; i < kPrefixSize; i++)
+ EXPECT_EQ(i, static_cast<wtf_size_t>(arr->at(i).Payload()->Value()));
{
HeapTerminatedArrayBuilder<TerminatedArrayItem> builder(arr);
builder.Grow(kSuffixSize);
- for (size_t i = 0; i < kSuffixSize; i++)
+ for (wtf_size_t i = 0; i < kSuffixSize; i++)
builder.Append(TerminatedArrayItem(IntWrapper::Create(kPrefixSize + i)));
arr = builder.Release();
}
@@ -4577,8 +4568,8 @@ TEST(HeapTest, HeapTerminatedArray) {
ConservativelyCollectGarbage();
EXPECT_EQ(0, IntWrapper::destructor_calls_);
EXPECT_EQ(kPrefixSize + kSuffixSize, arr->size());
- for (size_t i = 0; i < kPrefixSize + kSuffixSize; i++)
- EXPECT_EQ(i, static_cast<size_t>(arr->at(i).Payload()->Value()));
+ for (wtf_size_t i = 0; i < kPrefixSize + kSuffixSize; i++)
+ EXPECT_EQ(i, static_cast<wtf_size_t>(arr->at(i).Payload()->Value()));
{
Persistent<HeapTerminatedArray<TerminatedArrayItem>> persistent_arr = arr;
@@ -4587,8 +4578,8 @@ TEST(HeapTest, HeapTerminatedArray) {
arr = persistent_arr.Get();
EXPECT_EQ(0, IntWrapper::destructor_calls_);
EXPECT_EQ(kPrefixSize + kSuffixSize, arr->size());
- for (size_t i = 0; i < kPrefixSize + kSuffixSize; i++)
- EXPECT_EQ(i, static_cast<size_t>(arr->at(i).Payload()->Value()));
+ for (wtf_size_t i = 0; i < kPrefixSize + kSuffixSize; i++)
+ EXPECT_EQ(i, static_cast<wtf_size_t>(arr->at(i).Payload()->Value()));
}
arr = nullptr;
@@ -4603,9 +4594,9 @@ TEST(HeapTest, HeapLinkedStack) {
HeapLinkedStack<TerminatedArrayItem>* stack =
new HeapLinkedStack<TerminatedArrayItem>();
- const size_t kStackSize = 10;
+ const wtf_size_t kStackSize = 10;
- for (size_t i = 0; i < kStackSize; i++)
+ for (wtf_size_t i = 0; i < kStackSize; i++)
stack->Push(TerminatedArrayItem(IntWrapper::Create(i)));
ConservativelyCollectGarbage();
diff --git a/chromium/third_party/blink/renderer/platform/heap/incremental_marking_test.cc b/chromium/third_party/blink/renderer/platform/heap/incremental_marking_test.cc
index 16123472b26..59d153eb9e9 100644
--- a/chromium/third_party/blink/renderer/platform/heap/incremental_marking_test.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/incremental_marking_test.cc
@@ -10,6 +10,7 @@
#include "third_party/blink/renderer/platform/heap/heap.h"
#include "third_party/blink/renderer/platform/heap/heap_allocator.h"
#include "third_party/blink/renderer/platform/heap/heap_buildflags.h"
+#include "third_party/blink/renderer/platform/heap/heap_compact.h"
#include "third_party/blink/renderer/platform/heap/heap_terminated_array.h"
#include "third_party/blink/renderer/platform/heap/heap_terminated_array_builder.h"
#include "third_party/blink/renderer/platform/heap/member.h"
@@ -65,7 +66,7 @@ class BackingVisitor : public Visitor {
WeakCallback,
void*) final {}
void VisitBackingStoreOnly(void*, void**) final {}
- void RegisterBackingStoreCallback(void* backing_store,
+ void RegisterBackingStoreCallback(void** slot,
MovingObjectCallback,
void* callback_data) final {}
void RegisterWeakCallback(void* closure, WeakCallback) final {}
@@ -1630,6 +1631,11 @@ class IncrementalMarkingTestDriver {
thread_state_->CompleteSweep();
}
+ HashSet<MovableReference*>& GetTracedSlot() {
+ HeapCompact* compaction = ThreadState::Current()->Heap().Compaction();
+ return compaction->traced_slots_;
+ }
+
private:
ThreadState* const thread_state_;
};
@@ -1645,7 +1651,7 @@ TEST(IncrementalMarkingTest, TestDriver) {
}
TEST(IncrementalMarkingTest, DropBackingStore) {
- // Regression test: crbug.com/828537
+ // Regression test: https://crbug.com/828537
using WeakStore = HeapHashCountedSet<WeakMember<Object>>;
Persistent<WeakStore> persistent(new WeakStore);
@@ -1659,6 +1665,140 @@ TEST(IncrementalMarkingTest, DropBackingStore) {
driver.FinishGC();
}
+TEST(IncrementalMarkingTest, WeakCallbackDoesNotReviveDeletedValue) {
+ // Regression test: https://crbug.com/870196
+
+ // std::pair avoids treating the hashset backing as weak backing.
+ using WeakStore = HeapHashCountedSet<std::pair<WeakMember<Object>, size_t>>;
+
+ Persistent<WeakStore> persistent(new WeakStore);
+ // Create at least two entries to avoid completely emptying out the data
+ // structure. The values for .second are chosen to be non-null as they
+ // would otherwise count as empty and be skipped during iteration after the
+ // first part died.
+ persistent->insert({Object::Create(), 1});
+ persistent->insert({Object::Create(), 2});
+ IncrementalMarkingTestDriver driver(ThreadState::Current());
+ driver.Start();
+ // The backing is not treated as weak backing and thus eagerly processed,
+ // effectively registering the slots of WeakMembers.
+ driver.FinishSteps();
+ // The following deletes the first found entry. The second entry is left
+ // untouched.
+ for (auto& entries : *persistent) {
+ persistent->erase(entries.key);
+ break;
+ }
+ driver.FinishGC();
+
+ size_t count = 0;
+ for (const auto& entry : *persistent) {
+ count++;
+ // Use the entry to keep compilers happy.
+ if (entry.key.second > 0) {
+ }
+ }
+ CHECK_EQ(1u, count);
+}
+
+TEST(IncrementalMarkingTest, NoBackingFreeDuringIncrementalMarking) {
+ // Regression test: https://crbug.com/870306
+ // Only reproduces in ASAN configurations.
+ using WeakStore = HeapHashCountedSet<std::pair<WeakMember<Object>, size_t>>;
+
+ Persistent<WeakStore> persistent(new WeakStore);
+ // Prefill the collection to grow the backing store. A new backing store
+ // allocation would trigger the write barrier, mitigating the bug where a
+ // backing store is promptly freed.
+ for (size_t i = 0; i < 8; i++) {
+ persistent->insert({Object::Create(), i});
+ }
+ IncrementalMarkingTestDriver driver(ThreadState::Current());
+ driver.Start();
+ persistent->insert({Object::Create(), 8});
+ // Clearing is not allowed to free the backing store, as the previous insert
+ // may have registered a slot.
+ persistent->clear();
+ driver.FinishSteps();
+ driver.FinishGC();
+}
+
+TEST(IncrementalMarkingTest, DropReferenceWithHeapCompaction) {
+ using Store = HeapHashCountedSet<Member<Object>>;
+
+ Persistent<Store> persistent(new Store);
+ persistent->insert(Object::Create());
+ IncrementalMarkingTestDriver driver(ThreadState::Current());
+ HeapCompact::ScheduleCompactionGCForTesting(true);
+ driver.Start();
+ driver.FinishSteps();
+ persistent->clear();
+ // Registration of movable and updatable references should not crash: if a
+ // slot holds a nullptr reference, the registration method is not called.
+ driver.FinishGC();
+}
+
+TEST(IncrementalMarkingTest, HasInlineCapacityCollectionWithHeapCompaction) {
+ using Store = HeapVector<Member<Object>, 2>;
+
+ Persistent<Store> persistent(new Store);
+ Persistent<Store> persistent2(new Store);
+
+ IncrementalMarkingTestDriver driver(ThreadState::Current());
+ HeapCompact::ScheduleCompactionGCForTesting(true);
+ persistent->push_back(Object::Create());
+ driver.Start();
+ driver.FinishSteps();
+
+ // Slots that only have an inline buffer and nullptr references should also
+ // be collected.
+ EXPECT_EQ(driver.GetTracedSlot().size(), 2u);
+ driver.FinishGC();
+}
+
+TEST(IncrementalMarkingTest, SlotDestruction) {
+ IncrementalMarkingTestDriver driver(ThreadState::Current());
+ HeapCompact::ScheduleCompactionGCForTesting(true);
+ Vector<MovableReference*> ref(7);
+
+ {
+ Object* obj = Object::Create();
+ PersistentHeapHashSet<Member<Object>> p_hashset;
+ PersistentHeapHashMap<Member<Object>, Member<Object>> p_hashmap;
+ PersistentHeapLinkedHashSet<Member<Object>> p_linkedhashset;
+ PersistentHeapListHashSet<Member<Object>> p_listhashset;
+ PersistentHeapHashCountedSet<Member<Object>> p_hashcountedset;
+ PersistentHeapVector<Member<Object>> p_vector;
+ PersistentHeapDeque<Member<Object>> p_deque;
+
+ p_hashset.insert(obj);
+ p_hashmap.insert(obj, obj);
+ p_linkedhashset.insert(obj);
+ p_listhashset.insert(obj);
+ p_hashcountedset.insert(obj);
+ p_vector.push_back(obj);
+ p_deque.push_back(obj);
+
+ ref[0] = reinterpret_cast<MovableReference*>(&p_hashset);
+ ref[1] = reinterpret_cast<MovableReference*>(&p_hashmap);
+ ref[2] = reinterpret_cast<MovableReference*>(&p_linkedhashset);
+ ref[3] = reinterpret_cast<MovableReference*>(&p_listhashset);
+ ref[4] = reinterpret_cast<MovableReference*>(&p_hashcountedset);
+ ref[5] = reinterpret_cast<MovableReference*>(&p_vector);
+ ref[6] = reinterpret_cast<MovableReference*>(&p_deque);
+
+ driver.Start();
+ driver.FinishSteps();
+
+ for (size_t i = 0; i < ref.size(); ++i) {
+ EXPECT_TRUE(driver.GetTracedSlot().Contains(ref[i]));
+ }
+ }
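+ // Leaving the scope destroys the persistent collections; their destructors
+ // unregister the slots via HeapCompact::RemoveSlot().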
+ for (size_t i = 0; i < ref.size(); ++i) {
+ EXPECT_FALSE(driver.GetTracedSlot().Contains(ref[i]));
+ }
+}
+
} // namespace incremental_marking_test
} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/heap/marking_verifier.h b/chromium/third_party/blink/renderer/platform/heap/marking_verifier.h
index d5b01125efe..33f084dcf68 100644
--- a/chromium/third_party/blink/renderer/platform/heap/marking_verifier.h
+++ b/chromium/third_party/blink/renderer/platform/heap/marking_verifier.h
@@ -53,7 +53,8 @@ class MarkingVerifier final : public Visitor {
WeakCallback,
void*) final {}
void VisitBackingStoreOnly(void*, void**) final {}
- void RegisterBackingStoreCallback(void*, MovingObjectCallback, void*) final {}
+ void RegisterBackingStoreCallback(void**, MovingObjectCallback, void*) final {
+ }
void RegisterWeakCallback(void*, WeakCallback) final {}
void Visit(const TraceWrapperV8Reference<v8::Value>&) final {}
void Visit(DOMWrapperMap<ScriptWrappable>*,
diff --git a/chromium/third_party/blink/renderer/platform/heap/marking_visitor.cc b/chromium/third_party/blink/renderer/platform/heap/marking_visitor.cc
index eb320b74f4f..d803593d36d 100644
--- a/chromium/third_party/blink/renderer/platform/heap/marking_visitor.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/marking_visitor.cc
@@ -117,21 +117,20 @@ void MarkingVisitor::RegisterWeakCallback(void* object, WeakCallback callback) {
weak_callback_worklist_.Push({object, callback});
}
-void MarkingVisitor::RegisterBackingStoreReference(void* slot) {
+void MarkingVisitor::RegisterBackingStoreReference(void** slot) {
if (marking_mode_ != kGlobalMarkingWithCompaction)
return;
Heap().RegisterMovingObjectReference(
reinterpret_cast<MovableReference*>(slot));
}
-void MarkingVisitor::RegisterBackingStoreCallback(void* backing_store,
+void MarkingVisitor::RegisterBackingStoreCallback(void** slot,
MovingObjectCallback callback,
void* callback_data) {
if (marking_mode_ != kGlobalMarkingWithCompaction)
return;
- Heap().RegisterMovingObjectCallback(
- reinterpret_cast<MovableReference>(backing_store), callback,
- callback_data);
+ Heap().RegisterMovingObjectCallback(reinterpret_cast<MovableReference*>(slot),
+ callback, callback_data);
}
bool MarkingVisitor::RegisterWeakTable(const void* closure,
diff --git a/chromium/third_party/blink/renderer/platform/heap/marking_visitor.h b/chromium/third_party/blink/renderer/platform/heap/marking_visitor.h
index 24ae2cedb3e..18eedcdf918 100644
--- a/chromium/third_party/blink/renderer/platform/heap/marking_visitor.h
+++ b/chromium/third_party/blink/renderer/platform/heap/marking_visitor.h
@@ -124,6 +124,8 @@ class PLATFORM_EXPORT MarkingVisitor final : public Visitor {
void** object_slot,
TraceDescriptor desc) final {
RegisterBackingStoreReference(object_slot);
+ if (!object)
+ return;
Visit(object, desc);
}
@@ -140,11 +142,13 @@ class PLATFORM_EXPORT MarkingVisitor final : public Visitor {
// processing. In this case, the contents are processed separately using
// the corresponding traits but the backing store requires marking.
void VisitBackingStoreOnly(void* object, void** object_slot) final {
- MarkHeaderNoTracing(HeapObjectHeader::FromPayload(object));
RegisterBackingStoreReference(object_slot);
+ if (!object)
+ return;
+ MarkHeaderNoTracing(HeapObjectHeader::FromPayload(object));
}
- void RegisterBackingStoreCallback(void* backing_store,
+ void RegisterBackingStoreCallback(void** slot,
MovingObjectCallback,
void* callback_data) final;
bool RegisterWeakTable(const void* closure,
@@ -161,7 +165,7 @@ class PLATFORM_EXPORT MarkingVisitor final : public Visitor {
static void WriteBarrierSlow(void*);
static void TraceMarkedBackingStoreSlow(void*);
- void RegisterBackingStoreReference(void* slot);
+ void RegisterBackingStoreReference(void** slot);
void ConservativelyMarkHeader(HeapObjectHeader*);
diff --git a/chromium/third_party/blink/renderer/platform/heap/persistent.h b/chromium/third_party/blink/renderer/platform/heap/persistent.h
index 0b62143ad41..d15be07f002 100644
--- a/chromium/third_party/blink/renderer/platform/heap/persistent.h
+++ b/chromium/third_party/blink/renderer/platform/heap/persistent.h
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "third_party/blink/renderer/platform/heap/heap.h"
+#include "third_party/blink/renderer/platform/heap/heap_compact.h"
#include "third_party/blink/renderer/platform/heap/member.h"
#include "third_party/blink/renderer/platform/heap/persistent_node.h"
#include "third_party/blink/renderer/platform/heap/visitor.h"
@@ -115,6 +116,9 @@ class PersistentBase {
return *raw_;
}
explicit operator bool() const { return raw_; }
+ // TODO(https://crbug.com/653394): Consider returning a thread-safe best
+ // guess of validity.
+ bool MaybeValid() const { return true; }
operator T*() const {
CheckPointer();
return raw_;
@@ -182,7 +186,7 @@ class PersistentBase {
crossThreadnessConfiguration == kCrossThreadPersistentConfiguration,
"This Persistent does not require the cross-thread lock.");
#if DCHECK_IS_ON()
- DCHECK(ProcessHeap::CrossThreadPersistentMutex().Locked());
+ ProcessHeap::CrossThreadPersistentMutex().AssertAcquired();
#endif
raw_ = nullptr;
CrossThreadPersistentRegion& region =
@@ -338,7 +342,7 @@ class PersistentBase {
kWeakPersistentConfiguration,
kCrossThreadPersistentConfiguration>* persistent) {
#if DCHECK_IS_ON()
- DCHECK(ProcessHeap::CrossThreadPersistentMutex().Locked());
+ ProcessHeap::CrossThreadPersistentMutex().AssertAcquired();
#endif
persistent->ClearWithLockHeld();
}
@@ -683,6 +687,11 @@ class PersistentHeapCollectionBase : public Collection {
#if DCHECK_IS_ON()
DCHECK_EQ(state_, state);
#endif
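+ // If heap compaction is in progress, unregister this collection's buffer
+ // slot so the compactor does not later update a slot that no longer exists.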
+ HeapCompact* compactor = state->Heap().Compaction();
+ if (compactor->IsCompacting()) {
+ compactor->RemoveSlot(
+ reinterpret_cast<MovableReference*>(this->GetBufferSlot()));
+ }
state->FreePersistentNode(state->GetPersistentRegion(), persistent_node_);
persistent_node_ = nullptr;
}
@@ -719,7 +728,7 @@ class PersistentHeapLinkedHashSet
HeapLinkedHashSet<ValueArg, HashArg, TraitsArg>> {};
template <typename ValueArg,
- size_t inlineCapacity = 0,
+ wtf_size_t inlineCapacity = 0,
typename HashArg = typename DefaultHash<ValueArg>::Hash>
class PersistentHeapListHashSet
: public PersistentHeapCollectionBase<
@@ -732,13 +741,13 @@ class PersistentHeapHashCountedSet
: public PersistentHeapCollectionBase<
HeapHashCountedSet<ValueArg, HashFunctions, Traits>> {};
-template <typename T, size_t inlineCapacity = 0>
+template <typename T, wtf_size_t inlineCapacity = 0>
class PersistentHeapVector
: public PersistentHeapCollectionBase<HeapVector<T, inlineCapacity>> {
public:
PersistentHeapVector() { InitializeUnusedSlots(); }
- explicit PersistentHeapVector(size_t size)
+ explicit PersistentHeapVector(wtf_size_t size)
: PersistentHeapCollectionBase<HeapVector<T, inlineCapacity>>(size) {
InitializeUnusedSlots();
}
@@ -748,7 +757,7 @@ class PersistentHeapVector
InitializeUnusedSlots();
}
- template <size_t otherCapacity>
+ template <wtf_size_t otherCapacity>
PersistentHeapVector(const HeapVector<T, otherCapacity>& other)
: PersistentHeapCollectionBase<HeapVector<T, inlineCapacity>>(other) {
InitializeUnusedSlots();
@@ -759,19 +768,19 @@ class PersistentHeapVector
// The PersistentHeapVector is allocated off heap along with its
// inline buffer (if any.) Maintain the invariant that unused
// slots are cleared for the off-heap inline buffer also.
- size_t unused_slots = this->capacity() - this->size();
+ wtf_size_t unused_slots = this->capacity() - this->size();
if (unused_slots)
this->ClearUnusedSlots(this->end(), this->end() + unused_slots);
}
};
-template <typename T, size_t inlineCapacity = 0>
+template <typename T, wtf_size_t inlineCapacity = 0>
class PersistentHeapDeque
: public PersistentHeapCollectionBase<HeapDeque<T, inlineCapacity>> {
public:
PersistentHeapDeque() = default;
- template <size_t otherCapacity>
+ template <wtf_size_t otherCapacity>
PersistentHeapDeque(const HeapDeque<T, otherCapacity>& other)
: PersistentHeapCollectionBase<HeapDeque<T, inlineCapacity>>(other) {}
};
diff --git a/chromium/third_party/blink/renderer/platform/heap/persistent_node.h b/chromium/third_party/blink/renderer/platform/heap/persistent_node.h
index 1255bf444ad..d32346534d6 100644
--- a/chromium/third_party/blink/renderer/platform/heap/persistent_node.h
+++ b/chromium/third_party/blink/renderer/platform/heap/persistent_node.h
@@ -174,7 +174,7 @@ class CrossThreadPersistentRegion final {
void* self,
TraceCallback trace) {
#if DCHECK_IS_ON()
- DCHECK(ProcessHeap::CrossThreadPersistentMutex().Locked());
+ ProcessHeap::CrossThreadPersistentMutex().AssertAcquired();
#endif
PersistentNode* node =
persistent_region_.AllocatePersistentNode(self, trace);
@@ -204,7 +204,7 @@ class CrossThreadPersistentRegion final {
void TracePersistentNodes(Visitor* visitor) {
// If this assert triggers, you're tracing without being in a LockScope.
#if DCHECK_IS_ON()
- DCHECK(ProcessHeap::CrossThreadPersistentMutex().Locked());
+ ProcessHeap::CrossThreadPersistentMutex().AssertAcquired();
#endif
persistent_region_.TracePersistentNodes(
visitor, CrossThreadPersistentRegion::ShouldTracePersistentNode);
diff --git a/chromium/third_party/blink/renderer/platform/heap/process_heap.cc b/chromium/third_party/blink/renderer/platform/heap/process_heap.cc
index a8fcc0a46e6..26afa8bd483 100644
--- a/chromium/third_party/blink/renderer/platform/heap/process_heap.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/process_heap.cc
@@ -4,7 +4,7 @@
#include "third_party/blink/renderer/platform/heap/process_heap.h"
-#include "base/sampling_heap_profiler/sampling_heap_profiler.h"
+#include "base/sampling_heap_profiler/poisson_allocation_sampler.h"
#include "third_party/blink/renderer/platform/heap/gc_info.h"
#include "third_party/blink/renderer/platform/heap/heap.h"
#include "third_party/blink/renderer/platform/heap/persistent_node.h"
@@ -14,12 +14,14 @@ namespace blink {
namespace {
-void BlinkGCAllocHook(uint8_t* address, size_t size, const char*) {
- base::SamplingHeapProfiler::RecordAlloc(address, size);
+void BlinkGCAllocHook(uint8_t* address, size_t size, const char* context) {
+ base::PoissonAllocationSampler::RecordAlloc(
+ address, size, base::PoissonAllocationSampler::AllocatorType::kBlinkGC,
+ context);
}
void BlinkGCFreeHook(uint8_t* address) {
- base::SamplingHeapProfiler::RecordFree(address);
+ base::PoissonAllocationSampler::RecordFree(address);
}
} // namespace
@@ -31,7 +33,7 @@ void ProcessHeap::Init() {
GCInfoTable::CreateGlobalTable();
- base::SamplingHeapProfiler::SetHooksInstallCallback([]() {
+ base::PoissonAllocationSampler::SetHooksInstallCallback([]() {
HeapAllocHooks::SetAllocationHook(&BlinkGCAllocHook);
HeapAllocHooks::SetFreeHook(&BlinkGCFreeHook);
});
diff --git a/chromium/third_party/blink/renderer/platform/heap/safe_point.h b/chromium/third_party/blink/renderer/platform/heap/safe_point.h
deleted file mode 100644
index 41a3e7b1177..00000000000
--- a/chromium/third_party/blink/renderer/platform/heap/safe_point.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_SAFE_POINT_H_
-#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_SAFE_POINT_H_
-
-#include "base/macros.h"
-#include "third_party/blink/renderer/platform/heap/thread_state.h"
-#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
-
-namespace blink {
-
-class SafePointScope final {
- STACK_ALLOCATED();
-
- public:
- explicit SafePointScope(BlinkGC::StackState stack_state,
- ThreadState* state = ThreadState::Current())
- : state_(state) {
- if (state_) {
- state_->EnterSafePoint(stack_state, this);
- }
- }
-
- ~SafePointScope() {
- if (state_)
- state_->LeaveSafePoint();
- }
-
- private:
- ThreadState* state_;
-
- DISALLOW_COPY_AND_ASSIGN(SafePointScope);
-};
-
-} // namespace blink
-
-#endif
diff --git a/chromium/third_party/blink/renderer/platform/heap/thread_state.cc b/chromium/third_party/blink/renderer/platform/heap/thread_state.cc
index 94c2171db9f..b5bf92262fd 100644
--- a/chromium/third_party/blink/renderer/platform/heap/thread_state.cc
+++ b/chromium/third_party/blink/renderer/platform/heap/thread_state.cc
@@ -43,6 +43,7 @@
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/public/platform/web_thread.h"
#include "third_party/blink/renderer/platform/bindings/runtime_call_stats.h"
+#include "third_party/blink/renderer/platform/heap/address_cache.h"
#include "third_party/blink/renderer/platform/heap/blink_gc_memory_dump_provider.h"
#include "third_party/blink/renderer/platform/heap/handle.h"
#include "third_party/blink/renderer/platform/heap/heap.h"
@@ -51,12 +52,12 @@
#include "third_party/blink/renderer/platform/heap/heap_stats_collector.h"
#include "third_party/blink/renderer/platform/heap/marking_visitor.h"
#include "third_party/blink/renderer/platform/heap/page_pool.h"
-#include "third_party/blink/renderer/platform/heap/safe_point.h"
#include "third_party/blink/renderer/platform/heap/visitor.h"
#include "third_party/blink/renderer/platform/histogram.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/web_memory_allocator_dump.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/web_process_memory_dump.h"
+#include "third_party/blink/renderer/platform/scheduler/public/thread_scheduler.h"
#include "third_party/blink/renderer/platform/wtf/allocator/partitions.h"
#include "third_party/blink/renderer/platform/wtf/stack_util.h"
#include "third_party/blink/renderer/platform/wtf/threading_primitives.h"
@@ -167,7 +168,12 @@ ThreadState::ThreadState()
weak_persistent_region_(std::make_unique<PersistentRegion>()),
start_of_stack_(reinterpret_cast<intptr_t*>(WTF::GetStackStart())),
end_of_stack_(reinterpret_cast<intptr_t*>(WTF::GetStackStart())),
- safe_point_scope_marker_(nullptr),
+#if HAS_FEATURE(safe_stack)
+ start_of_unsafe_stack_(
+ reinterpret_cast<intptr_t*>(__builtin___get_unsafe_stack_top())),
+ end_of_unsafe_stack_(
+ reinterpret_cast<intptr_t*>(__builtin___get_unsafe_stack_bottom())),
+#endif
sweep_forbidden_(false),
no_allocation_count_(0),
gc_forbidden_count_(0),
@@ -323,24 +329,15 @@ void ThreadState::VisitAsanFakeStackForPointer(MarkingVisitor* visitor,
NO_SANITIZE_ADDRESS
NO_SANITIZE_THREAD
void ThreadState::VisitStack(MarkingVisitor* visitor) {
- if (stack_state_ == BlinkGC::kNoHeapPointersOnStack)
- return;
+ DCHECK_EQ(current_gc_data_.stack_state, BlinkGC::kHeapPointersOnStack);
Address* start = reinterpret_cast<Address*>(start_of_stack_);
- // If there is a safepoint scope marker we should stop the stack
- // scanning there to not touch active parts of the stack. Anything
- // interesting beyond that point is in the safepoint stack copy.
- // If there is no scope marker the thread is blocked and we should
- // scan all the way to the recorded end stack pointer.
Address* end = reinterpret_cast<Address*>(end_of_stack_);
- Address* safe_point_scope_marker =
- reinterpret_cast<Address*>(safe_point_scope_marker_);
- Address* current = safe_point_scope_marker ? safe_point_scope_marker : end;
// Ensure that current is aligned by address size, otherwise the loop below
// will read past the start address.
- current = reinterpret_cast<Address*>(reinterpret_cast<intptr_t>(current) &
- ~(sizeof(Address) - 1));
+ Address* current = reinterpret_cast<Address*>(
+ reinterpret_cast<intptr_t>(end) & ~(sizeof(Address) - 1));
for (; current < start; ++current) {
Address ptr = *current;
@@ -356,17 +353,32 @@ void ThreadState::VisitStack(MarkingVisitor* visitor) {
VisitAsanFakeStackForPointer(visitor, ptr);
}
- for (Address ptr : safe_point_stack_copy_) {
-#if defined(MEMORY_SANITIZER)
- // See the comment above.
- __msan_unpoison(&ptr, sizeof(ptr));
-#endif
+#if HAS_FEATURE(safe_stack)
+ start = reinterpret_cast<Address*>(start_of_unsafe_stack_);
+ end = reinterpret_cast<Address*>(end_of_unsafe_stack_);
+ current = end;
+
+ for (; current < start; ++current) {
+ Address ptr = *current;
+ // SafeStack and MSan are not compatible.
heap_->CheckAndMarkPointer(visitor, ptr);
VisitAsanFakeStackForPointer(visitor, ptr);
}
+#endif
+}
+
+void ThreadState::VisitDOMWrappers(Visitor* visitor) {
+ if (trace_dom_wrappers_) {
+ ThreadHeapStatsCollector::Scope stats_scope(
+ Heap().stats_collector(), ThreadHeapStatsCollector::kVisitDOMWrappers);
+ trace_dom_wrappers_(isolate_, visitor);
+ }
}
void ThreadState::VisitPersistents(Visitor* visitor) {
+ ThreadHeapStatsCollector::Scope stats_scope(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kVisitPersistentRoots);
{
ThreadHeapStatsCollector::Scope stats_scope(
Heap().stats_collector(),
@@ -380,11 +392,6 @@ void ThreadState::VisitPersistents(Visitor* visitor) {
Heap().stats_collector(), ThreadHeapStatsCollector::kVisitPersistents);
persistent_region_->TracePersistentNodes(visitor);
}
- if (trace_dom_wrappers_) {
- ThreadHeapStatsCollector::Scope stats_scope(
- Heap().stats_collector(), ThreadHeapStatsCollector::kVisitDOMWrappers);
- trace_dom_wrappers_(isolate_, visitor);
- }
}
void ThreadState::VisitWeakPersistents(Visitor* visitor) {
@@ -959,7 +966,6 @@ void ThreadState::FinishSnapshot() {
gc_state_ = kNoGCScheduled;
SetGCPhase(GCPhase::kSweeping);
SetGCPhase(GCPhase::kNone);
- Heap().stats_collector()->NotifySweepingCompleted();
}
void ThreadState::AtomicPauseEpilogue(BlinkGC::MarkingType marking_type,
@@ -1038,11 +1044,19 @@ void ThreadState::CompleteSweep() {
return;
{
- AtomicPauseScope atomic_pause_scope(this);
+ // CompleteSweep may be called during regular mutator execution, from a
+ // task, or from the atomic pause, in which case the atomic scope has
+ // already been opened.
+ const bool was_in_atomic_pause = in_atomic_pause();
+ if (!was_in_atomic_pause)
+ EnterAtomicPause();
+ ScriptForbiddenScope script_forbidden;
SweepForbiddenScope scope(this);
ThreadHeapStatsCollector::EnabledScope stats_scope(
Heap().stats_collector(), ThreadHeapStatsCollector::kCompleteSweep);
Heap().CompleteSweep();
+ if (!was_in_atomic_pause)
+ LeaveAtomicPause();
}
PostSweep();
}
@@ -1208,6 +1222,16 @@ void UpdateHistograms(const ThreadHeapStatsCollector::Event& event) {
} // namespace
+void ThreadState::UpdateStatisticsAfterSweeping() {
+ DCHECK(!IsSweepingInProgress());
+ DCHECK(Heap().stats_collector()->is_started());
+ Heap().stats_collector()->NotifySweepingCompleted();
+ if (IsMainThread())
+ UpdateHistograms(Heap().stats_collector()->previous());
+ // Emit trace counters for all threads.
+ UpdateTraceCounters(*Heap().stats_collector());
+}
+
void ThreadState::PostSweep() {
DCHECK(CheckThread());
@@ -1220,74 +1244,36 @@ void ThreadState::PostSweep() {
for (auto* const observer : observers_)
observer->OnCompleteSweepDone();
- Heap().stats_collector()->NotifySweepingCompleted();
- if (IsMainThread())
- UpdateHistograms(Heap().stats_collector()->previous());
- // Emit trace counters for all threads.
- UpdateTraceCounters(*Heap().stats_collector());
+ if (!in_atomic_pause()) {
+ // Immediately update the statistics if running outside of the atomic pause.
+ UpdateStatisticsAfterSweeping();
+ }
}
void ThreadState::SafePoint(BlinkGC::StackState stack_state) {
DCHECK(CheckThread());
RunScheduledGC(stack_state);
- stack_state_ = BlinkGC::kHeapPointersOnStack;
}
-#ifdef ADDRESS_SANITIZER
-// When we are running under AddressSanitizer with
-// detect_stack_use_after_return=1 then stack marker obtained from
-// SafePointScope will point into a fake stack. Detect this case by checking if
-// it falls in between current stack frame and stack start and use an arbitrary
-// high enough value for it. Don't adjust stack marker in any other case to
-// match behavior of code running without AddressSanitizer.
-NO_SANITIZE_ADDRESS static void* AdjustScopeMarkerForAdressSanitizer(
- void* scope_marker) {
- Address start = reinterpret_cast<Address>(WTF::GetStackStart());
- Address end = reinterpret_cast<Address>(&start);
- CHECK_LT(end, start);
-
- if (end <= scope_marker && scope_marker < start)
- return scope_marker;
-
- // 256 is as good an approximation as any else.
- const size_t kBytesToCopy = sizeof(Address) * 256;
- if (static_cast<size_t>(start - end) < kBytesToCopy)
- return start;
-
- return end + kBytesToCopy;
-}
-#endif
-
// TODO(haraken): The first void* pointer is unused. Remove it.
using PushAllRegistersCallback = void (*)(void*, ThreadState*, intptr_t*);
extern "C" void PushAllRegisters(void*, ThreadState*, PushAllRegistersCallback);
-static void EnterSafePointAfterPushRegisters(void*,
- ThreadState* state,
- intptr_t* stack_end) {
+static void DidPushRegisters(void*, ThreadState* state, intptr_t* stack_end) {
state->RecordStackEnd(stack_end);
- state->CopyStackUntilSafePointScope();
-}
-
-void ThreadState::EnterSafePoint(BlinkGC::StackState stack_state,
- void* scope_marker) {
- DCHECK(CheckThread());
-#ifdef ADDRESS_SANITIZER
- if (stack_state == BlinkGC::kHeapPointersOnStack)
- scope_marker = AdjustScopeMarkerForAdressSanitizer(scope_marker);
+#if HAS_FEATURE(safe_stack)
+ state->RecordUnsafeStackEnd(
+ reinterpret_cast<intptr_t*>(__builtin___get_unsafe_stack_ptr()));
#endif
- DCHECK(stack_state == BlinkGC::kNoHeapPointersOnStack || scope_marker);
- DCHECK(IsGCForbidden());
- stack_state_ = stack_state;
- safe_point_scope_marker_ = scope_marker;
- PushAllRegisters(nullptr, this, EnterSafePointAfterPushRegisters);
}
-void ThreadState::LeaveSafePoint() {
+void ThreadState::PushRegistersAndVisitStack() {
DCHECK(CheckThread());
- stack_state_ = BlinkGC::kHeapPointersOnStack;
- ClearSafePointScopeMarker();
+ DCHECK(IsGCForbidden());
+ DCHECK_EQ(current_gc_data_.stack_state, BlinkGC::kHeapPointersOnStack);
+ PushAllRegisters(nullptr, this, DidPushRegisters);
+ VisitStack(static_cast<MarkingVisitor*>(CurrentVisitor()));
}
void ThreadState::AddObserver(BlinkGCObserver* observer) {
@@ -1314,32 +1300,6 @@ void ThreadState::ReportMemoryToV8() {
reported_memory_to_v8_ = current_heap_size;
}
-void ThreadState::CopyStackUntilSafePointScope() {
- if (!safe_point_scope_marker_ ||
- stack_state_ == BlinkGC::kNoHeapPointersOnStack)
- return;
-
- Address* to = reinterpret_cast<Address*>(safe_point_scope_marker_);
- Address* from = reinterpret_cast<Address*>(end_of_stack_);
- CHECK_LT(from, to);
- CHECK_LE(to, reinterpret_cast<Address*>(start_of_stack_));
- size_t slot_count = static_cast<size_t>(to - from);
-// Catch potential performance issues.
-#if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
- // ASan/LSan use more space on the stack and we therefore
- // increase the allowed stack copying for those builds.
- DCHECK_LT(slot_count, 2048u);
-#else
- DCHECK_LT(slot_count, 1024u);
-#endif
-
- DCHECK(!safe_point_stack_copy_.size());
- safe_point_stack_copy_.resize(slot_count);
- for (size_t i = 0; i < slot_count; ++i) {
- safe_point_stack_copy_[i] = from[i];
- }
-}
-
void ThreadState::RegisterStaticPersistentNode(
PersistentNode* node,
PersistentClearCallback callback) {
@@ -1452,7 +1412,7 @@ void ThreadState::IncrementalMarkingStart(BlinkGC::GCReason reason) {
CompleteSweep();
Heap().stats_collector()->NotifyMarkingStarted(reason);
{
- ThreadHeapStatsCollector::Scope stats_scope(
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
Heap().stats_collector(),
ThreadHeapStatsCollector::kIncrementalMarkingStartMarking, "reason",
GcReasonString(reason));
@@ -1470,7 +1430,7 @@ void ThreadState::IncrementalMarkingStart(BlinkGC::GCReason reason) {
}
void ThreadState::IncrementalMarkingStep() {
- ThreadHeapStatsCollector::Scope stats_scope(
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
Heap().stats_collector(),
ThreadHeapStatsCollector::kIncrementalMarkingStep);
VLOG(2) << "[state:" << this << "] "
@@ -1487,7 +1447,7 @@ void ThreadState::IncrementalMarkingStep() {
}
void ThreadState::IncrementalMarkingFinalize() {
- ThreadHeapStatsCollector::Scope stats_scope(
+ ThreadHeapStatsCollector::EnabledScope stats_scope(
Heap().stats_collector(),
ThreadHeapStatsCollector::kIncrementalMarkingFinalize);
VLOG(2) << "[state:" << this << "] "
@@ -1571,27 +1531,27 @@ void ThreadState::CollectGarbage(BlinkGC::StackState stack_state,
<< " reason: " << GcReasonString(reason);
}
-void ThreadState::RunAtomicPause(BlinkGC::StackState stack_state,
- BlinkGC::MarkingType marking_type,
- BlinkGC::SweepingType sweeping_type,
- BlinkGC::GCReason reason) {
- {
- ThreadHeapStatsCollector::EnabledScope stats1(
- Heap().stats_collector(), ThreadHeapStatsCollector::kAtomicPhase);
- AtomicPauseScope atomic_pause_scope(this);
- {
- ThreadHeapStatsCollector::EnabledScope stats2(
- Heap().stats_collector(),
- ThreadHeapStatsCollector::kAtomicPhaseMarking, "lazySweeping",
- sweeping_type == BlinkGC::kLazySweeping ? "yes" : "no", "gcReason",
- GcReasonString(reason));
- AtomicPausePrologue(stack_state, marking_type, reason);
- MarkPhaseVisitRoots();
- CHECK(MarkPhaseAdvanceMarking(TimeTicks::Max()));
- MarkPhaseEpilogue(marking_type);
- }
- AtomicPauseEpilogue(marking_type, sweeping_type);
- }
+void ThreadState::AtomicPauseMarkPrologue(BlinkGC::StackState stack_state,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::GCReason reason) {
+ AtomicPausePrologue(stack_state, marking_type, reason);
+ MarkPhaseVisitRoots();
+ MarkPhaseVisitNotFullyConstructedObjects();
+}
+
+void ThreadState::AtomicPauseMarkTransitiveClosure() {
+ CHECK(MarkPhaseAdvanceMarking(TimeTicks::Max()));
+}
+
+void ThreadState::AtomicPauseMarkEpilogue(BlinkGC::MarkingType marking_type) {
+ MarkPhaseEpilogue(marking_type);
+}
+
+void ThreadState::AtomicPauseSweepAndCompact(
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::SweepingType sweeping_type) {
+ AtomicPauseScope atomic_pause_scope(this);
+ AtomicPauseEpilogue(marking_type, sweeping_type);
if (marking_type == BlinkGC::kTakeSnapshot) {
FinishSnapshot();
CHECK(!IsSweepingInProgress());
@@ -1609,6 +1569,33 @@ void ThreadState::RunAtomicPause(BlinkGC::StackState stack_state,
}
}
+void ThreadState::RunAtomicPause(BlinkGC::StackState stack_state,
+ BlinkGC::MarkingType marking_type,
+ BlinkGC::SweepingType sweeping_type,
+ BlinkGC::GCReason reason) {
+ {
+ ThreadHeapStatsCollector::DevToolsScope stats1(
+ Heap().stats_collector(), ThreadHeapStatsCollector::kAtomicPhase);
+ {
+ AtomicPauseScope atomic_pause_scope(this);
+ ThreadHeapStatsCollector::EnabledScope stats2(
+ Heap().stats_collector(),
+ ThreadHeapStatsCollector::kAtomicPhaseMarking, "lazySweeping",
+ sweeping_type == BlinkGC::kLazySweeping ? "yes" : "no", "gcReason",
+ GcReasonString(reason));
+ AtomicPauseMarkPrologue(stack_state, marking_type, reason);
+ AtomicPauseMarkTransitiveClosure();
+ AtomicPauseMarkEpilogue(marking_type);
+ }
+ AtomicPauseSweepAndCompact(marking_type, sweeping_type);
+ }
+ if (!IsSweepingInProgress()) {
+ // Sweeping was finished during the atomic pause. Updating statistics needs
+ // to run outside of the top-most stats scope.
+ UpdateStatisticsAfterSweeping();
+ }
+}
+
namespace {
MarkingVisitor::MarkingMode GetMarkingMode(bool should_compact,
@@ -1668,25 +1655,29 @@ void ThreadState::AtomicPausePrologue(BlinkGC::StackState stack_state,
}
void ThreadState::MarkPhaseVisitRoots() {
- // StackFrameDepth should be disabled so we don't trace most of the object
- // graph in one incremental marking step.
+ // StackFrameDepth should be disabled to avoid eagerly tracing into the object
+ // graph when just visiting roots.
DCHECK(!Heap().GetStackFrameDepth().IsEnabled());
- // 1. Trace persistent roots.
- Heap().VisitPersistentRoots(current_gc_data_.visitor.get());
+ Visitor* visitor = current_gc_data_.visitor.get();
- // 2. Trace objects reachable from the stack.
- {
- SafePointScope safe_point_scope(current_gc_data_.stack_state, this);
- Heap().VisitStackRoots(current_gc_data_.visitor.get());
+ VisitPersistents(visitor);
+
+ VisitDOMWrappers(visitor);
+
+ if (current_gc_data_.stack_state == BlinkGC::kHeapPointersOnStack) {
+ ThreadHeapStatsCollector::Scope stats_scope(
+ Heap().stats_collector(), ThreadHeapStatsCollector::kVisitStackRoots);
+ AddressCache::EnabledScope address_cache_scope(Heap().address_cache());
+ PushRegistersAndVisitStack();
}
}
bool ThreadState::MarkPhaseAdvanceMarking(TimeTicks deadline) {
StackFrameDepthScope stack_depth_scope(&Heap().GetStackFrameDepth());
- // 3. Transitive closure to trace objects including ephemerons.
- return Heap().AdvanceMarkingStackProcessing(current_gc_data_.visitor.get(),
- deadline);
+ return Heap().AdvanceMarking(
+ reinterpret_cast<MarkingVisitor*>(current_gc_data_.visitor.get()),
+ deadline);
}
bool ThreadState::ShouldVerifyMarking() const {
@@ -1698,12 +1689,13 @@ bool ThreadState::ShouldVerifyMarking() const {
return should_verify_marking;
}
+void ThreadState::MarkPhaseVisitNotFullyConstructedObjects() {
+ Heap().MarkNotFullyConstructedObjects(
+ reinterpret_cast<MarkingVisitor*>(current_gc_data_.visitor.get()));
+}
+
void ThreadState::MarkPhaseEpilogue(BlinkGC::MarkingType marking_type) {
Visitor* visitor = current_gc_data_.visitor.get();
- // Finish marking of not-fully-constructed objects.
- Heap().MarkNotFullyConstructedObjects(visitor);
- CHECK(Heap().AdvanceMarkingStackProcessing(visitor, TimeTicks::Max()));
-
{
// See ProcessHeap::CrossThreadPersistentMutex().
MutexLocker persistent_lock(ProcessHeap::CrossThreadPersistentMutex());
diff --git a/chromium/third_party/blink/renderer/platform/heap/thread_state.h b/chromium/third_party/blink/renderer/platform/heap/thread_state.h
index 0060c09e379..c77816c53e8 100644
--- a/chromium/third_party/blink/renderer/platform/heap/thread_state.h
+++ b/chromium/third_party/blink/renderer/platform/heap/thread_state.h
@@ -35,11 +35,11 @@
#include "base/atomicops.h"
#include "base/macros.h"
+#include "third_party/blink/public/platform/scheduler/web_rail_mode_observer.h"
#include "third_party/blink/renderer/platform/bindings/script_forbidden_scope.h"
#include "third_party/blink/renderer/platform/heap/blink_gc.h"
#include "third_party/blink/renderer/platform/heap/threading_traits.h"
#include "third_party/blink/renderer/platform/platform_export.h"
-#include "third_party/blink/renderer/platform/scheduler/public/thread_scheduler.h"
#include "third_party/blink/renderer/platform/wtf/address_sanitizer.h"
#include "third_party/blink/renderer/platform/wtf/allocator.h"
#include "third_party/blink/renderer/platform/wtf/forward.h"
@@ -139,7 +139,7 @@ class PLATFORM_EXPORT BlinkGCObserver {
};
class PLATFORM_EXPORT ThreadState final
- : scheduler::WebThreadScheduler::RAILModeObserver {
+ : private scheduler::WebRAILModeObserver {
USING_FAST_MALLOC(ThreadState);
public:
@@ -408,37 +408,16 @@ class PLATFORM_EXPORT ThreadState final
void FlushHeapDoesNotContainCacheIfNeeded();
- // Safepoint related functionality.
- //
- // When a thread attempts to perform GC it needs to stop all other threads
- // that use the heap or at least guarantee that they will not touch any
- // heap allocated object until GC is complete.
- //
- // We say that a thread is at a safepoint if this thread is guaranteed to
- // not touch any heap allocated object or any heap related functionality until
- // it leaves the safepoint.
- //
- // Notice that a thread does not have to be paused if it is at safepoint it
- // can continue to run and perform tasks that do not require interaction
- // with the heap. It will be paused if it attempts to leave the safepoint and
- // there is a GC in progress.
- //
- // Each thread that has ThreadState attached must:
- // - periodically check if GC is requested from another thread by calling a
- // safePoint() method;
- // - use SafePointScope around long running loops that have no safePoint()
- // invocation inside, such loops must not touch any heap object;
- //
- // Check if GC is requested by another thread and pause this thread if this is
- // the case. Can only be called when current thread is in a consistent state.
void SafePoint(BlinkGC::StackState);
- // Mark current thread as running inside safepoint.
- void EnterSafePoint(BlinkGC::StackState, void*);
- void LeaveSafePoint();
-
void RecordStackEnd(intptr_t* end_of_stack) { end_of_stack_ = end_of_stack; }
- NO_SANITIZE_ADDRESS void CopyStackUntilSafePointScope();
+#if HAS_FEATURE(safe_stack)
+ void RecordUnsafeStackEnd(intptr_t* end_of_unsafe_stack) {
+ end_of_unsafe_stack_ = end_of_unsafe_stack;
+ }
+#endif
+
+ void PushRegistersAndVisitStack();
// A region of non-weak PersistentNodes allocated on the given thread.
PersistentRegion* GetPersistentRegion() const {
@@ -464,8 +443,13 @@ class PLATFORM_EXPORT ThreadState final
// Visit all weak persistents allocated on this thread.
void VisitWeakPersistents(Visitor*);
+ // Visit all DOM wrappers allocated on this thread.
+ void VisitDOMWrappers(Visitor*);
+
struct GCSnapshotInfo {
STACK_ALLOCATED();
+
+ public:
GCSnapshotInfo(size_t num_object_types);
// Map from gcInfoIndex (vector-index) to count/size.
@@ -527,8 +511,6 @@ class PLATFORM_EXPORT ThreadState final
v8::Isolate* GetIsolate() const { return isolate_; }
- BlinkGC::StackState GetStackState() const { return stack_state_; }
-
void CollectGarbage(BlinkGC::StackState,
BlinkGC::MarkingType,
BlinkGC::SweepingType,
@@ -569,20 +551,15 @@ class PLATFORM_EXPORT ThreadState final
MarkingVisitor* CurrentVisitor() { return current_gc_data_.visitor.get(); }
- // Implementation for RAILModeObserver
+ // Implementation for WebRAILModeObserver
void OnRAILModeChanged(v8::RAILMode new_mode) override {
should_optimize_for_load_time_ = new_mode == v8::RAILMode::PERFORMANCE_LOAD;
+ if (should_optimize_for_load_time_ && IsIncrementalMarking() &&
+ GetGCState() == GCState::kIncrementalMarkingStepScheduled)
+ ScheduleIncrementalMarkingFinalize();
}
private:
- // Needs to set up visitor for testing purposes.
- friend class incremental_marking_test::IncrementalMarkingScope;
- friend class incremental_marking_test::IncrementalMarkingTestDriver;
- template <typename T>
- friend class PrefinalizerRegistration;
- friend class TestGCMarkingScope;
- friend class ThreadStateSchedulingTest;
-
// Number of ThreadState's that are currently in incremental marking. The
// counter is incremented by one when some ThreadState enters incremental
// marking and decremented upon finishing.
@@ -594,6 +571,24 @@ class PLATFORM_EXPORT ThreadState final
ThreadState();
~ThreadState() override;
+ // The following methods are used to compose RunAtomicPause. Public users
+ // should use the CollectGarbage entrypoint. Internal users should use these
+ // methods to compose a full garbage collection.
+ void AtomicPauseMarkPrologue(BlinkGC::StackState,
+ BlinkGC::MarkingType,
+ BlinkGC::GCReason);
+ void AtomicPauseMarkTransitiveClosure();
+ void AtomicPauseMarkEpilogue(BlinkGC::MarkingType);
+ void AtomicPauseSweepAndCompact(BlinkGC::MarkingType marking_type,
+ BlinkGC::SweepingType sweeping_type);
+
+ void RunAtomicPause(BlinkGC::StackState,
+ BlinkGC::MarkingType,
+ BlinkGC::SweepingType,
+ BlinkGC::GCReason);
+
+ void UpdateStatisticsAfterSweeping();
+
// The version is needed to be able to start incremental marking.
void MarkPhasePrologue(BlinkGC::StackState,
BlinkGC::MarkingType,
@@ -604,19 +599,10 @@ class PLATFORM_EXPORT ThreadState final
void AtomicPauseEpilogue(BlinkGC::MarkingType, BlinkGC::SweepingType);
void MarkPhaseEpilogue(BlinkGC::MarkingType);
void MarkPhaseVisitRoots();
+ void MarkPhaseVisitNotFullyConstructedObjects();
bool MarkPhaseAdvanceMarking(TimeTicks deadline);
void VerifyMarking(BlinkGC::MarkingType);
- void RunAtomicPause(BlinkGC::StackState,
- BlinkGC::MarkingType,
- BlinkGC::SweepingType,
- BlinkGC::GCReason);
-
- void ClearSafePointScopeMarker() {
- safe_point_stack_copy_.clear();
- safe_point_scope_marker_ = nullptr;
- }
-
bool ShouldVerifyMarking() const;
// shouldScheduleIdleGC and shouldForceConservativeGC
@@ -669,7 +655,6 @@ class PLATFORM_EXPORT ThreadState final
void ReportMemoryToV8();
- friend class SafePointScope;
friend class BlinkGCObserver;
@@ -698,12 +683,14 @@ class PLATFORM_EXPORT ThreadState final
ThreadIdentifier thread_;
std::unique_ptr<PersistentRegion> persistent_region_;
std::unique_ptr<PersistentRegion> weak_persistent_region_;
- BlinkGC::StackState stack_state_;
intptr_t* start_of_stack_;
intptr_t* end_of_stack_;
- void* safe_point_scope_marker_;
- Vector<Address> safe_point_stack_copy_;
+#if HAS_FEATURE(safe_stack)
+ intptr_t* start_of_unsafe_stack_;
+ intptr_t* end_of_unsafe_stack_;
+#endif
+
bool sweep_forbidden_;
size_t no_allocation_count_;
size_t gc_forbidden_count_;
@@ -766,6 +753,14 @@ class PLATFORM_EXPORT ThreadState final
};
GCData current_gc_data_;
+ // Needs to set up visitor for testing purposes.
+ friend class incremental_marking_test::IncrementalMarkingScope;
+ friend class incremental_marking_test::IncrementalMarkingTestDriver;
+ template <typename T>
+ friend class PrefinalizerRegistration;
+ friend class TestGCScope;
+ friend class ThreadStateSchedulingTest;
+
DISALLOW_COPY_AND_ASSIGN(ThreadState);
};
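In the header above, the atomic pause is now expressed as composable phases, and the safepoint machinery (stack_state_, safe_point_scope_marker_, safe_point_stack_copy_, EnterSafePoint/LeaveSafePoint) is replaced by plain stack-extent bookkeeping (including the unsafe stack under Clang's SafeStack) plus PushRegistersAndVisitStack(). A plausible composition of the phases, consistent with the comment next to their declarations; RunAtomicPauseSketch is a stand-in name and the real RunAtomicPause() body in thread_state.cc may differ.

// Sketch only; assumes the four phases chain in declaration order.
void ThreadState::RunAtomicPauseSketch(BlinkGC::StackState stack_state,
                                       BlinkGC::MarkingType marking_type,
                                       BlinkGC::SweepingType sweeping_type,
                                       BlinkGC::GCReason reason) {
  AtomicPauseMarkPrologue(stack_state, marking_type, reason);  // stop, set up marking
  AtomicPauseMarkTransitiveClosure();                          // drain worklists
  AtomicPauseMarkEpilogue(marking_type);                       // weak/persistent handling
  AtomicPauseSweepAndCompact(marking_type, sweeping_type);     // finish or schedule sweeping
}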
diff --git a/chromium/third_party/blink/renderer/platform/heap/threading_traits.h b/chromium/third_party/blink/renderer/platform/heap/threading_traits.h
index 34a66f1ae5b..036118b18b6 100644
--- a/chromium/third_party/blink/renderer/platform/heap/threading_traits.h
+++ b/chromium/third_party/blink/renderer/platform/heap/threading_traits.h
@@ -169,9 +169,9 @@ template <typename T, typename U, typename V, typename W, typename X>
class HeapHashMap;
template <typename T, typename U, typename V>
class HeapHashSet;
-template <typename T, size_t inlineCapacity>
+template <typename T, wtf_size_t inlineCapacity>
class HeapVector;
-template <typename T, size_t inlineCapacity>
+template <typename T, wtf_size_t inlineCapacity>
class HeapDeque;
template <typename T, typename U, typename V>
class HeapHashCountedSet;
diff --git a/chromium/third_party/blink/renderer/platform/heap/trace_traits.h b/chromium/third_party/blink/renderer/platform/heap/trace_traits.h
index 9ac4c315e2b..b7473d85ef4 100644
--- a/chromium/third_party/blink/renderer/platform/heap/trace_traits.h
+++ b/chromium/third_party/blink/renderer/platform/heap/trace_traits.h
@@ -400,9 +400,9 @@ class TraceEagerlyTrait<HeapDoublyLinkedList<T>> {
static const bool value = TraceEagerlyTrait<T>::value;
};
-template <typename ValueArg, size_t inlineCapacity>
+template <typename ValueArg, wtf_size_t inlineCapacity>
class HeapListHashSetAllocator;
-template <typename T, size_t inlineCapacity>
+template <typename T, wtf_size_t inlineCapacity>
class TraceEagerlyTrait<
WTF::ListHashSetNode<T, HeapListHashSetAllocator<T, inlineCapacity>>> {
STATIC_ONLY(TraceEagerlyTrait);
diff --git a/chromium/third_party/blink/renderer/platform/heap/visitor.h b/chromium/third_party/blink/renderer/platform/heap/visitor.h
index 9b9ba505449..e0723f7ae6c 100644
--- a/chromium/third_party/blink/renderer/platform/heap/visitor.h
+++ b/chromium/third_party/blink/renderer/platform/heap/visitor.h
@@ -120,8 +120,6 @@ class PLATFORM_EXPORT Visitor {
static_assert(IsGarbageCollectedType<T>::value,
"T needs to be a garbage collected object");
- if (!backing_store)
- return;
VisitBackingStoreStrongly(reinterpret_cast<void*>(backing_store),
reinterpret_cast<void**>(backing_store_slot),
TraceDescriptorFor(backing_store));
@@ -151,8 +149,6 @@ class PLATFORM_EXPORT Visitor {
static_assert(IsGarbageCollectedType<T>::value,
"T needs to be a garbage collected object");
- if (!backing_store)
- return;
VisitBackingStoreOnly(reinterpret_cast<void*>(backing_store),
reinterpret_cast<void**>(backing_store_slot));
}
@@ -270,7 +266,7 @@ class PLATFORM_EXPORT Visitor {
// Registers backing store pointers so that they can be moved and properly
// updated.
- virtual void RegisterBackingStoreCallback(void* backing_store,
+ virtual void RegisterBackingStoreCallback(void** slot,
MovingObjectCallback,
void* callback_data) = 0;
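The visitor.h hunks remove the early null-return from the templated backing-store helpers (null handling now has to happen either at the call sites or inside the virtual VisitBackingStore* overrides, neither of which is shown here) and change RegisterBackingStoreCallback() to take the slot holding the backing pointer instead of the pointer value. Passing the slot is what lets heap compaction rewrite it after the payload moves. An illustrative caller; MyMoveCallback is hypothetical and would have to match Visitor's MovingObjectCallback typedef, which is declared elsewhere in the heap headers.

// Illustration only; not taken from the patch.
void RegisterBackingForCompactionSketch(Visitor* visitor, void** backing_slot) {
  // Register the slot, not the current pointer: the compactor can then
  // update *backing_slot in place when the backing store is relocated.
  visitor->RegisterBackingStoreCallback(backing_slot, MyMoveCallback,
                                        /*callback_data=*/nullptr);
}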