author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2019-02-13 15:05:36 +0100
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2019-02-14 10:33:47 +0000
commit     e684a3455bcc29a6e3e66a004e352dea4e1141e7 (patch)
tree       d55b4003bde34d7d05f558f02cfd82b2a66a7aac /chromium/v8/src/heap
parent     2b94bfe47ccb6c08047959d1c26e392919550e86 (diff)
download   qtwebengine-chromium-e684a3455bcc29a6e3e66a004e352dea4e1141e7.tar.gz
BASELINE: Update Chromium to 72.0.3626.110 and Ninja to 1.9.0
Change-Id: Ic57220b00ecc929a893c91f5cc552f5d3e99e922
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/v8/src/heap')
-rw-r--r--  chromium/v8/src/heap/array-buffer-collector.cc | 8
-rw-r--r--  chromium/v8/src/heap/array-buffer-tracker-inl.h | 4
-rw-r--r--  chromium/v8/src/heap/array-buffer-tracker.cc | 4
-rw-r--r--  chromium/v8/src/heap/barrier.h | 26
-rw-r--r--  chromium/v8/src/heap/code-stats.cc | 6
-rw-r--r--  chromium/v8/src/heap/concurrent-marking.cc | 240
-rw-r--r--  chromium/v8/src/heap/embedder-tracing.cc | 68
-rw-r--r--  chromium/v8/src/heap/embedder-tracing.h | 78
-rw-r--r--  chromium/v8/src/heap/factory-inl.h | 9
-rw-r--r--  chromium/v8/src/heap/factory.cc | 551
-rw-r--r--  chromium/v8/src/heap/factory.h | 80
-rw-r--r--  chromium/v8/src/heap/gc-tracer.cc | 86
-rw-r--r--  chromium/v8/src/heap/gc-tracer.h | 8
-rw-r--r--  chromium/v8/src/heap/heap-inl.h | 210
-rw-r--r--  chromium/v8/src/heap/heap-write-barrier-inl.h | 72
-rw-r--r--  chromium/v8/src/heap/heap-write-barrier.h | 40
-rw-r--r--  chromium/v8/src/heap/heap.cc | 902
-rw-r--r--  chromium/v8/src/heap/heap.h | 492
-rw-r--r--  chromium/v8/src/heap/incremental-marking-inl.h | 14
-rw-r--r--  chromium/v8/src/heap/incremental-marking-job.cc | 47
-rw-r--r--  chromium/v8/src/heap/incremental-marking-job.h | 23
-rw-r--r--  chromium/v8/src/heap/incremental-marking.cc | 267
-rw-r--r--  chromium/v8/src/heap/incremental-marking.h | 23
-rw-r--r--  chromium/v8/src/heap/item-parallel-job.cc | 2
-rw-r--r--  chromium/v8/src/heap/mark-compact-inl.h | 126
-rw-r--r--  chromium/v8/src/heap/mark-compact.cc | 752
-rw-r--r--  chromium/v8/src/heap/mark-compact.h | 132
-rw-r--r--  chromium/v8/src/heap/marking.h | 4
-rw-r--r--  chromium/v8/src/heap/memory-reducer.cc | 1
-rw-r--r--  chromium/v8/src/heap/object-stats.cc | 105
-rw-r--r--  chromium/v8/src/heap/object-stats.h | 2
-rw-r--r--  chromium/v8/src/heap/objects-visiting-inl.h | 78
-rw-r--r--  chromium/v8/src/heap/objects-visiting.cc | 90
-rw-r--r--  chromium/v8/src/heap/objects-visiting.h | 137
-rw-r--r--  chromium/v8/src/heap/remembered-set.h | 39
-rw-r--r--  chromium/v8/src/heap/scavenge-job.cc | 1
-rw-r--r--  chromium/v8/src/heap/scavenger-inl.h | 141
-rw-r--r--  chromium/v8/src/heap/scavenger.cc | 79
-rw-r--r--  chromium/v8/src/heap/scavenger.h | 66
-rw-r--r--  chromium/v8/src/heap/setup-heap-internal.cc | 164
-rw-r--r--  chromium/v8/src/heap/slot-set.cc | 100
-rw-r--r--  chromium/v8/src/heap/slot-set.h | 286
-rw-r--r--  chromium/v8/src/heap/spaces-inl.h | 25
-rw-r--r--  chromium/v8/src/heap/spaces.cc | 424
-rw-r--r--  chromium/v8/src/heap/spaces.h | 200
-rw-r--r--  chromium/v8/src/heap/store-buffer.cc | 18
-rw-r--r--  chromium/v8/src/heap/store-buffer.h | 6
-rw-r--r--  chromium/v8/src/heap/sweeper.cc | 39
-rw-r--r--  chromium/v8/src/heap/sweeper.h | 14
-rw-r--r--  chromium/v8/src/heap/worklist.h | 15
50 files changed, 3565 insertions, 2739 deletions
diff --git a/chromium/v8/src/heap/array-buffer-collector.cc b/chromium/v8/src/heap/array-buffer-collector.cc
index 0cf4ae945da..6d4e1bb3c35 100644
--- a/chromium/v8/src/heap/array-buffer-collector.cc
+++ b/chromium/v8/src/heap/array-buffer-collector.cc
@@ -5,9 +5,11 @@
#include "src/heap/array-buffer-collector.h"
#include "src/base/template-utils.h"
+#include "src/cancelable-task.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/task-utils.h"
namespace v8 {
namespace internal {
@@ -28,13 +30,13 @@ void ArrayBufferCollector::QueueOrFreeGarbageAllocations(
if (heap_->ShouldReduceMemory()) {
FreeAllocationsHelper(heap_, allocations);
} else {
- base::LockGuard<base::Mutex> guard(&allocations_mutex_);
+ base::MutexGuard guard(&allocations_mutex_);
allocations_.push_back(std::move(allocations));
}
}
void ArrayBufferCollector::PerformFreeAllocations() {
- base::LockGuard<base::Mutex> guard(&allocations_mutex_);
+ base::MutexGuard guard(&allocations_mutex_);
for (const std::vector<JSArrayBuffer::Allocation>& allocations :
allocations_) {
FreeAllocationsHelper(heap_, allocations);
@@ -48,7 +50,7 @@ void ArrayBufferCollector::FreeAllocations() {
if (!heap_->IsTearingDown() && !heap_->ShouldReduceMemory() &&
FLAG_concurrent_array_buffer_freeing) {
V8::GetCurrentPlatform()->CallOnWorkerThread(
- MakeCancelableLambdaTask(heap_->isolate(), [this] {
+ MakeCancelableTask(heap_->isolate(), [this] {
TRACE_BACKGROUND_GC(
heap_->tracer(),
GCTracer::BackgroundScope::BACKGROUND_ARRAY_BUFFER_FREE);
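
The substantive change here is the move from base::LockGuard<base::Mutex> to the shorter base::MutexGuard alias (plus MakeCancelableLambdaTask becoming MakeCancelableTask), a rename that repeats across the rest of this commit. A minimal sketch of the same scoped-locking pattern using the standard library in place of V8's base types; AllocationQueue is a hypothetical class, not V8 code:

#include <mutex>
#include <vector>

// Stand-in for base::MutexGuard: a scoped lock released at end of scope.
using MutexGuard = std::lock_guard<std::mutex>;

class AllocationQueue {
 public:
  void Push(int allocation) {
    MutexGuard guard(mutex_);  // locks here, unlocks when guard is destroyed
    allocations_.push_back(allocation);
  }

 private:
  std::mutex mutex_;
  std::vector<int> allocations_;
};
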
diff --git a/chromium/v8/src/heap/array-buffer-tracker-inl.h b/chromium/v8/src/heap/array-buffer-tracker-inl.h
index 814cfce63ae..41f23c489b9 100644
--- a/chromium/v8/src/heap/array-buffer-tracker-inl.h
+++ b/chromium/v8/src/heap/array-buffer-tracker-inl.h
@@ -21,7 +21,7 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer* buffer) {
const size_t length = buffer->byte_length();
Page* page = Page::FromAddress(buffer->address());
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) {
page->AllocateLocalTracker();
@@ -44,7 +44,7 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
Page* page = Page::FromAddress(buffer->address());
const size_t length = buffer->byte_length();
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
DCHECK_NOT_NULL(tracker);
tracker->Remove(buffer, length);
diff --git a/chromium/v8/src/heap/array-buffer-tracker.cc b/chromium/v8/src/heap/array-buffer-tracker.cc
index f35f2b3754c..25d97950591 100644
--- a/chromium/v8/src/heap/array-buffer-tracker.cc
+++ b/chromium/v8/src/heap/array-buffer-tracker.cc
@@ -37,7 +37,7 @@ void LocalArrayBufferTracker::Process(Callback callback) {
DCHECK_NOT_NULL(new_buffer);
Page* target_page = Page::FromAddress(new_buffer->address());
{
- base::LockGuard<base::Mutex> guard(target_page->mutex());
+ base::MutexGuard guard(target_page->mutex());
LocalArrayBufferTracker* tracker = target_page->local_tracker();
if (tracker == nullptr) {
target_page->AllocateLocalTracker();
@@ -120,7 +120,7 @@ bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
bool ArrayBufferTracker::IsTracked(JSArrayBuffer* buffer) {
Page* page = Page::FromAddress(buffer->address());
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return false;
return tracker->IsTracked(buffer);
diff --git a/chromium/v8/src/heap/barrier.h b/chromium/v8/src/heap/barrier.h
index d945a83d90a..a5a4b512632 100644
--- a/chromium/v8/src/heap/barrier.h
+++ b/chromium/v8/src/heap/barrier.h
@@ -7,6 +7,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
+#include "src/base/platform/time.h"
namespace v8 {
namespace internal {
@@ -14,6 +15,10 @@ namespace internal {
// Barrier that can be used once to synchronize a dynamic number of tasks
// working concurrently.
//
+// The barrier takes a timeout which is used to avoid waiting for too long. If
+// any of the users ever reach the timeout they will disable the barrier and
+// signal others to fall through.
+//
// Usage:
// void RunConcurrently(OneShotBarrier* shared_barrier) {
// shared_barrier->Start();
@@ -31,20 +36,20 @@ namespace internal {
// immediately.
class OneshotBarrier {
public:
- OneshotBarrier() : tasks_(0), waiting_(0), done_(false) {}
+ explicit OneshotBarrier(base::TimeDelta timeout) : timeout_(timeout) {}
void Start() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
tasks_++;
}
void NotifyAll() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (waiting_ > 0) condition_.NotifyAll();
}
bool Wait() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (done_) return true;
DCHECK_LE(waiting_, tasks_);
@@ -54,7 +59,11 @@ class OneshotBarrier {
condition_.NotifyAll();
} else {
// Spurious wakeup is ok here.
- condition_.Wait(&mutex_);
+ if (!condition_.WaitFor(&mutex_, timeout_)) {
+ // If predefined timeout was reached, Stop waiting and signal being done
+ // also to other tasks.
+ done_ = true;
+ }
}
waiting_--;
return done_;
@@ -66,9 +75,10 @@ class OneshotBarrier {
private:
base::ConditionVariable condition_;
base::Mutex mutex_;
- int tasks_;
- int waiting_;
- bool done_;
+ base::TimeDelta timeout_;
+ int tasks_ = 0;
+ int waiting_ = 0;
+ bool done_ = false;
};
} // namespace internal
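
A standalone sketch of the timed one-shot barrier introduced above, using std::mutex and std::condition_variable in place of V8's base primitives; it illustrates the timeout behavior and is not the V8 class itself. As in the usage comment above, callers are expected to loop on Wait() until it returns true:

#include <chrono>
#include <condition_variable>
#include <mutex>

class TimedOneshotBarrier {
 public:
  explicit TimedOneshotBarrier(std::chrono::milliseconds timeout)
      : timeout_(timeout) {}

  void Start() {
    std::lock_guard<std::mutex> guard(mutex_);
    tasks_++;
  }

  void NotifyAll() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (waiting_ > 0) condition_.notify_all();
  }

  bool Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    if (done_) return true;
    waiting_++;
    if (waiting_ == tasks_) {
      // Last task to arrive: finish the barrier and wake the other waiters.
      done_ = true;
      condition_.notify_all();
    } else {
      // Single timed wait; a spurious or NotifyAll() wakeup just returns
      // false to the looping caller. On timeout the barrier is disabled so
      // the remaining waiters eventually fall through too.
      if (condition_.wait_for(lock, timeout_) == std::cv_status::timeout) {
        done_ = true;
      }
    }
    waiting_--;
    return done_;
  }

 private:
  std::condition_variable condition_;
  std::mutex mutex_;
  const std::chrono::milliseconds timeout_;
  int tasks_ = 0;
  int waiting_ = 0;
  bool done_ = false;
};
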
diff --git a/chromium/v8/src/heap/code-stats.cc b/chromium/v8/src/heap/code-stats.cc
index 5d8c2ab527e..ac89f14f602 100644
--- a/chromium/v8/src/heap/code-stats.cc
+++ b/chromium/v8/src/heap/code-stats.cc
@@ -18,14 +18,14 @@ void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject* object,
// Log the size of external source code.
Object* source = script->source();
if (source->IsExternalString()) {
- ExternalString* external_source_string = ExternalString::cast(source);
+ ExternalString external_source_string = ExternalString::cast(source);
int size = isolate->external_script_source_size();
size += external_source_string->ExternalPayloadSize();
isolate->set_external_script_source_size(size);
}
} else if (object->IsAbstractCode()) {
// Record code+metadata statisitcs.
- AbstractCode* abstract_code = AbstractCode::cast(object);
+ AbstractCode abstract_code = AbstractCode::cast(object);
int size = abstract_code->SizeIncludingMetadata();
if (abstract_code->IsCode()) {
size += isolate->code_and_metadata_size();
@@ -207,7 +207,7 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject* obj,
return;
}
- Code* code = Code::cast(obj);
+ Code code = Code::cast(obj);
RelocIterator it(code);
int delta = 0;
Address prev_pc = code->raw_instruction_start();
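
The ExternalString*, AbstractCode* and Code* locals becoming plain ExternalString, AbstractCode and Code values is part of the ObjectPtr migration that runs through this whole commit (the Map* to Map parameter changes below are the same thing): an object reference becomes a one-word value type wrapping the tagged address rather than a raw C++ pointer. A rough sketch of the idea with illustrative names, not the real V8 declarations:

#include <cstdint>

using Address = std::uintptr_t;

// One-word value-type handle: copying it copies a single tagged word.
class TaggedObject {
 public:
  explicit TaggedObject(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }
  bool operator==(TaggedObject other) const { return ptr_ == other.ptr_; }

 private:
  Address ptr_;
};

// A concrete object kind adds typed accessors that decode fields at fixed
// offsets from ptr(); the handle itself carries no extra storage.
class Code : public TaggedObject {
 public:
  using TaggedObject::TaggedObject;
};
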
diff --git a/chromium/v8/src/heap/concurrent-marking.cc b/chromium/v8/src/heap/concurrent-marking.cc
index 5e147ca9a53..024904f83fd 100644
--- a/chromium/v8/src/heap/concurrent-marking.cc
+++ b/chromium/v8/src/heap/concurrent-marking.cc
@@ -20,6 +20,7 @@
#include "src/heap/worklist.h"
#include "src/isolate.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/slots-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -34,7 +35,10 @@ class ConcurrentMarkingState final
: live_bytes_(live_bytes) {}
Bitmap* bitmap(const MemoryChunk* chunk) {
- return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
+ reinterpret_cast<intptr_t>(chunk),
+ MemoryChunk::kMarkBitmapOffset);
+ return chunk->marking_bitmap_;
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -53,10 +57,10 @@ class SlotSnapshot {
public:
SlotSnapshot() : number_of_slots_(0) {}
int number_of_slots() const { return number_of_slots_; }
- Object** slot(int i) const { return snapshot_[i].first; }
+ ObjectSlot slot(int i) const { return snapshot_[i].first; }
Object* value(int i) const { return snapshot_[i].second; }
void clear() { number_of_slots_ = 0; }
- void add(Object** slot, Object* value) {
+ void add(ObjectSlot slot, Object* value) {
snapshot_[number_of_slots_].first = slot;
snapshot_[number_of_slots_].second = value;
++number_of_slots_;
@@ -65,7 +69,7 @@ class SlotSnapshot {
private:
static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kPointerSize;
int number_of_slots_;
- std::pair<Object**, Object*> snapshot_[kMaxSnapshotSize];
+ std::pair<ObjectSlot, Object*> snapshot_[kMaxSnapshotSize];
DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
};
@@ -88,24 +92,31 @@ class ConcurrentMarkingVisitor final
task_id_(task_id),
embedder_tracing_enabled_(embedder_tracing_enabled) {}
- template <typename T>
+ template <typename T, typename = typename std::enable_if<
+ std::is_base_of<Object, T>::value>::type>
static V8_INLINE T* Cast(HeapObject* object) {
return T::cast(object);
}
+ template <typename T, typename = typename std::enable_if<
+ std::is_base_of<ObjectPtr, T>::value>::type>
+ static V8_INLINE T Cast(HeapObject* object) {
+ return T::cast(object);
+ }
+
bool ShouldVisit(HeapObject* object) {
return marking_state_.GreyToBlack(object);
}
bool AllowDefaultJSObjectVisit() { return false; }
- void ProcessStrongHeapObject(HeapObject* host, Object** slot,
+ void ProcessStrongHeapObject(HeapObject* host, ObjectSlot slot,
HeapObject* heap_object) {
MarkObject(heap_object);
MarkCompactCollector::RecordSlot(host, slot, heap_object);
}
- void ProcessWeakHeapObject(HeapObject* host, HeapObjectReference** slot,
+ void ProcessWeakHeapObject(HeapObject* host, HeapObjectSlot slot,
HeapObject* heap_object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
@@ -127,9 +138,10 @@ class ConcurrentMarkingVisitor final
}
}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** slot = start; slot < end; slot++) {
- Object* object = base::AsAtomicPointer::Relaxed_Load(slot);
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot slot = start; slot < end; ++slot) {
+ Object* object = slot.Relaxed_Load();
DCHECK(!HasWeakHeapObjectTag(object));
if (object->IsHeapObject()) {
ProcessStrongHeapObject(host, slot, HeapObject::cast(object));
@@ -137,32 +149,30 @@ class ConcurrentMarkingVisitor final
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
- for (MaybeObject** slot = start; slot < end; slot++) {
- MaybeObject* object = base::AsAtomicPointer::Relaxed_Load(slot);
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
+ for (MaybeObjectSlot slot = start; slot < end; ++slot) {
+ MaybeObject object = slot.Relaxed_Load();
HeapObject* heap_object;
if (object->GetHeapObjectIfStrong(&heap_object)) {
// If the reference changes concurrently from strong to weak, the write
// barrier will treat the weak reference as strong, so we won't miss the
// weak reference.
- ProcessStrongHeapObject(host, reinterpret_cast<Object**>(slot),
- heap_object);
+ ProcessStrongHeapObject(host, ObjectSlot(slot), heap_object);
} else if (object->GetHeapObjectIfWeak(&heap_object)) {
- ProcessWeakHeapObject(
- host, reinterpret_cast<HeapObjectReference**>(slot), heap_object);
+ ProcessWeakHeapObject(host, HeapObjectSlot(slot), heap_object);
}
}
}
// Weak list pointers should be ignored during marking. The lists are
// reconstructed after GC.
- void VisitCustomWeakPointers(HeapObject* host, Object** start,
- Object** end) override {}
+ void VisitCustomWeakPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {}
void VisitPointersInSnapshot(HeapObject* host, const SlotSnapshot& snapshot) {
for (int i = 0; i < snapshot.number_of_slots(); i++) {
- Object** slot = snapshot.slot(i);
+ ObjectSlot slot = snapshot.slot(i);
Object* object = snapshot.value(i);
DCHECK(!HasWeakHeapObjectTag(object));
if (!object->IsHeapObject()) continue;
@@ -176,34 +186,57 @@ class ConcurrentMarkingVisitor final
// JS object =================================================================
// ===========================================================================
- int VisitJSObject(Map* map, JSObject* object) {
+ int VisitJSObject(Map map, JSObject* object) {
return VisitJSObjectSubclass(map, object);
}
- int VisitJSObjectFast(Map* map, JSObject* object) {
+ int VisitJSObjectFast(Map map, JSObject* object) {
return VisitJSObjectSubclass(map, object);
}
- int VisitWasmInstanceObject(Map* map, WasmInstanceObject* object) {
+ int VisitWasmInstanceObject(Map map, WasmInstanceObject* object) {
return VisitJSObjectSubclass(map, object);
}
+ int VisitJSWeakCell(Map map, JSWeakCell* weak_cell) {
+ int size = VisitJSObjectSubclass(map, weak_cell);
+ if (size == 0) {
+ return 0;
+ }
+
+ if (weak_cell->target()->IsHeapObject()) {
+ HeapObject* target = HeapObject::cast(weak_cell->target());
+ if (marking_state_.IsBlackOrGrey(target)) {
+ // Record the slot inside the JSWeakCell, since the
+ // VisitJSObjectSubclass above didn't visit it.
+ ObjectSlot slot =
+ HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
+ MarkCompactCollector::RecordSlot(weak_cell, slot, target);
+ } else {
+ // JSWeakCell points to a potentially dead object. We have to process
+ // them when we know the liveness of the whole transitive closure.
+ weak_objects_->js_weak_cells.Push(task_id_, weak_cell);
+ }
+ }
+ return size;
+ }
+
// Some JS objects can carry back links to embedders that contain information
// relevant to the garbage collectors.
- int VisitJSApiObject(Map* map, JSObject* object) {
+ int VisitJSApiObject(Map map, JSObject* object) {
return VisitEmbedderTracingSubclass(map, object);
}
- int VisitJSArrayBuffer(Map* map, JSArrayBuffer* object) {
+ int VisitJSArrayBuffer(Map map, JSArrayBuffer* object) {
return VisitEmbedderTracingSubclass(map, object);
}
- int VisitJSDataView(Map* map, JSDataView* object) {
+ int VisitJSDataView(Map map, JSDataView* object) {
return VisitEmbedderTracingSubclass(map, object);
}
- int VisitJSTypedArray(Map* map, JSTypedArray* object) {
+ int VisitJSTypedArray(Map map, JSTypedArray* object) {
return VisitEmbedderTracingSubclass(map, object);
}
@@ -211,42 +244,33 @@ class ConcurrentMarkingVisitor final
// Strings with pointers =====================================================
// ===========================================================================
- int VisitConsString(Map* map, ConsString* object) {
+ int VisitConsString(Map map, ConsString object) {
int size = ConsString::BodyDescriptor::SizeOf(map, object);
- const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
- if (!ShouldVisit(object)) return 0;
- VisitPointersInSnapshot(object, snapshot);
- return size;
+ return VisitWithSnapshot(map, object, size, size);
}
- int VisitSlicedString(Map* map, SlicedString* object) {
+ int VisitSlicedString(Map map, SlicedString object) {
int size = SlicedString::BodyDescriptor::SizeOf(map, object);
- const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
- if (!ShouldVisit(object)) return 0;
- VisitPointersInSnapshot(object, snapshot);
- return size;
+ return VisitWithSnapshot(map, object, size, size);
}
- int VisitThinString(Map* map, ThinString* object) {
+ int VisitThinString(Map map, ThinString object) {
int size = ThinString::BodyDescriptor::SizeOf(map, object);
- const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
- if (!ShouldVisit(object)) return 0;
- VisitPointersInSnapshot(object, snapshot);
- return size;
+ return VisitWithSnapshot(map, object, size, size);
}
// ===========================================================================
// Strings without pointers ==================================================
// ===========================================================================
- int VisitSeqOneByteString(Map* map, SeqOneByteString* object) {
+ int VisitSeqOneByteString(Map map, SeqOneByteString object) {
int size = SeqOneByteString::SizeFor(object->synchronized_length());
if (!ShouldVisit(object)) return 0;
VisitMapPointer(object, object->map_slot());
return size;
}
- int VisitSeqTwoByteString(Map* map, SeqTwoByteString* object) {
+ int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
int size = SeqTwoByteString::SizeFor(object->synchronized_length());
if (!ShouldVisit(object)) return 0;
VisitMapPointer(object, object->map_slot());
@@ -257,11 +281,11 @@ class ConcurrentMarkingVisitor final
// Fixed array object ========================================================
// ===========================================================================
- int VisitFixedArray(Map* map, FixedArray* object) {
+ int VisitFixedArray(Map map, FixedArray object) {
return VisitLeftTrimmableArray(map, object);
}
- int VisitFixedDoubleArray(Map* map, FixedDoubleArray* object) {
+ int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
return VisitLeftTrimmableArray(map, object);
}
@@ -269,7 +293,7 @@ class ConcurrentMarkingVisitor final
// Code object ===============================================================
// ===========================================================================
- int VisitCode(Map* map, Code* object) {
+ int VisitCode(Map map, Code object) {
bailout_.Push(object);
return 0;
}
@@ -278,7 +302,7 @@ class ConcurrentMarkingVisitor final
// Side-effectful visitation.
// ===========================================================================
- int VisitBytecodeArray(Map* map, BytecodeArray* object) {
+ int VisitBytecodeArray(Map map, BytecodeArray object) {
if (!ShouldVisit(object)) return 0;
int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
@@ -287,7 +311,7 @@ class ConcurrentMarkingVisitor final
return size;
}
- int VisitMap(Map* meta_map, Map* map) {
+ int VisitMap(Map meta_map, Map map) {
if (marking_state_.IsGrey(map)) {
// Maps have ad-hoc weakness for descriptor arrays. They also clear the
// code-cache. Conservatively visit strong fields skipping the
@@ -304,7 +328,7 @@ class ConcurrentMarkingVisitor final
return 0;
}
- int VisitTransitionArray(Map* map, TransitionArray* array) {
+ int VisitTransitionArray(Map map, TransitionArray* array) {
if (!ShouldVisit(array)) return 0;
VisitMapPointer(array, array->map_slot());
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
@@ -313,21 +337,21 @@ class ConcurrentMarkingVisitor final
return size;
}
- int VisitJSWeakCollection(Map* map, JSWeakCollection* object) {
+ int VisitJSWeakCollection(Map map, JSWeakCollection* object) {
return VisitJSObjectSubclass(map, object);
}
- int VisitEphemeronHashTable(Map* map, EphemeronHashTable* table) {
+ int VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
if (!ShouldVisit(table)) return 0;
weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
for (int i = 0; i < table->Capacity(); i++) {
- Object** key_slot =
+ ObjectSlot key_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
HeapObject* key = HeapObject::cast(table->KeyAt(i));
MarkCompactCollector::RecordSlot(table, key_slot, key);
- Object** value_slot =
+ ObjectSlot value_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
if (marking_state_.IsBlackOrGrey(key)) {
@@ -391,40 +415,41 @@ class ConcurrentMarkingVisitor final
slot_snapshot_->clear();
}
- void VisitPointers(HeapObject* host, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) {
- Object* object = reinterpret_cast<Object*>(
- base::Relaxed_Load(reinterpret_cast<const base::AtomicWord*>(p)));
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) {
+ Object* object = p.Relaxed_Load();
slot_snapshot_->add(p, object);
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
// This should never happen, because we don't use snapshotting for objects
// which contain weak references.
UNREACHABLE();
}
+ void VisitCustomWeakPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
+ DCHECK(host->IsJSWeakCell());
+ }
+
private:
SlotSnapshot* slot_snapshot_;
};
template <typename T>
- int VisitJSObjectSubclass(Map* map, T* object) {
+ int VisitJSObjectSubclass(Map map, T* object) {
int size = T::BodyDescriptor::SizeOf(map, object);
int used_size = map->UsedInstanceSize();
DCHECK_LE(used_size, size);
DCHECK_GE(used_size, T::kHeaderSize);
- const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
- if (!ShouldVisit(object)) return 0;
- VisitPointersInSnapshot(object, snapshot);
- return size;
+ return VisitWithSnapshot(map, object, used_size, size);
}
template <typename T>
- int VisitEmbedderTracingSubclass(Map* map, T* object) {
+ int VisitEmbedderTracingSubclass(Map map, T* object) {
DCHECK(object->IsApiWrapper());
int size = VisitJSObjectSubclass(map, object);
if (size && embedder_tracing_enabled_) {
@@ -436,7 +461,7 @@ class ConcurrentMarkingVisitor final
}
template <typename T>
- int VisitLeftTrimmableArray(Map* map, T* object) {
+ int VisitLeftTrimmableArray(Map map, T object) {
// The synchronized_length() function checks that the length is a Smi.
// This is not necessarily the case if the array is being left-trimmed.
Object* length = object->unchecked_synchronized_length();
@@ -451,11 +476,20 @@ class ConcurrentMarkingVisitor final
}
template <typename T>
- const SlotSnapshot& MakeSlotSnapshot(Map* map, T* object, int size) {
+ int VisitWithSnapshot(Map map, T object, int used_size, int size) {
+ const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
+ template <typename T>
+ const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
SlotSnapshottingVisitor visitor(&slot_snapshot_);
- visitor.VisitPointer(object,
- reinterpret_cast<Object**>(object->map_slot()));
- T::BodyDescriptor::IterateBody(map, object, size, &visitor);
+ visitor.VisitPointer(object, ObjectSlot(object->map_slot().address()));
+ // TODO(3770): Drop std::remove_pointer after the migration.
+ std::remove_pointer<T>::type::BodyDescriptor::IterateBody(map, object, size,
+ &visitor);
return slot_snapshot_;
}
@@ -470,36 +504,36 @@ class ConcurrentMarkingVisitor final
};
// Strings can change maps due to conversion to thin string or external strings.
-// Use reinterpret cast to avoid data race in slow dchecks.
+// Use unchecked cast to avoid data race in slow dchecks.
template <>
-ConsString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<ConsString*>(object);
+ConsString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return ConsString::unchecked_cast(object);
}
template <>
-SlicedString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<SlicedString*>(object);
+SlicedString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return SlicedString::unchecked_cast(object);
}
template <>
-ThinString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<ThinString*>(object);
+ThinString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return ThinString::unchecked_cast(object);
}
template <>
-SeqOneByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<SeqOneByteString*>(object);
+SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return SeqOneByteString::unchecked_cast(object);
}
template <>
-SeqTwoByteString* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<SeqTwoByteString*>(object);
+SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return SeqTwoByteString::unchecked_cast(object);
}
// Fixed array can become a free space during left trimming.
template <>
-FixedArray* ConcurrentMarkingVisitor::Cast(HeapObject* object) {
- return reinterpret_cast<FixedArray*>(object);
+FixedArray ConcurrentMarkingVisitor::Cast(HeapObject* object) {
+ return FixedArray::unchecked_cast(object);
}
class ConcurrentMarking::Task : public CancelableTask {
@@ -538,7 +572,7 @@ ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
embedder_objects_(embedder_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
- CHECK(!FLAG_concurrent_marking);
+ CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
#endif
}
@@ -583,13 +617,14 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
break;
}
objects_processed++;
- Address new_space_top = heap_->new_space()->original_top();
- Address new_space_limit = heap_->new_space()->original_limit();
+ // The order of the two loads is important.
+ Address new_space_top = heap_->new_space()->original_top_acquire();
+ Address new_space_limit = heap_->new_space()->original_limit_relaxed();
Address addr = object->address();
if (new_space_top <= addr && addr < new_space_limit) {
on_hold_->Push(task_id, object);
} else {
- Map* map = object->synchronized_map();
+ Map map = object->synchronized_map();
current_marked_bytes += visitor.Visit(map, object);
}
}
@@ -624,6 +659,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
weak_objects_->next_ephemerons.FlushToGlobal(task_id);
weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
weak_objects_->weak_references.FlushToGlobal(task_id);
+ weak_objects_->js_weak_cells.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_ += marked_bytes;
@@ -632,7 +668,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
{
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
is_pending_[task_id] = false;
--pending_task_count_;
pending_condition_.NotifyAll();
@@ -646,9 +682,9 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
void ConcurrentMarking::ScheduleTasks() {
+ DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
DCHECK(!heap_->IsTearingDown());
- if (!FLAG_concurrent_marking) return;
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
DCHECK_EQ(0, pending_task_count_);
if (task_count_ == 0) {
static const int num_cores =
@@ -685,9 +721,10 @@ void ConcurrentMarking::ScheduleTasks() {
}
void ConcurrentMarking::RescheduleTasksIfNeeded() {
- if (!FLAG_concurrent_marking || heap_->IsTearingDown()) return;
+ DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
+ if (heap_->IsTearingDown()) return;
{
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
if (pending_task_count_ > 0) return;
}
if (!shared_->IsGlobalPoolEmpty() ||
@@ -698,8 +735,8 @@ void ConcurrentMarking::RescheduleTasksIfNeeded() {
}
bool ConcurrentMarking::Stop(StopRequest stop_request) {
- if (!FLAG_concurrent_marking) return false;
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
+ base::MutexGuard guard(&pending_lock_);
if (pending_task_count_ == 0) return false;
@@ -709,7 +746,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
for (int i = 1; i <= task_count_; i++) {
if (is_pending_[i]) {
if (task_manager->TryAbort(cancelable_id_[i]) ==
- CancelableTaskManager::kTaskAborted) {
+ TryAbortResult::kTaskAborted) {
is_pending_[i] = false;
--pending_task_count_;
} else if (stop_request == StopRequest::PREEMPT_TASKS) {
@@ -730,7 +767,7 @@ bool ConcurrentMarking::Stop(StopRequest stop_request) {
bool ConcurrentMarking::IsStopped() {
if (!FLAG_concurrent_marking) return true;
- base::LockGuard<base::Mutex> guard(&pending_lock_);
+ base::MutexGuard guard(&pending_lock_);
return pending_task_count_ == 0;
}
@@ -772,8 +809,9 @@ size_t ConcurrentMarking::TotalMarkedBytes() {
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
: concurrent_marking_(concurrent_marking),
- resume_on_exit_(concurrent_marking_->Stop(
- ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
+ resume_on_exit_(FLAG_concurrent_marking &&
+ concurrent_marking_->Stop(
+ ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}
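
Much of the churn in this file replaces raw Object** slot pointers and base::AsAtomicPointer loads with the typed ObjectSlot/MaybeObjectSlot wrappers and their Relaxed_Load() accessors. A compilable sketch of that slot pattern, using <atomic> in place of V8's atomics helpers; ObjectSlot and Object here are illustrative stand-ins, not the real declarations:

#include <atomic>
#include <cstdint>

using Address = std::uintptr_t;
struct Object;  // opaque in this sketch

class ObjectSlot {
 public:
  explicit ObjectSlot(Address address) : address_(address) {}

  // Relaxed atomic accessors, mirroring slot.Relaxed_Load() above.
  Object* Relaxed_Load() const {
    auto* location = reinterpret_cast<std::atomic<Object*>*>(address_);
    return location->load(std::memory_order_relaxed);
  }
  void Relaxed_Store(Object* value) const {
    auto* location = reinterpret_cast<std::atomic<Object*>*>(address_);
    location->store(value, std::memory_order_relaxed);
  }

  // Iteration support for loops like: for (ObjectSlot s = start; s < end; ++s)
  ObjectSlot& operator++() {
    address_ += sizeof(Address);
    return *this;
  }
  bool operator<(ObjectSlot other) const { return address_ < other.address_; }
  Address address() const { return address_; }

 private:
  Address address_;
};
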
diff --git a/chromium/v8/src/heap/embedder-tracing.cc b/chromium/v8/src/heap/embedder-tracing.cc
index 198cdd4b1a6..55066956a78 100644
--- a/chromium/v8/src/heap/embedder-tracing.cc
+++ b/chromium/v8/src/heap/embedder-tracing.cc
@@ -5,22 +5,31 @@
#include "src/heap/embedder-tracing.h"
#include "src/base/logging.h"
+#include "src/objects/embedder-data-slot.h"
+#include "src/objects/js-objects-inl.h"
namespace v8 {
namespace internal {
+void LocalEmbedderHeapTracer::SetRemoteTracer(EmbedderHeapTracer* tracer) {
+ if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
+
+ remote_tracer_ = tracer;
+ if (remote_tracer_)
+ remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
+}
+
void LocalEmbedderHeapTracer::TracePrologue() {
if (!InUse()) return;
- CHECK(cached_wrappers_to_trace_.empty());
num_v8_marking_worklist_was_empty_ = 0;
+ embedder_worklist_empty_ = false;
remote_tracer_->TracePrologue();
}
void LocalEmbedderHeapTracer::TraceEpilogue() {
if (!InUse()) return;
- CHECK(cached_wrappers_to_trace_.empty());
remote_tracer_->TraceEpilogue();
}
@@ -36,37 +45,58 @@ void LocalEmbedderHeapTracer::EnterFinalPause() {
bool LocalEmbedderHeapTracer::Trace(double deadline) {
if (!InUse()) return true;
- DCHECK_EQ(0, NumberOfCachedWrappersToTrace());
return remote_tracer_->AdvanceTracing(deadline);
}
bool LocalEmbedderHeapTracer::IsRemoteTracingDone() {
- return (InUse()) ? cached_wrappers_to_trace_.empty() &&
- remote_tracer_->IsTracingDone()
- : true;
+ return !InUse() || remote_tracer_->IsTracingDone();
}
-void LocalEmbedderHeapTracer::RegisterWrappersWithRemoteTracer() {
+void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
+ EmbedderHeapTracer::EmbedderStackState stack_state) {
if (!InUse()) return;
- if (cached_wrappers_to_trace_.empty()) {
- return;
- }
+ embedder_stack_state_ = stack_state;
+}
- remote_tracer_->RegisterV8References(cached_wrappers_to_trace_);
- cached_wrappers_to_trace_.clear();
+LocalEmbedderHeapTracer::ProcessingScope::ProcessingScope(
+ LocalEmbedderHeapTracer* tracer)
+ : tracer_(tracer) {
+ wrapper_cache_.reserve(kWrapperCacheSize);
}
-bool LocalEmbedderHeapTracer::RequiresImmediateWrapperProcessing() {
- const size_t kTooManyWrappers = 16000;
- return cached_wrappers_to_trace_.size() > kTooManyWrappers;
+LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() {
+ if (!wrapper_cache_.empty()) {
+ tracer_->remote_tracer()->RegisterV8References(std::move(wrapper_cache_));
+ }
}
-void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
- EmbedderHeapTracer::EmbedderStackState stack_state) {
- if (!InUse()) return;
+void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
+ JSObject* js_object) {
+ DCHECK(js_object->IsApiWrapper());
+ if (js_object->GetEmbedderFieldCount() < 2) return;
- embedder_stack_state_ = stack_state;
+ void* pointer0;
+ void* pointer1;
+ if (EmbedderDataSlot(js_object, 0).ToAlignedPointer(&pointer0) && pointer0 &&
+ EmbedderDataSlot(js_object, 1).ToAlignedPointer(&pointer1)) {
+ wrapper_cache_.push_back({pointer0, pointer1});
+ }
+ FlushWrapperCacheIfFull();
+}
+
+void LocalEmbedderHeapTracer::ProcessingScope::FlushWrapperCacheIfFull() {
+ if (wrapper_cache_.size() == wrapper_cache_.capacity()) {
+ tracer_->remote_tracer()->RegisterV8References(std::move(wrapper_cache_));
+ wrapper_cache_.clear();
+ wrapper_cache_.reserve(kWrapperCacheSize);
+ }
+}
+
+void LocalEmbedderHeapTracer::ProcessingScope::AddWrapperInfoForTesting(
+ WrapperInfo info) {
+ wrapper_cache_.push_back(info);
+ FlushWrapperCacheIfFull();
}
} // namespace internal
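
The new ProcessingScope replaces the old cached_wrappers_to_trace_ plumbing: wrapper pairs are buffered in a scope-local vector and handed to the remote tracer either when the buffer fills up or when the scope is destroyed. A self-contained sketch of that batching pattern; Consumer stands in for EmbedderHeapTracer and its RegisterV8References() hook:

#include <cstddef>
#include <utility>
#include <vector>

using WrapperInfo = std::pair<void*, void*>;

class Consumer {
 public:
  virtual ~Consumer() = default;
  virtual void RegisterV8References(std::vector<WrapperInfo> infos) = 0;
};

class ProcessingScope {
 public:
  explicit ProcessingScope(Consumer* consumer) : consumer_(consumer) {
    cache_.reserve(kCacheSize);
  }

  ~ProcessingScope() {
    // Flush whatever is left when the scope ends.
    if (!cache_.empty()) consumer_->RegisterV8References(std::move(cache_));
  }

  void Add(WrapperInfo info) {
    cache_.push_back(info);
    if (cache_.size() >= kCacheSize) {
      // Hand off a full batch and start a fresh one.
      consumer_->RegisterV8References(std::move(cache_));
      cache_.clear();
      cache_.reserve(kCacheSize);
    }
  }

 private:
  static constexpr std::size_t kCacheSize = 1000;
  Consumer* const consumer_;
  std::vector<WrapperInfo> cache_;
};
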
diff --git a/chromium/v8/src/heap/embedder-tracing.h b/chromium/v8/src/heap/embedder-tracing.h
index 2588200db9a..6873da56cd1 100644
--- a/chromium/v8/src/heap/embedder-tracing.h
+++ b/chromium/v8/src/heap/embedder-tracing.h
@@ -13,74 +13,98 @@ namespace v8 {
namespace internal {
class Heap;
+class JSObject;
class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
public:
typedef std::pair<void*, void*> WrapperInfo;
+ typedef std::vector<WrapperInfo> WrapperCache;
- explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
+ class V8_EXPORT_PRIVATE ProcessingScope {
+ public:
+ explicit ProcessingScope(LocalEmbedderHeapTracer* tracer);
+ ~ProcessingScope();
- ~LocalEmbedderHeapTracer() {
- if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
- }
+ void TracePossibleWrapper(JSObject* js_object);
- EmbedderHeapTracer* remote_tracer() const { return remote_tracer_; }
+ void AddWrapperInfoForTesting(WrapperInfo info);
- void SetRemoteTracer(EmbedderHeapTracer* tracer) {
- if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
+ private:
+ static constexpr size_t kWrapperCacheSize = 1000;
+
+ void FlushWrapperCacheIfFull();
- remote_tracer_ = tracer;
- if (remote_tracer_)
- remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
+ LocalEmbedderHeapTracer* const tracer_;
+ WrapperCache wrapper_cache_;
+ };
+
+ explicit LocalEmbedderHeapTracer(Isolate* isolate) : isolate_(isolate) {}
+
+ ~LocalEmbedderHeapTracer() {
+ if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
}
bool InUse() const { return remote_tracer_ != nullptr; }
+ EmbedderHeapTracer* remote_tracer() const { return remote_tracer_; }
+ void SetRemoteTracer(EmbedderHeapTracer* tracer);
void TracePrologue();
void TraceEpilogue();
void EnterFinalPause();
bool Trace(double deadline);
bool IsRemoteTracingDone();
- size_t NumberOfCachedWrappersToTrace() {
- return cached_wrappers_to_trace_.size();
- }
- void AddWrapperToTrace(WrapperInfo entry) {
- cached_wrappers_to_trace_.push_back(entry);
- }
- void ClearCachedWrappersToTrace() { cached_wrappers_to_trace_.clear(); }
- void RegisterWrappersWithRemoteTracer();
-
- // In order to avoid running out of memory we force tracing wrappers if there
- // are too many of them.
- bool RequiresImmediateWrapperProcessing();
-
void NotifyV8MarkingWorklistWasEmpty() {
num_v8_marking_worklist_was_empty_++;
}
+
bool ShouldFinalizeIncrementalMarking() {
static const size_t kMaxIncrementalFixpointRounds = 3;
return !FLAG_incremental_marking_wrappers || !InUse() ||
- IsRemoteTracingDone() ||
+ (IsRemoteTracingDone() && embedder_worklist_empty_) ||
num_v8_marking_worklist_was_empty_ > kMaxIncrementalFixpointRounds;
}
void SetEmbedderStackStateForNextFinalization(
EmbedderHeapTracer::EmbedderStackState stack_state);
- private:
- typedef std::vector<WrapperInfo> WrapperCache;
+ void SetEmbedderWorklistEmpty(bool is_empty) {
+ embedder_worklist_empty_ = is_empty;
+ }
+ private:
Isolate* const isolate_;
- WrapperCache cached_wrappers_to_trace_;
EmbedderHeapTracer* remote_tracer_ = nullptr;
+
size_t num_v8_marking_worklist_was_empty_ = 0;
EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ =
EmbedderHeapTracer::kUnknown;
+ // Indicates whether the embedder worklist was observed empty on the main
+ // thread. This is opportunistic as concurrent marking tasks may hold local
+ // segments of potential embedder fields to move to the main thread.
+ bool embedder_worklist_empty_ = false;
friend class EmbedderStackStateScope;
};
+class V8_EXPORT_PRIVATE EmbedderStackStateScope final {
+ public:
+ EmbedderStackStateScope(LocalEmbedderHeapTracer* local_tracer,
+ EmbedderHeapTracer::EmbedderStackState stack_state)
+ : local_tracer_(local_tracer),
+ old_stack_state_(local_tracer_->embedder_stack_state_) {
+ local_tracer_->embedder_stack_state_ = stack_state;
+ }
+
+ ~EmbedderStackStateScope() {
+ local_tracer_->embedder_stack_state_ = old_stack_state_;
+ }
+
+ private:
+ LocalEmbedderHeapTracer* const local_tracer_;
+ const EmbedderHeapTracer::EmbedderStackState old_stack_state_;
+};
+
} // namespace internal
} // namespace v8
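
EmbedderStackStateScope, added above, is an ordinary RAII save-and-restore guard: it records the current stack state, overrides it for the lifetime of the scope, and restores the old value in its destructor. The same pattern in generic form; ScopedOverride is a hypothetical helper, not part of V8:

template <typename T>
class ScopedOverride {
 public:
  ScopedOverride(T* location, T new_value)
      : location_(location), old_value_(*location) {
    *location_ = new_value;  // override for the lifetime of the scope
  }
  ~ScopedOverride() { *location_ = old_value_; }  // restore on exit

  ScopedOverride(const ScopedOverride&) = delete;
  ScopedOverride& operator=(const ScopedOverride&) = delete;

 private:
  T* const location_;
  const T old_value_;
};
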
diff --git a/chromium/v8/src/heap/factory-inl.h b/chromium/v8/src/heap/factory-inl.h
index eb1661aaee9..691a0cc09aa 100644
--- a/chromium/v8/src/heap/factory-inl.h
+++ b/chromium/v8/src/heap/factory-inl.h
@@ -16,10 +16,11 @@
namespace v8 {
namespace internal {
-#define ROOT_ACCESSOR(type, name, CamelName) \
- Handle<type> Factory::name() { \
- return Handle<type>(bit_cast<type**>( \
- &isolate()->heap()->roots_[RootIndex::k##CamelName])); \
+// TODO(jkummerow): Drop std::remove_pointer after the migration to ObjectPtr.
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ Handle<std::remove_pointer<Type>::type> Factory::name() { \
+ return Handle<std::remove_pointer<Type>::type>(bit_cast<Address*>( \
+ &isolate()->roots_table()[RootIndex::k##CamelName])); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
diff --git a/chromium/v8/src/heap/factory.cc b/chromium/v8/src/heap/factory.cc
index 9535eb4b88f..836a9cba759 100644
--- a/chromium/v8/src/heap/factory.cc
+++ b/chromium/v8/src/heap/factory.cc
@@ -13,6 +13,7 @@
#include "src/builtins/constants-table-builder.h"
#include "src/compiler.h"
#include "src/conversions.h"
+#include "src/counters.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
@@ -20,14 +21,16 @@
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/instance-type-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/microtask-inl.h"
-#include "src/objects/microtask-queue-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/scope-info.h"
@@ -94,7 +97,7 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
auto builder = heap->isolate()->builtins_constants_table_builder();
if (builder != nullptr) builder->PatchSelfReference(self_ref, code);
}
- *(self_ref.location()) = *code;
+ *(self_ref.location()) = code->ptr();
}
// Migrate generated code.
@@ -115,7 +118,7 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
HeapObject* Factory::AllocateRawWithImmortalMap(int size,
PretenureFlag pretenure,
- Map* map,
+ Map map,
AllocationAlignment alignment) {
HeapObject* result = isolate()->heap()->AllocateRawWithRetryOrFail(
size, Heap::SelectSpace(pretenure), alignment);
@@ -209,7 +212,7 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<PrototypeInfo> result =
Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE, TENURED));
- result->set_prototype_users(*empty_weak_array_list());
+ result->set_prototype_users(Smi::kZero);
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
result->set_bit_field(0);
result->set_module_namespace(*undefined_value());
@@ -293,8 +296,8 @@ Handle<FixedArray> Factory::NewFixedArrayWithFiller(RootIndex map_root_index,
int length, Object* filler,
PretenureFlag pretenure) {
HeapObject* result = AllocateRawFixedArray(length, pretenure);
- DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
- Map* map = Map::cast(isolate()->heap()->root(map_root_index));
+ DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
+ Map map = Map::cast(isolate()->root(map_root_index));
result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
Handle<FixedArray> array(FixedArray::cast(result), isolate());
array->set_length(length);
@@ -326,13 +329,12 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
HeapObject* result =
AllocateRawArray(WeakFixedArray::SizeFor(length), pretenure);
- Map* map = Map::cast(isolate()->heap()->root(map_root_index));
+ Map map = Map::cast(isolate()->root(map_root_index));
result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
array->set_length(length);
- MemsetPointer(array->data_start(),
- HeapObjectReference::Strong(*undefined_value()), length);
+ MemsetPointer(ObjectSlot(array->data_start()), *undefined_value(), length);
return Handle<T>::cast(array);
}
@@ -340,10 +342,6 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
template Handle<FixedArray> Factory::NewFixedArrayWithMap<FixedArray>(
RootIndex, int, PretenureFlag);
-template Handle<DescriptorArray>
-Factory::NewWeakFixedArrayWithMap<DescriptorArray>(RootIndex, int,
- PretenureFlag);
-
Handle<FixedArray> Factory::NewFixedArray(int length, PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
@@ -357,12 +355,11 @@ Handle<WeakFixedArray> Factory::NewWeakFixedArray(int length,
if (length == 0) return empty_weak_fixed_array();
HeapObject* result =
AllocateRawArray(WeakFixedArray::SizeFor(length), pretenure);
- DCHECK(Heap::RootIsImmortalImmovable(RootIndex::kWeakFixedArrayMap));
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kWeakFixedArrayMap));
result->set_map_after_allocation(*weak_fixed_array_map(), SKIP_WRITE_BARRIER);
Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
array->set_length(length);
- MemsetPointer(array->data_start(),
- HeapObjectReference::Strong(*undefined_value()), length);
+ MemsetPointer(ObjectSlot(array->data_start()), *undefined_value(), length);
return array;
}
@@ -427,11 +424,29 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
vector->set_profiler_ticks(0);
vector->set_deopt_count(0);
// TODO(leszeks): Initialize based on the feedback metadata.
- MemsetPointer(vector->slots_start(),
- MaybeObject::FromObject(*undefined_value()), length);
+ MemsetPointer(ObjectSlot(vector->slots_start()), *undefined_value(), length);
return vector;
}
+Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(
+ int length, PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ int size = EmbedderDataArray::SizeFor(length);
+
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, pretenure, *embedder_data_array_map());
+ Handle<EmbedderDataArray> array(EmbedderDataArray::cast(result), isolate());
+ array->set_length(length);
+
+ if (length > 0) {
+ ObjectSlot start(array->slots_start());
+ ObjectSlot end(array->slots_end());
+ size_t slot_count = end - start;
+ MemsetPointer(start, *undefined_value(), slot_count);
+ }
+ return array;
+}
+
Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
int boilerplate, int all_properties, int index_keys, bool has_seen_proto) {
DCHECK_GE(boilerplate, 0);
@@ -475,7 +490,7 @@ Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length,
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
int size = FixedDoubleArray::SizeFor(length);
- Map* map = *fixed_double_array_map();
+ Map map = *fixed_double_array_map();
HeapObject* result =
AllocateRawWithImmortalMap(size, pretenure, map, kDoubleAligned);
Handle<FixedDoubleArray> array(FixedDoubleArray::cast(result), isolate());
@@ -520,34 +535,36 @@ Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
return Handle<FrameArray>::cast(result);
}
-Handle<SmallOrderedHashSet> Factory::NewSmallOrderedHashSet(
- int capacity, PretenureFlag pretenure) {
+template <typename T>
+Handle<T> Factory::AllocateSmallOrderedHashTable(Handle<Map> map, int capacity,
+ PretenureFlag pretenure) {
DCHECK_LE(0, capacity);
- CHECK_LE(capacity, SmallOrderedHashSet::kMaxCapacity);
- DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor);
+ CHECK_LE(capacity, T::kMaxCapacity);
+ DCHECK_EQ(0, capacity % T::kLoadFactor);
- int size = SmallOrderedHashSet::SizeFor(capacity);
- Map* map = *small_ordered_hash_set_map();
- HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
- Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result),
- isolate());
+ int size = T::SizeFor(capacity);
+ HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, *map);
+ Handle<T> table(T::cast(result), isolate());
table->Initialize(isolate(), capacity);
return table;
}
+Handle<SmallOrderedHashSet> Factory::NewSmallOrderedHashSet(
+ int capacity, PretenureFlag pretenure) {
+ return AllocateSmallOrderedHashTable<SmallOrderedHashSet>(
+ small_ordered_hash_set_map(), capacity, pretenure);
+}
+
Handle<SmallOrderedHashMap> Factory::NewSmallOrderedHashMap(
int capacity, PretenureFlag pretenure) {
- DCHECK_LE(0, capacity);
- CHECK_LE(capacity, SmallOrderedHashMap::kMaxCapacity);
- DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor);
+ return AllocateSmallOrderedHashTable<SmallOrderedHashMap>(
+ small_ordered_hash_map_map(), capacity, pretenure);
+}
- int size = SmallOrderedHashMap::SizeFor(capacity);
- Map* map = *small_ordered_hash_map_map();
- HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
- Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result),
- isolate());
- table->Initialize(isolate(), capacity);
- return table;
+Handle<SmallOrderedNameDictionary> Factory::NewSmallOrderedNameDictionary(
+ int capacity, PretenureFlag pretenure) {
+ return AllocateSmallOrderedHashTable<SmallOrderedNameDictionary>(
+ small_ordered_name_dictionary_map(), capacity, pretenure);
}
Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
@@ -558,6 +575,11 @@ Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kMinCapacity);
}
+Handle<OrderedNameDictionary> Factory::NewOrderedNameDictionary() {
+ return OrderedNameDictionary::Allocate(isolate(),
+ OrderedNameDictionary::kMinCapacity);
+}
+
Handle<AccessorPair> Factory::NewAccessorPair() {
Handle<AccessorPair> accessors =
Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE, TENURED));
@@ -595,6 +617,7 @@ Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
PretenureFlag pretenure) {
+ DCHECK_NE(pretenure, TENURED_READ_ONLY);
int length = string.length();
if (length == 0) return empty_string();
if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
@@ -612,6 +635,7 @@ MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
PretenureFlag pretenure) {
+ DCHECK_NE(pretenure, TENURED_READ_ONLY);
// Check for ASCII first since this is the common case.
const char* ascii_data = string.start();
int length = string.length();
@@ -638,6 +662,7 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
// Copy ASCII portion.
+ DisallowHeapAllocation no_gc;
uint16_t* data = result->GetChars();
for (int i = 0; i < non_ascii_start; i++) {
*data++ = *ascii_data++;
@@ -651,23 +676,31 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
MaybeHandle<String> Factory::NewStringFromUtf8SubString(
Handle<SeqOneByteString> str, int begin, int length,
PretenureFlag pretenure) {
- const char* ascii_data =
- reinterpret_cast<const char*>(str->GetChars() + begin);
- int non_ascii_start = String::NonAsciiStart(ascii_data, length);
+ Access<UnicodeCache::Utf8Decoder> decoder(
+ isolate()->unicode_cache()->utf8_decoder());
+ int non_ascii_start;
+ int utf16_length = 0;
+ {
+ DisallowHeapAllocation no_gc;
+ const char* ascii_data =
+ reinterpret_cast<const char*>(str->GetChars() + begin);
+ non_ascii_start = String::NonAsciiStart(ascii_data, length);
+ if (non_ascii_start < length) {
+ // Non-ASCII and we need to decode.
+ auto non_ascii = Vector<const char>(ascii_data + non_ascii_start,
+ length - non_ascii_start);
+ decoder->Reset(non_ascii);
+
+ utf16_length = static_cast<int>(decoder->Utf16Length());
+ }
+ }
+
if (non_ascii_start >= length) {
// If the string is ASCII, we can just make a substring.
// TODO(v8): the pretenure flag is ignored in this case.
return NewSubString(str, begin, begin + length);
}
- // Non-ASCII and we need to decode.
- auto non_ascii = Vector<const char>(ascii_data + non_ascii_start,
- length - non_ascii_start);
- Access<UnicodeCache::Utf8Decoder> decoder(
- isolate()->unicode_cache()->utf8_decoder());
- decoder->Reset(non_ascii);
-
- int utf16_length = static_cast<int>(decoder->Utf16Length());
DCHECK_GT(utf16_length, 0);
// Allocate string.
@@ -678,9 +711,11 @@ MaybeHandle<String> Factory::NewStringFromUtf8SubString(
// Update pointer references, since the original string may have moved after
// allocation.
- ascii_data = reinterpret_cast<const char*>(str->GetChars() + begin);
- non_ascii = Vector<const char>(ascii_data + non_ascii_start,
- length - non_ascii_start);
+ DisallowHeapAllocation no_gc;
+ const char* ascii_data =
+ reinterpret_cast<const char*>(str->GetChars() + begin);
+ auto non_ascii = Vector<const char>(ascii_data + non_ascii_start,
+ length - non_ascii_start);
// Copy ASCII portion.
uint16_t* data = result->GetChars();
@@ -696,18 +731,21 @@ MaybeHandle<String> Factory::NewStringFromUtf8SubString(
MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
int length,
PretenureFlag pretenure) {
+ DCHECK_NE(pretenure, TENURED_READ_ONLY);
if (length == 0) return empty_string();
if (String::IsOneByte(string, length)) {
if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
Handle<SeqOneByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
NewRawOneByteString(length, pretenure), String);
+ DisallowHeapAllocation no_gc;
CopyChars(result->GetChars(), string, length);
return result;
} else {
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
NewRawTwoByteString(length, pretenure), String);
+ DisallowHeapAllocation no_gc;
CopyChars(result->GetChars(), string, length);
return result;
}
@@ -773,11 +811,10 @@ Handle<SeqOneByteString> Factory::AllocateRawOneByteInternalizedString(
int length, uint32_t hash_field) {
CHECK_GE(String::kMaxLength, length);
// The canonical empty_string is the only zero-length string we allow.
- DCHECK_IMPLIES(
- length == 0,
- isolate()->heap()->roots_[RootIndex::kempty_string] == nullptr);
+ DCHECK_IMPLIES(length == 0,
+ isolate()->roots_table()[RootIndex::kempty_string] == nullptr);
- Map* map = *one_byte_internalized_string_map();
+ Map map = *one_byte_internalized_string_map();
int size = SeqOneByteString::SizeFor(length);
HeapObject* result = AllocateRawWithImmortalMap(
size,
@@ -796,13 +833,14 @@ Handle<String> Factory::AllocateTwoByteInternalizedString(
CHECK_GE(String::kMaxLength, str.length());
DCHECK_NE(0, str.length()); // Use Heap::empty_string() instead.
- Map* map = *internalized_string_map();
+ Map map = *internalized_string_map();
int size = SeqTwoByteString::SizeFor(str.length());
HeapObject* result = AllocateRawWithImmortalMap(size, TENURED, map);
Handle<SeqTwoByteString> answer(SeqTwoByteString::cast(result), isolate());
answer->set_length(str.length());
answer->set_hash_field(hash_field);
DCHECK_EQ(size, answer->Size());
+ DisallowHeapAllocation no_gc;
// Fill in the characters.
MemCopy(answer->GetChars(), str.start(), str.length() * kUC16Size);
@@ -818,7 +856,7 @@ Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
// Compute map and object size.
int size;
- Map* map;
+ Map map;
if (is_one_byte) {
map = *one_byte_internalized_string_map();
size = SeqOneByteString::SizeFor(chars);
@@ -836,6 +874,7 @@ Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
answer->set_length(chars);
answer->set_hash_field(hash_field);
DCHECK_EQ(size, answer->Size());
+ DisallowHeapAllocation no_gc;
if (is_one_byte) {
WriteOneByteData(t, SeqOneByteString::cast(*answer)->GetChars(), chars);
@@ -851,6 +890,7 @@ Handle<String> Factory::NewInternalizedStringFromUtf8(Vector<const char> str,
if (IsOneByte(str, chars)) {
Handle<SeqOneByteString> result =
AllocateRawOneByteInternalizedString(str.length(), hash_field);
+ DisallowHeapAllocation no_allocation;
MemCopy(result->GetChars(), str.start(), str.length());
return result;
}
@@ -861,6 +901,7 @@ Handle<String> Factory::NewOneByteInternalizedString(Vector<const uint8_t> str,
uint32_t hash_field) {
Handle<SeqOneByteString> result =
AllocateRawOneByteInternalizedString(str.length(), hash_field);
+ DisallowHeapAllocation no_allocation;
MemCopy(result->GetChars(), str.start(), str.length());
return result;
}
@@ -870,6 +911,7 @@ Handle<String> Factory::NewOneByteInternalizedSubString(
uint32_t hash_field) {
Handle<SeqOneByteString> result =
AllocateRawOneByteInternalizedString(length, hash_field);
+ DisallowHeapAllocation no_allocation;
MemCopy(result->GetChars(), string->GetChars() + offset, length);
return result;
}
@@ -1027,6 +1069,7 @@ static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
1)); // because of this.
Handle<SeqOneByteString> str =
isolate->factory()->NewRawOneByteString(2).ToHandleChecked();
+ DisallowHeapAllocation no_allocation;
uint8_t* dest = str->GetChars();
dest[0] = static_cast<uint8_t>(c1);
dest[1] = static_cast<uint8_t>(c2);
@@ -1034,6 +1077,7 @@ static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
} else {
Handle<SeqTwoByteString> str =
isolate->factory()->NewRawTwoByteString(2).ToHandleChecked();
+ DisallowHeapAllocation no_allocation;
uc16* dest = str->GetChars();
dest[0] = c1;
dest[1] = c2;
@@ -1163,6 +1207,7 @@ Handle<String> Factory::NewSurrogatePairString(uint16_t lead, uint16_t trail) {
Handle<SeqTwoByteString> str =
isolate()->factory()->NewRawTwoByteString(2).ToHandleChecked();
+ DisallowHeapAllocation no_allocation;
uc16* dest = str->GetChars();
dest[0] = lead;
dest[1] = trail;
@@ -1196,15 +1241,15 @@ Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
if (str->IsOneByteRepresentation()) {
Handle<SeqOneByteString> result =
NewRawOneByteString(length).ToHandleChecked();
- uint8_t* dest = result->GetChars();
DisallowHeapAllocation no_gc;
+ uint8_t* dest = result->GetChars();
String::WriteToFlat(*str, dest, begin, end);
return result;
} else {
Handle<SeqTwoByteString> result =
NewRawTwoByteString(length).ToHandleChecked();
- uc16* dest = result->GetChars();
DisallowHeapAllocation no_gc;
+ uc16* dest = result->GetChars();
String::WriteToFlat(*str, dest, begin, end);
return result;
}
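These string hunks all apply the same fix: the DisallowHeapAllocation scope is taken (or moved) before GetChars(), so the raw character pointer is only obtained once allocation, and therefore GC, is ruled out; otherwise a later allocation could move the backing store and leave the pointer dangling. A minimal sketch of the ordering, using hypothetical stand-in types (NoGcScope, MovableString) rather than the real V8 classes:

    #include <cstring>

    struct NoGcScope {};           // stand-in: allocation is forbidden while alive
    struct MovableString {
      char* data;                  // could be relocated by a (hypothetical) moving GC
      char* GetChars() { return data; }
    };

    void CopyInto(MovableString& dst, const char* src, std::size_t len) {
      NoGcScope no_gc;             // enter the no-allocation scope first...
      char* raw = dst.GetChars();  // ...then take the raw pointer
      std::memcpy(raw, src, len);  // safe: nothing here can trigger a GC that moves dst
    }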
@@ -1347,27 +1392,57 @@ Handle<Symbol> Factory::NewPrivateSymbol(PretenureFlag flag) {
return symbol;
}
-Handle<Symbol> Factory::NewPrivateFieldSymbol() {
+Handle<Symbol> Factory::NewPrivateNameSymbol() {
Handle<Symbol> symbol = NewSymbol();
- symbol->set_is_private_field();
+ symbol->set_is_private_name();
return symbol;
}
+Handle<Context> Factory::NewContext(RootIndex map_root_index, int size,
+ int variadic_part_length,
+ PretenureFlag pretenure) {
+ DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
+ DCHECK_LE(Context::kTodoHeaderSize, size);
+ DCHECK(IsAligned(size, kTaggedSize));
+ DCHECK_LE(Context::MIN_CONTEXT_SLOTS, variadic_part_length);
+ DCHECK_LE(Context::SizeFor(variadic_part_length), size);
+
+ Map map = Map::cast(isolate()->root(map_root_index));
+ HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
+ Handle<Context> context(Context::cast(result), isolate());
+ context->set_length(variadic_part_length);
+ DCHECK_EQ(context->SizeFromMap(map), size);
+ if (size > Context::kTodoHeaderSize) {
+ ObjectSlot start = context->RawField(Context::kTodoHeaderSize);
+ ObjectSlot end = context->RawField(size);
+ size_t slot_count = end - start;
+ MemsetPointer(start, *undefined_value(), slot_count);
+ }
+ return context;
+}
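The new NewContext helper takes both the allocation size and the variadic part length; every caller below derives the former as Context::SizeFor(variadic_part_length). Roughly, the size is a fixed header plus one tagged slot per context variable, and everything past the header is pre-filled with undefined so the GC never observes uninitialized slots. A sketch of that relationship with illustrative constants (not V8's real layout):

    #include <cstddef>
    #include <vector>

    constexpr int kTaggedSize = 8;                       // assumed slot size
    constexpr int kContextHeaderSize = 4 * kTaggedSize;  // hypothetical header

    constexpr int SizeFor(int variadic_part_length) {
      return kContextHeaderSize + variadic_part_length * kTaggedSize;
    }

    // Mirrors the MemsetPointer() call above: slots after the header start out
    // as "undefined".
    std::vector<long> AllocateContext(int variadic_part_length, long undefined) {
      std::vector<long> object(SizeFor(variadic_part_length) / kTaggedSize, 0);
      for (std::size_t i = kContextHeaderSize / kTaggedSize; i < object.size(); ++i)
        object[i] = undefined;
      return object;
    }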
+
Handle<NativeContext> Factory::NewNativeContext() {
- Handle<NativeContext> context = NewFixedArrayWithMap<NativeContext>(
- RootIndex::kNativeContextMap, Context::NATIVE_CONTEXT_SLOTS, TENURED);
+ Handle<NativeContext> context = Handle<NativeContext>::cast(
+ NewContext(RootIndex::kNativeContextMap, NativeContext::kSize,
+ NativeContext::NATIVE_CONTEXT_SLOTS, TENURED));
+ context->set_scope_info(ReadOnlyRoots(isolate()).empty_scope_info());
+ context->set_previous(Context::unchecked_cast(Smi::zero()));
+ context->set_extension(*the_hole_value());
context->set_native_context(*context);
- context->set_errors_thrown(Smi::kZero);
- context->set_math_random_index(Smi::kZero);
+ context->set_errors_thrown(Smi::zero());
+ context->set_math_random_index(Smi::zero());
context->set_serialized_objects(*empty_fixed_array());
+ context->set_microtask_queue(nullptr);
return context;
}
Handle<Context> Factory::NewScriptContext(Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
- Handle<Context> context = NewFixedArrayWithMap<Context>(
- RootIndex::kScriptContextMap, scope_info->ContextLength(), TENURED);
+ int variadic_part_length = scope_info->ContextLength();
+ Handle<Context> context = NewContext(RootIndex::kScriptContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*outer);
context->set_extension(*the_hole_value());
@@ -1388,8 +1463,10 @@ Handle<Context> Factory::NewModuleContext(Handle<Module> module,
Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
- Handle<Context> context = NewFixedArrayWithMap<Context>(
- RootIndex::kModuleContextMap, scope_info->ContextLength(), TENURED);
+ int variadic_part_length = scope_info->ContextLength();
+ Handle<Context> context = NewContext(RootIndex::kModuleContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*outer);
context->set_extension(*module);
@@ -1400,8 +1477,6 @@ Handle<Context> Factory::NewModuleContext(Handle<Module> module,
Handle<Context> Factory::NewFunctionContext(Handle<Context> outer,
Handle<ScopeInfo> scope_info) {
- int length = scope_info->ContextLength();
- DCHECK_LE(Context::MIN_CONTEXT_SLOTS, length);
RootIndex mapRootIndex;
switch (scope_info->scope_type()) {
case EVAL_SCOPE:
@@ -1413,7 +1488,10 @@ Handle<Context> Factory::NewFunctionContext(Handle<Context> outer,
default:
UNREACHABLE();
}
- Handle<Context> context = NewFixedArrayWithMap<Context>(mapRootIndex, length);
+ int variadic_part_length = scope_info->ContextLength();
+ Handle<Context> context =
+ NewContext(mapRootIndex, Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*outer);
context->set_extension(*the_hole_value());
@@ -1424,9 +1502,13 @@ Handle<Context> Factory::NewFunctionContext(Handle<Context> outer,
Handle<Context> Factory::NewCatchContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info,
Handle<Object> thrown_object) {
+ DCHECK_EQ(scope_info->scope_type(), CATCH_SCOPE);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
- Handle<Context> context = NewFixedArrayWithMap<Context>(
- RootIndex::kCatchContextMap, Context::MIN_CONTEXT_SLOTS + 1);
+ // TODO(ishell): Take the details from CatchContext class.
+ int variadic_part_length = Context::MIN_CONTEXT_SLOTS + 1;
+ Handle<Context> context = NewContext(RootIndex::kCatchContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*the_hole_value());
@@ -1445,8 +1527,11 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Handle<HeapObject> ext = extension.is_null()
? Handle<HeapObject>::cast(the_hole_value())
: Handle<HeapObject>::cast(extension);
- Handle<Context> c = NewFixedArrayWithMap<Context>(
- RootIndex::kDebugEvaluateContextMap, Context::MIN_CONTEXT_SLOTS + 2);
+ // TODO(ishell): Take the details from DebugEvaluateContext class.
+ int variadic_part_length = Context::MIN_CONTEXT_SLOTS + 2;
+ Handle<Context> c = NewContext(RootIndex::kDebugEvaluateContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
c->set_scope_info(*scope_info);
c->set_previous(*previous);
c->set_native_context(previous->native_context());
@@ -1459,8 +1544,12 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Handle<Context> Factory::NewWithContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info,
Handle<JSReceiver> extension) {
- Handle<Context> context = NewFixedArrayWithMap<Context>(
- RootIndex::kWithContextMap, Context::MIN_CONTEXT_SLOTS);
+ DCHECK_EQ(scope_info->scope_type(), WITH_SCOPE);
+ // TODO(ishell): Take the details from WithContext class.
+ int variadic_part_length = Context::MIN_CONTEXT_SLOTS;
+ Handle<Context> context = NewContext(RootIndex::kWithContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*extension);
@@ -1471,8 +1560,10 @@ Handle<Context> Factory::NewWithContext(Handle<Context> previous,
Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), BLOCK_SCOPE);
- Handle<Context> context = NewFixedArrayWithMap<Context>(
- RootIndex::kBlockContextMap, scope_info->ContextLength());
+ int variadic_part_length = scope_info->ContextLength();
+ Handle<Context> context = NewContext(RootIndex::kBlockContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*the_hole_value());
@@ -1481,18 +1572,20 @@ Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
}
Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
- int length) {
- DCHECK_GE(length, Context::MIN_CONTEXT_SLOTS);
- Handle<Context> context =
- NewFixedArrayWithMap<Context>(RootIndex::kFunctionContextMap, length);
+ int variadic_part_length) {
+ DCHECK_LE(Context::MIN_CONTEXT_SLOTS, variadic_part_length);
+ Handle<Context> context = NewContext(RootIndex::kFunctionContextMap,
+ Context::SizeFor(variadic_part_length),
+ variadic_part_length, NOT_TENURED);
context->set_scope_info(ReadOnlyRoots(isolate()).empty_scope_info());
+ context->set_previous(*native_context);
context->set_extension(*the_hole_value());
context->set_native_context(*native_context);
return context;
}
Handle<Struct> Factory::NewStruct(InstanceType type, PretenureFlag pretenure) {
- Map* map;
+ Map map;
switch (type) {
#define MAKE_CASE(TYPE, Name, name) \
case TYPE: \
@@ -1622,20 +1715,19 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
return microtask;
}
-Handle<MicrotaskQueue> Factory::NewMicrotaskQueue() {
- // MicrotaskQueue should be TENURED, as it outlives Context, and is mostly
- // as long-living as Context is.
- Handle<MicrotaskQueue> microtask_queue =
- Handle<MicrotaskQueue>::cast(NewStruct(MICROTASK_QUEUE_TYPE, TENURED));
- microtask_queue->set_queue(*empty_fixed_array());
- microtask_queue->set_pending_microtask_count(0);
- return microtask_queue;
+Handle<WeakFactoryCleanupJobTask> Factory::NewWeakFactoryCleanupJobTask(
+ Handle<JSWeakFactory> weak_factory) {
+ Handle<WeakFactoryCleanupJobTask> microtask =
+ Handle<WeakFactoryCleanupJobTask>::cast(
+ NewStruct(WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE));
+ microtask->set_factory(*weak_factory);
+ return microtask;
}
Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
- Map* map = *foreign_map();
+ Map map = *foreign_map();
HeapObject* result =
AllocateRawWithImmortalMap(map->instance_size(), pretenure, map);
Handle<Foreign> foreign(Foreign::cast(result), isolate());
@@ -1714,7 +1806,7 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
CHECK(byte_length <= kMaxInt - FixedTypedArrayBase::kDataOffset);
size_t size =
OBJECT_POINTER_ALIGN(byte_length + FixedTypedArrayBase::kDataOffset);
- Map* map = ReadOnlyRoots(isolate()).MapForFixedTypedArray(array_type);
+ Map map = ReadOnlyRoots(isolate()).MapForFixedTypedArray(array_type);
AllocationAlignment alignment =
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned;
HeapObject* object = AllocateRawWithImmortalMap(static_cast<int>(size),
@@ -1769,6 +1861,17 @@ Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
return cell;
}
+Handle<FeedbackCell> Factory::NewNoFeedbackCell() {
+ AllowDeferredHandleDereference convert_to_cell;
+ HeapObject* result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
+ *no_feedback_cell_map());
+ Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
+ // Set the value to undefined. Feedback vectors are never allocated for
+ // cells that have the NoFeedbackCell map.
+ cell->set_value(*undefined_value());
+ return cell;
+}
+
Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
PretenureFlag pretenure) {
DCHECK(name->IsUniqueName());
@@ -1778,12 +1881,28 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
Handle<PropertyCell> cell(PropertyCell::cast(result), isolate());
cell->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
- cell->set_property_details(PropertyDetails(Smi::kZero));
+ cell->set_property_details(PropertyDetails(Smi::zero()));
cell->set_name(*name);
cell->set_value(*the_hole_value());
return cell;
}
+Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
+ int slack) {
+ int number_of_all_descriptors = number_of_descriptors + slack;
+ // Zero-length case must be handled outside.
+ DCHECK_LT(0, number_of_all_descriptors);
+ int size = DescriptorArray::SizeFor(number_of_all_descriptors);
+ DCHECK_LT(size, kMaxRegularHeapObjectSize);
+ HeapObject* obj =
+ isolate()->heap()->AllocateRawWithRetryOrFail(size, OLD_SPACE);
+ obj->set_map_after_allocation(*descriptor_array_map(), SKIP_WRITE_BARRIER);
+ DescriptorArray* array = DescriptorArray::cast(obj);
+ array->Initialize(*empty_enum_cache(), *undefined_value(),
+ number_of_descriptors, slack);
+ return Handle<DescriptorArray>(array, isolate());
+}
+
Handle<TransitionArray> Factory::NewTransitionArray(int number_of_transitions,
int slack) {
int capacity = TransitionArray::LengthFor(number_of_transitions + slack);
@@ -1834,9 +1953,9 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
isolate());
}
-Map* Factory::InitializeMap(Map* map, InstanceType type, int instance_size,
- ElementsKind elements_kind,
- int inobject_properties) {
+Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
+ ElementsKind elements_kind,
+ int inobject_properties) {
map->set_instance_type(type);
map->set_prototype(*null_value(), SKIP_WRITE_BARRIER);
map->set_constructor_or_backpointer(*null_value(), SKIP_WRITE_BARRIER);
@@ -1854,7 +1973,7 @@ Map* Factory::InitializeMap(Map* map, InstanceType type, int instance_size,
}
map->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
- map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
+ map->set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
map->SetInObjectUnusedPropertyFields(inobject_properties);
map->set_instance_descriptors(*empty_descriptor_array());
if (FLAG_unbox_double_fields) {
@@ -1919,10 +2038,10 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
}
SLOW_DCHECK(clone->GetElementsKind() == source->GetElementsKind());
- FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
+ FixedArrayBase elements = source->elements();
// Update elements if necessary.
if (elements->length() > 0) {
- FixedArrayBase* elem = nullptr;
+ FixedArrayBase elem;
if (elements->map() == *fixed_cow_array_map()) {
elem = elements;
} else if (source->HasDoubleElements()) {
@@ -1936,7 +2055,7 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
// Update properties if necessary.
if (source->HasFastProperties()) {
- PropertyArray* properties = source->property_array();
+ PropertyArray properties = source->property_array();
if (properties->length() > 0) {
// TODO(gsathya): Do not copy hash code.
Handle<PropertyArray> prop = CopyArrayWithMap(
@@ -1954,12 +2073,12 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
namespace {
template <typename T>
-void initialize_length(T* array, int length) {
+void initialize_length(Handle<T> array, int length) {
array->set_length(length);
}
template <>
-void initialize_length<PropertyArray>(PropertyArray* array, int length) {
+void initialize_length<PropertyArray>(Handle<PropertyArray> array, int length) {
array->initialize_length(length);
}
@@ -1971,7 +2090,7 @@ Handle<T> Factory::CopyArrayWithMap(Handle<T> src, Handle<Map> map) {
HeapObject* obj = AllocateRawFixedArray(len, NOT_TENURED);
obj->set_map_after_allocation(*map, SKIP_WRITE_BARRIER);
- T* result = T::cast(obj);
+ Handle<T> result(T::cast(obj), isolate());
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
@@ -1985,7 +2104,7 @@ Handle<T> Factory::CopyArrayWithMap(Handle<T> src, Handle<Map> map) {
initialize_length(result, len);
for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
}
- return Handle<T>(result, isolate());
+ return result;
}
template <typename T>
@@ -1998,7 +2117,7 @@ Handle<T> Factory::CopyArrayAndGrow(Handle<T> src, int grow_by,
HeapObject* obj = AllocateRawFixedArray(new_len, pretenure);
obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
- T* result = T::cast(obj);
+ Handle<T> result(T::cast(obj), isolate());
initialize_length(result, new_len);
// Copy the content.
@@ -2006,7 +2125,7 @@ Handle<T> Factory::CopyArrayAndGrow(Handle<T> src, int grow_by,
WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
MemsetPointer(result->data_start() + old_len, *undefined_value(), grow_by);
- return Handle<T>(result, isolate());
+ return result;
}
Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
@@ -2038,9 +2157,8 @@ Handle<WeakFixedArray> Factory::CopyWeakFixedArrayAndGrow(
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
for (int i = 0; i < old_len; i++) result->Set(i, src->Get(i), mode);
- HeapObjectReference* undefined_reference =
- HeapObjectReference::Strong(ReadOnlyRoots(isolate()).undefined_value());
- MemsetPointer(result->data_start() + old_len, undefined_reference, grow_by);
+ MemsetPointer(ObjectSlot(result->RawFieldOfElementAt(old_len)),
+ ReadOnlyRoots(isolate()).undefined_value(), grow_by);
return Handle<WeakFixedArray>(result, isolate());
}
@@ -2060,10 +2178,8 @@ Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
for (int i = 0; i < old_capacity; i++) result->Set(i, src->Get(i), mode);
- HeapObjectReference* undefined_reference =
- HeapObjectReference::Strong(ReadOnlyRoots(isolate()).undefined_value());
- MemsetPointer(result->data_start() + old_capacity, undefined_reference,
- grow_by);
+ MemsetPointer(ObjectSlot(result->data_start() + old_capacity),
+ ReadOnlyRoots(isolate()).undefined_value(), grow_by);
return Handle<WeakArrayList>(result, isolate());
}
@@ -2176,7 +2292,7 @@ Handle<Object> Factory::NewNumberFromUint(uint32_t value,
Handle<HeapNumber> Factory::NewHeapNumber(PretenureFlag pretenure) {
STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
- Map* map = *heap_number_map();
+ Map map = *heap_number_map();
HeapObject* result = AllocateRawWithImmortalMap(HeapNumber::kSize, pretenure,
map, kDoubleUnaligned);
return handle(HeapNumber::cast(result), isolate());
@@ -2185,7 +2301,7 @@ Handle<HeapNumber> Factory::NewHeapNumber(PretenureFlag pretenure) {
Handle<MutableHeapNumber> Factory::NewMutableHeapNumber(
PretenureFlag pretenure) {
STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
- Map* map = *mutable_heap_number_map();
+ Map map = *mutable_heap_number_map();
HeapObject* result = AllocateRawWithImmortalMap(
MutableHeapNumber::kSize, pretenure, map, kDoubleUnaligned);
return handle(MutableHeapNumber::cast(result), isolate());
@@ -2202,14 +2318,14 @@ Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
}
Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
- MessageTemplate::Template template_index,
+ MessageTemplate template_index,
Handle<Object> arg0, Handle<Object> arg1,
Handle<Object> arg2) {
HandleScope scope(isolate());
if (isolate()->bootstrapper()->IsActive()) {
// During bootstrapping we cannot construct error objects.
return scope.CloseAndEscape(NewStringFromAsciiChecked(
- MessageTemplate::TemplateString(template_index)));
+ MessageFormatter::TemplateString(template_index)));
}
if (arg0.is_null()) arg0 = undefined_value();
@@ -2260,7 +2376,7 @@ Handle<Object> Factory::NewInvalidStringLengthError() {
}
#define DEFINE_ERROR(NAME, name) \
- Handle<Object> Factory::New##NAME(MessageTemplate::Template template_index, \
+ Handle<Object> Factory::New##NAME(MessageTemplate template_index, \
Handle<Object> arg0, Handle<Object> arg1, \
Handle<Object> arg2) { \
return NewError(isolate()->name##_function(), template_index, arg0, arg1, \
@@ -2471,7 +2587,8 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
} else if (feedback_cell->map() == *one_closure_cell_map()) {
feedback_cell->set_map(*many_closures_cell_map());
} else {
- DCHECK_EQ(feedback_cell->map(), *many_closures_cell_map());
+ DCHECK(feedback_cell->map() == *no_feedback_cell_map() ||
+ feedback_cell->map() == *many_closures_cell_map());
}
// Check that the optimized code in the feedback cell wasn't marked for
@@ -2574,7 +2691,9 @@ MaybeHandle<Code> Factory::TryNewCode(
uint32_t stub_key, bool is_turbofanned, int stack_slots,
int safepoint_table_offset, int handler_table_offset) {
// Allocate objects needed for code initialization.
- Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
+ Handle<ByteArray> reloc_info = NewByteArray(
+ desc.reloc_size,
+ Builtins::IsBuiltinId(builtin_index) ? TENURED_READ_ONLY : TENURED);
Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
Handle<ByteArray> source_position_table =
maybe_source_position_table.is_null()
@@ -2610,9 +2729,14 @@ MaybeHandle<Code> Factory::TryNewCode(
source_position_table, deopt_data, reloc_info,
data_container, stub_key, is_turbofanned, stack_slots,
safepoint_table_offset, handler_table_offset);
+
+ // Flush the instruction cache before changing the permissions.
+ // Note: we do this before setting permissions to ReadExecute because on
+ // some older ARM kernels a bug causes cache flush instructions to trigger
+ // an access error on non-writable memory.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=8157
+ code->FlushICache();
}
- // Flush the instruction cache after changing the permissions.
- code->FlushICache();
return code;
}
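The comment that now sits inside the modification scope carries the key ordering constraint: flush the instruction cache while the page is still writable, then drop write permission. A hedged POSIX sketch of the same sequence, using __builtin___clear_cache (GCC/Clang) and mprotect as stand-ins for whatever primitives the port actually uses:

    #include <cstddef>
    #include <sys/mman.h>

    // Sketch only: finalize a freshly written code page.
    void FinalizeCodePage(void* page, std::size_t size, char* begin, char* end) {
      // 1. The code bytes were just written while the page was read+write.
      // 2. Flush the instruction cache *before* removing write access; on some
      //    older ARM kernels the flush itself can fault on non-writable memory.
      __builtin___clear_cache(begin, end);
      // 3. Only now flip the page to read+execute.
      mprotect(page, size, PROT_READ | PROT_EXEC);
    }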
@@ -2624,7 +2748,9 @@ Handle<Code> Factory::NewCode(
uint32_t stub_key, bool is_turbofanned, int stack_slots,
int safepoint_table_offset, int handler_table_offset) {
// Allocate objects needed for code initialization.
- Handle<ByteArray> reloc_info = NewByteArray(desc.reloc_size, TENURED);
+ Handle<ByteArray> reloc_info = NewByteArray(
+ desc.reloc_size,
+ Builtins::IsBuiltinId(builtin_index) ? TENURED_READ_ONLY : TENURED);
Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
Handle<ByteArray> source_position_table =
maybe_source_position_table.is_null()
@@ -2642,7 +2768,6 @@ Handle<Code> Factory::NewCode(
CodePageCollectionMemoryModificationScope code_allocation(heap);
HeapObject* result =
heap->AllocateRawWithRetryOrFail(object_size, CODE_SPACE);
-
if (movability == kImmovable) {
result = heap->EnsureImmovableCode(result, object_size);
}
@@ -2658,32 +2783,20 @@ Handle<Code> Factory::NewCode(
source_position_table, deopt_data, reloc_info,
data_container, stub_key, is_turbofanned, stack_slots,
safepoint_table_offset, handler_table_offset);
+
+ // Flush the instruction cache before changing the permissions.
+ // Note: we do this before setting permissions to ReadExecute because on
+ // some older ARM kernels a bug causes cache flush instructions to trigger
+ // an access error on non-writable memory.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=8157
+ code->FlushICache();
}
- // Flush the instruction cache after changing the permissions.
- code->FlushICache();
return code;
}
-Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
- DCHECK(IsAligned(static_cast<intptr_t>(size), kCodeAlignment));
- Heap* heap = isolate()->heap();
- HeapObject* result = heap->AllocateRawWithRetryOrFail(size, CODE_SPACE);
- // Unprotect the memory chunk of the object if it was not unprotected
- // already.
- heap->UnprotectAndRegisterMemoryChunk(result);
- heap->ZapCodeObject(result->address(), size);
- result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
- DCHECK(IsAligned(result->address(), kCodeAlignment));
- DCHECK_IMPLIES(
- !heap->memory_allocator()->code_range().is_empty(),
- heap->memory_allocator()->code_range().contains(result->address()));
- return handle(Code::cast(result), isolate());
-}
-
Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Address off_heap_entry) {
- CHECK(isolate()->serializer_enabled());
CHECK_NOT_NULL(isolate()->embedded_blob());
CHECK_NE(0, isolate()->embedded_blob_size());
CHECK(Builtins::IsIsolateIndependentBuiltin(*code));
@@ -2694,18 +2807,39 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
// The trampoline code object must inherit specific flags from the original
// builtin (e.g. the safepoint-table offset). We set them manually here.
- const bool set_is_off_heap_trampoline = true;
- const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
- result->initialize_flags(code->kind(), code->has_unwinding_info(),
- code->is_turbofanned(), stack_slots,
- set_is_off_heap_trampoline);
- result->set_builtin_index(code->builtin_index());
- result->set_handler_table_offset(code->handler_table_offset());
- result->code_data_container()->set_kind_specific_flags(
- code->code_data_container()->kind_specific_flags());
- result->set_constant_pool_offset(code->constant_pool_offset());
- if (code->has_safepoint_info()) {
- result->set_safepoint_table_offset(code->safepoint_table_offset());
+ {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(result->ptr());
+ CodePageMemoryModificationScope code_allocation(chunk);
+
+ const bool set_is_off_heap_trampoline = true;
+ const int stack_slots =
+ code->has_safepoint_info() ? code->stack_slots() : 0;
+ result->initialize_flags(code->kind(), code->has_unwinding_info(),
+ code->is_turbofanned(), stack_slots,
+ set_is_off_heap_trampoline);
+ result->set_builtin_index(code->builtin_index());
+ result->set_handler_table_offset(code->handler_table_offset());
+ result->code_data_container()->set_kind_specific_flags(
+ code->code_data_container()->kind_specific_flags());
+ result->set_constant_pool_offset(code->constant_pool_offset());
+ if (code->has_safepoint_info()) {
+ result->set_safepoint_table_offset(code->safepoint_table_offset());
+ }
+
+ // Replace the newly generated trampoline's RelocInfo ByteArray with the
+ // canonical one stored in the roots to avoid duplicating it for every
+ // single builtin.
+ ByteArray canonical_reloc_info =
+ ReadOnlyRoots(isolate()).off_heap_trampoline_relocation_info();
+#ifdef DEBUG
+ // Verify that the contents are the same.
+ ByteArray reloc_info = result->relocation_info();
+ DCHECK_EQ(reloc_info->length(), canonical_reloc_info->length());
+ for (int i = 0; i < reloc_info->length(); ++i) {
+ DCHECK_EQ(reloc_info->get(i), canonical_reloc_info->get(i));
+ }
+#endif
+ result->set_relocation_info(canonical_reloc_info);
}
return result;
@@ -2716,24 +2850,28 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
NewCodeDataContainer(code->code_data_container()->kind_specific_flags());
Heap* heap = isolate()->heap();
- int obj_size = code->Size();
- HeapObject* result = heap->AllocateRawWithRetryOrFail(obj_size, CODE_SPACE);
-
- // Copy code object.
- Address old_addr = code->address();
- Address new_addr = result->address();
- Heap::CopyBlock(new_addr, old_addr, obj_size);
- Handle<Code> new_code(Code::cast(result), isolate());
-
- // Set the {CodeDataContainer}, it cannot be shared.
- new_code->set_code_data_container(*data_container);
-
- new_code->Relocate(new_addr - old_addr);
- // We have to iterate over the object and process its pointers when black
- // allocation is on.
- heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code);
- // Record all references to embedded objects in the new code object.
- WriteBarrierForCode(*new_code);
+ Handle<Code> new_code;
+ {
+ int obj_size = code->Size();
+ CodePageCollectionMemoryModificationScope code_allocation(heap);
+ HeapObject* result = heap->AllocateRawWithRetryOrFail(obj_size, CODE_SPACE);
+
+ // Copy code object.
+ Address old_addr = code->address();
+ Address new_addr = result->address();
+ Heap::CopyBlock(new_addr, old_addr, obj_size);
+ new_code = handle(Code::cast(result), isolate());
+
+ // Set the {CodeDataContainer}, it cannot be shared.
+ new_code->set_code_data_container(*data_container);
+
+ new_code->Relocate(new_addr - old_addr);
+ // We have to iterate over the object and process its pointers when black
+ // allocation is on.
+ heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code);
+ // Record all references to embedded objects in the new code object.
+ WriteBarrierForCode(*new_code);
+ }
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) new_code->ObjectVerify(isolate());
@@ -2835,6 +2973,7 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
Handle<Map> new_map = Map::CopyDropDescriptors(isolate(), map);
new_map->set_may_have_interesting_symbols(true);
new_map->set_is_dictionary_map(true);
+ LOG(isolate(), MapDetails(*new_map));
// Set up the global object as a normalized object.
global->set_global_dictionary(*dictionary);
@@ -2920,11 +3059,31 @@ Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
return js_object;
}
+Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
+ Handle<Object> prototype, Handle<NameDictionary> properties,
+ Handle<FixedArrayBase> elements, PretenureFlag pretenure) {
+ Handle<Map> object_map = isolate()->slow_object_with_object_prototype_map();
+ if (object_map->prototype() != *prototype) {
+ object_map = Map::TransitionToPrototype(isolate(), object_map, prototype);
+ }
+ DCHECK(object_map->is_dictionary_map());
+ Handle<JSObject> object = NewJSObjectFromMap(object_map, pretenure);
+ object->set_raw_properties_or_hash(*properties);
+ if (*elements != ReadOnlyRoots(isolate()).empty_fixed_array()) {
+ DCHECK(elements->IsNumberDictionary());
+ object_map =
+ JSObject::GetElementsTransitionMap(object, DICTIONARY_ELEMENTS);
+ JSObject::MigrateToMap(object, object_map);
+ object->set_elements(*elements);
+ }
+ return object;
+}
+
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure) {
- NativeContext* native_context = isolate()->raw_native_context();
- Map* map = native_context->GetInitialJSArrayMap(elements_kind);
- if (map == nullptr) {
+ NativeContext native_context = isolate()->raw_native_context();
+ Map map = native_context->GetInitialJSArrayMap(elements_kind);
+ if (map.is_null()) {
JSFunction* array_function = native_context->array_function();
map = array_function->initial_map();
}
@@ -2989,7 +3148,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
}
Handle<JSWeakMap> Factory::NewJSWeakMap() {
- NativeContext* native_context = isolate()->raw_native_context();
+ NativeContext native_context = isolate()->raw_native_context();
Handle<Map> map(native_context->js_weak_map_fun()->initial_map(), isolate());
Handle<JSWeakMap> weakmap(JSWeakMap::cast(*NewJSObjectFromMap(map)),
isolate());
@@ -3140,7 +3299,7 @@ static void ForFixedTypedArray(ExternalArrayType array_type,
}
JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
- NativeContext* native_context = isolate->context()->native_context();
+ NativeContext native_context = isolate->context()->native_context();
switch (type) {
#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype) \
case kExternal##Type##Array: \
@@ -3153,7 +3312,7 @@ JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
}
JSFunction* GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
- NativeContext* native_context = isolate->context()->native_context();
+ NativeContext native_context = isolate->context()->native_context();
switch (elements_kind) {
#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: \
@@ -3352,6 +3511,7 @@ Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
// Maintain invariant expected from any JSGlobalProxy.
map->set_is_access_check_needed(true);
map->set_may_have_interesting_symbols(true);
+ LOG(isolate(), MapDetails(*map));
return Handle<JSGlobalProxy>::cast(NewJSObjectFromMap(map, NOT_TENURED));
}
@@ -3400,9 +3560,8 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
}
Handle<JSMessageObject> Factory::NewJSMessageObject(
- MessageTemplate::Template message, Handle<Object> argument,
- int start_position, int end_position, Handle<Script> script,
- Handle<Object> stack_frames) {
+ MessageTemplate message, Handle<Object> argument, int start_position,
+ int end_position, Handle<Script> script, Handle<Object> stack_frames) {
Handle<Map> map = message_object_map();
Handle<JSMessageObject> message_obj(
JSMessageObject::cast(New(map, NOT_TENURED)), isolate());
@@ -3430,8 +3589,9 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForApiFunction(
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForBuiltin(
MaybeHandle<String> maybe_name, int builtin_index, FunctionKind kind) {
+ // TODO(3770): Switch to MaybeHandle<Code>() after migration.
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
- maybe_name, MaybeHandle<Code>(), builtin_index, kind);
+ maybe_name, MaybeHandle<HeapObject>(), builtin_index, kind);
return shared;
}
@@ -3454,7 +3614,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
// Set pointer fields.
share->set_name_or_scope_info(
- has_shared_name ? *shared_name
+ has_shared_name ? Object::cast(*shared_name)
: SharedFunctionInfo::kNoSharedNameSentinel);
Handle<HeapObject> function_data;
if (maybe_function_data.ToHandle(&function_data)) {
@@ -3465,7 +3625,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
!Code::cast(*function_data)->is_builtin());
share->set_function_data(*function_data);
} else if (Builtins::IsBuiltinId(maybe_builtin_index)) {
- DCHECK_NE(maybe_builtin_index, Builtins::kDeserializeLazy);
share->set_builtin_id(maybe_builtin_index);
} else {
share->set_builtin_id(Builtins::kIllegal);
@@ -3512,7 +3671,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
}
namespace {
-inline int NumberToStringCacheHash(Handle<FixedArray> cache, Smi* number) {
+inline int NumberToStringCacheHash(Handle<FixedArray> cache, Smi number) {
int mask = (cache->length() >> 1) - 1;
return number->value() & mask;
}
@@ -3581,7 +3740,7 @@ Handle<String> Factory::NumberToString(Handle<Object> number,
return NumberToStringCacheSet(number, hash, string, check_cache);
}
-Handle<String> Factory::NumberToString(Smi* number, bool check_cache) {
+Handle<String> Factory::NumberToString(Smi number, bool check_cache) {
int hash = 0;
if (check_cache) {
hash = NumberToStringCacheHash(number_string_cache(), number);
@@ -3611,6 +3770,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
debug_info->set_script(shared->script_or_debug_info());
debug_info->set_original_bytecode_array(
ReadOnlyRoots(heap).undefined_value());
+ debug_info->set_debug_bytecode_array(ReadOnlyRoots(heap).undefined_value());
debug_info->set_break_points(ReadOnlyRoots(heap).empty_fixed_array());
// Link debug info to function.
@@ -3729,10 +3889,10 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
} else {
// Check to see whether there is a matching element in the cache.
Handle<WeakFixedArray> cache = Handle<WeakFixedArray>::cast(maybe_cache);
- MaybeObject* result = cache->Get(cache_index);
+ MaybeObject result = cache->Get(cache_index);
HeapObject* heap_object;
if (result->GetHeapObjectIfWeak(&heap_object)) {
- Map* map = Map::cast(heap_object);
+ Map map = Map::cast(heap_object);
DCHECK(!map->is_dictionary_map());
return handle(map, isolate());
}
@@ -3803,7 +3963,7 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
JSRegExp::Type type, Handle<String> source,
JSRegExp::Flags flags, int capture_count) {
Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
- Smi* uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
+ Smi uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
store->set(JSRegExp::kSourceIndex, *source);
store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
@@ -3932,6 +4092,7 @@ Handle<Map> Factory::CreateSloppyFunctionMap(
map->AppendDescriptor(&d);
}
DCHECK_EQ(inobject_properties_count, field_index);
+ LOG(isolate(), MapDetails(*map));
return map;
}
@@ -4008,6 +4169,7 @@ Handle<Map> Factory::CreateStrictFunctionMap(
map->AppendDescriptor(&d);
}
DCHECK_EQ(inobject_properties_count, field_index);
+ LOG(isolate(), MapDetails(*map));
return map;
}
@@ -4042,6 +4204,7 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
prototype_string(), function_prototype_accessor(), ro_attribs);
map->AppendDescriptor(&d);
}
+ LOG(isolate(), MapDetails(*map));
return map;
}
diff --git a/chromium/v8/src/heap/factory.h b/chromium/v8/src/heap/factory.h
index 8c6d32090e3..a85dd97219c 100644
--- a/chromium/v8/src/heap/factory.h
+++ b/chromium/v8/src/heap/factory.h
@@ -34,6 +34,7 @@ class CallableTask;
class CallbackTask;
class CallHandlerInfo;
class Expression;
+class EmbedderDataArray;
class ArrayBoilerplateDescription;
class CoverageInfo;
class DebugInfo;
@@ -63,6 +64,7 @@ class TemplateObjectDescription;
class UncompiledDataWithoutPreParsedScope;
class UncompiledDataWithPreParsedScope;
class WasmExportedFunctionData;
+class WeakFactoryCleanupJobTask;
struct SourceRange;
template <typename T>
class ZoneVector;
@@ -152,6 +154,10 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FeedbackVector> NewFeedbackVector(
Handle<SharedFunctionInfo> shared, PretenureFlag pretenure = NOT_TENURED);
+ // Allocates a clean embedder data array with given capacity.
+ Handle<EmbedderDataArray> NewEmbedderDataArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
// Allocates a fixed array for name-value pairs of boilerplate properties and
// calculates the number of properties we need to store in the backing store.
Handle<ObjectBoilerplateDescription> NewObjectBoilerplateDescription(
@@ -176,6 +182,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<OrderedHashSet> NewOrderedHashSet();
Handle<OrderedHashMap> NewOrderedHashMap();
+ Handle<OrderedNameDictionary> NewOrderedNameDictionary();
Handle<SmallOrderedHashSet> NewSmallOrderedHashSet(
int capacity = SmallOrderedHashSet::kMinCapacity,
@@ -183,6 +190,9 @@ class V8_EXPORT_PRIVATE Factory {
Handle<SmallOrderedHashMap> NewSmallOrderedHashMap(
int capacity = SmallOrderedHashMap::kMinCapacity,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<SmallOrderedNameDictionary> NewSmallOrderedNameDictionary(
+ int capacity = SmallOrderedHashMap::kMinCapacity,
+ PretenureFlag pretenure = NOT_TENURED);
// Create a new PrototypeInfo struct.
Handle<PrototypeInfo> NewPrototypeInfo();
@@ -360,7 +370,7 @@ class V8_EXPORT_PRIVATE Factory {
// Create a symbol in old or read-only space.
Handle<Symbol> NewSymbol(PretenureFlag pretenure = TENURED);
Handle<Symbol> NewPrivateSymbol(PretenureFlag pretenure = TENURED);
- Handle<Symbol> NewPrivateFieldSymbol();
+ Handle<Symbol> NewPrivateNameSymbol();
// Create a global (but otherwise uninitialized) context.
Handle<NativeContext> NewNativeContext();
@@ -439,8 +449,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<PromiseResolveThenableJobTask> NewPromiseResolveThenableJobTask(
Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
Handle<JSReceiver> thenable, Handle<Context> context);
-
- Handle<MicrotaskQueue> NewMicrotaskQueue();
+ Handle<WeakFactoryCleanupJobTask> NewWeakFactoryCleanupJobTask(
+ Handle<JSWeakFactory> weak_factory);
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr,
@@ -469,7 +479,10 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FeedbackCell> NewNoClosuresCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
+ Handle<FeedbackCell> NewNoFeedbackCell();
+ Handle<DescriptorArray> NewDescriptorArray(int number_of_entries,
+ int slack = 0);
Handle<TransitionArray> NewTransitionArray(int number_of_transitions,
int slack = 0);
@@ -482,8 +495,8 @@ class V8_EXPORT_PRIVATE Factory {
int inobject_properties = 0);
// Initializes the fields of a newly created Map. Exposed for tests and
// heap setup; other code should just call NewMap which takes care of it.
- Map* InitializeMap(Map* map, InstanceType type, int instance_size,
- ElementsKind elements_kind, int inobject_properties);
+ Map InitializeMap(Map map, InstanceType type, int instance_size,
+ ElementsKind elements_kind, int inobject_properties);
// Allocate a block of memory in the given space (filled with a filler).
// Used as a fall-back for generated code when the space is full.
@@ -593,6 +606,15 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Map> map,
int number_of_slow_properties = NameDictionary::kInitialCapacity,
PretenureFlag pretenure = NOT_TENURED);
+ // Allocates and initializes a new JavaScript object with the given
+ // {prototype} and {properties}. The newly created object will be
+ // in dictionary properties mode. The {elements} can either be the
+ // empty fixed array, in which case the resulting object will have
+ // fast elements, or a NumberDictionary, in which case the resulting
+ // object will have dictionary elements.
+ Handle<JSObject> NewSlowJSObjectWithPropertiesAndElements(
+ Handle<Object> prototype, Handle<NameDictionary> properties,
+ Handle<FixedArrayBase> elements, PretenureFlag pretenure = NOT_TENURED);
// JS arrays are pretenured when allocated by the parser.
@@ -776,12 +798,6 @@ class V8_EXPORT_PRIVATE Factory {
bool is_turbofanned = false, int stack_slots = 0,
int safepoint_table_offset = 0, int handler_table_offset = 0);
- // Allocates a new, empty code object for use by builtin deserialization. The
- // given {size} argument specifies the size of the entire code object.
- // Can only be used when code space is unprotected and requires manual
- // initialization by the caller.
- Handle<Code> NewCodeForDeserialization(uint32_t size);
-
// Allocates a new code object and initializes it as the trampoline to the
// given off-heap entry point.
Handle<Code> NewOffHeapTrampolineFor(Handle<Code> code,
@@ -800,15 +816,15 @@ class V8_EXPORT_PRIVATE Factory {
inline Handle<Object> NewURIError();
Handle<Object> NewError(Handle<JSFunction> constructor,
- MessageTemplate::Template template_index,
+ MessageTemplate template_index,
Handle<Object> arg0 = Handle<Object>(),
Handle<Object> arg1 = Handle<Object>(),
Handle<Object> arg2 = Handle<Object>());
-#define DECLARE_ERROR(NAME) \
- Handle<Object> New##NAME(MessageTemplate::Template template_index, \
- Handle<Object> arg0 = Handle<Object>(), \
- Handle<Object> arg1 = Handle<Object>(), \
+#define DECLARE_ERROR(NAME) \
+ Handle<Object> New##NAME(MessageTemplate template_index, \
+ Handle<Object> arg0 = Handle<Object>(), \
+ Handle<Object> arg1 = Handle<Object>(), \
Handle<Object> arg2 = Handle<Object>());
DECLARE_ERROR(Error)
DECLARE_ERROR(EvalError)
@@ -822,11 +838,13 @@ class V8_EXPORT_PRIVATE Factory {
#undef DECLARE_ERROR
Handle<String> NumberToString(Handle<Object> number, bool check_cache = true);
- Handle<String> NumberToString(Smi* number, bool check_cache = true);
+ Handle<String> NumberToString(Smi number, bool check_cache = true);
inline Handle<String> Uint32ToString(uint32_t value, bool check_cache = true);
-#define ROOT_ACCESSOR(type, name, CamelName) inline Handle<type> name();
+// TODO(jkummerow): Drop std::remove_pointer after the migration to ObjectPtr.
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ inline Handle<std::remove_pointer<Type>::type> name();
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
@@ -867,12 +885,9 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Map> CreateClassFunctionMap(Handle<JSFunction> empty_function);
// Allocates a new JSMessageObject object.
- Handle<JSMessageObject> NewJSMessageObject(MessageTemplate::Template message,
- Handle<Object> argument,
- int start_position,
- int end_position,
- Handle<Script> script,
- Handle<Object> stack_frames);
+ Handle<JSMessageObject> NewJSMessageObject(
+ MessageTemplate message, Handle<Object> argument, int start_position,
+ int end_position, Handle<Script> script, Handle<Object> stack_frames);
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
@@ -911,6 +926,8 @@ class V8_EXPORT_PRIVATE Factory {
// Converts the given ToPrimitive hint to its string representation.
Handle<String> ToPrimitiveHintString(ToPrimitiveHint hint);
+ Handle<JSPromise> NewJSPromiseWithoutHook(
+ PretenureFlag pretenure = NOT_TENURED);
Handle<JSPromise> NewJSPromise(PretenureFlag pretenure = NOT_TENURED);
Handle<CallHandlerInfo> NewCallHandlerInfo(bool has_no_side_effect = false);
@@ -929,7 +946,7 @@ class V8_EXPORT_PRIVATE Factory {
}
HeapObject* AllocateRawWithImmortalMap(
- int size, PretenureFlag pretenure, Map* map,
+ int size, PretenureFlag pretenure, Map map,
AllocationAlignment alignment = kWordAligned);
HeapObject* AllocateRawWithAllocationSite(
Handle<Map> map, PretenureFlag pretenure,
@@ -943,6 +960,16 @@ class V8_EXPORT_PRIVATE Factory {
int length, Object* filler,
PretenureFlag pretenure);
+ // Allocates a new context with the given map, sets the length, fills the
+ // variadic part with undefined values and leaves the rest of the context
+ // header uninitialized.
+ Handle<Context> NewContext(RootIndex map_root_index, int size,
+ int variadic_part_length, PretenureFlag pretenure);
+
+ template <typename T>
+ Handle<T> AllocateSmallOrderedHashTable(Handle<Map> map, int capacity,
+ PretenureFlag pretenure);
+
// Creates a heap object based on the map. The fields of the heap object are
// not initialized; it is the responsibility of the caller to do that.
HeapObject* New(Handle<Map> map, PretenureFlag pretenure);
@@ -978,9 +1005,6 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSArray> NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
- Handle<JSPromise> NewJSPromiseWithoutHook(
- PretenureFlag pretenure = NOT_TENURED);
-
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
MaybeHandle<String> name, MaybeHandle<HeapObject> maybe_function_data,
int maybe_builtin_index, FunctionKind kind = kNormalFunction);
diff --git a/chromium/v8/src/heap/gc-tracer.cc b/chromium/v8/src/heap/gc-tracer.cc
index 7d33c68ad19..8388cfd38bc 100644
--- a/chromium/v8/src/heap/gc-tracer.cc
+++ b/chromium/v8/src/heap/gc-tracer.cc
@@ -7,8 +7,10 @@
#include <cstdarg>
#include "src/base/atomic-utils.h"
-#include "src/counters.h"
+#include "src/counters-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/spaces.h"
#include "src/isolate.h"
namespace v8 {
@@ -196,7 +198,7 @@ void GCTracer::ResetForTesting() {
average_mark_compact_duration_ = 0;
current_mark_compact_mutator_utilization_ = 1.0;
previous_mark_compact_end_time_ = 0;
- base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ base::MutexGuard guard(&background_counter_mutex_);
for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
background_counter_[i].total_duration_ms = 0;
background_counter_[i].runtime_call_counter.Reset();
@@ -326,6 +328,7 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.incremental_marking_duration);
recorded_incremental_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
+ RecordGCSumCounters(duration);
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
FetchBackgroundMarkCompactCounters();
@@ -337,6 +340,7 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.end_time, duration + current_.incremental_marking_duration);
recorded_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
+ RecordGCSumCounters(duration);
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
FetchBackgroundMarkCompactCounters();
@@ -654,6 +658,7 @@ void GCTracer::PrintNVP() const {
"gc=%s "
"reduce_memory=%d "
"heap.prologue=%.2f "
+ "heap.embedder_tracing_epilogue=%.2f "
"heap.epilogue=%.2f "
"heap.epilogue.reduce_new_space=%.2f "
"heap.external.prologue=%.1f "
@@ -693,9 +698,8 @@ void GCTracer::PrintNVP() const {
"mark.weak_closure.weak_handles=%.1f "
"mark.weak_closure.weak_roots=%.1f "
"mark.weak_closure.harmony=%.1f "
- "mark.wrapper_prologue=%.1f "
- "mark.wrapper_epilogue=%.1f "
- "mark.wrapper_tracing=%.1f "
+ "mark.embedder_prologue=%.1f "
+ "mark.embedder_tracing=%.1f "
"prologue=%.1f "
"sweep=%.1f "
"sweep.code=%.1f "
@@ -707,8 +711,8 @@ void GCTracer::PrintNVP() const {
"incremental.finalize.external.prologue=%.1f "
"incremental.finalize.external.epilogue=%.1f "
"incremental.sweeping=%.1f "
- "incremental.wrapper_prologue=%.1f "
- "incremental.wrapper_tracing=%.1f "
+ "incremental.embedder_prologue=%.1f "
+ "incremental.embedder_tracing=%.1f "
"incremental_wrapper_tracing_longest_step=%.1f "
"incremental_finalize_longest_step=%.1f "
"incremental_finalize_steps_count=%d "
@@ -750,6 +754,7 @@ void GCTracer::PrintNVP() const {
"compaction_speed=%.f\n",
duration, spent_in_mutator, current_.TypeName(true),
current_.reduce_memory, current_.scopes[Scope::HEAP_PROLOGUE],
+ current_.scopes[Scope::HEAP_EMBEDDER_TRACING_EPILOGUE],
current_.scopes[Scope::HEAP_EPILOGUE],
current_.scopes[Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE],
current_.scopes[Scope::HEAP_EXTERNAL_PROLOGUE],
@@ -788,9 +793,8 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS],
current_.scopes[Scope::MC_MARK_WEAK_CLOSURE_HARMONY],
- current_.scopes[Scope::MC_MARK_WRAPPER_PROLOGUE],
- current_.scopes[Scope::MC_MARK_WRAPPER_EPILOGUE],
- current_.scopes[Scope::MC_MARK_WRAPPER_TRACING],
+ current_.scopes[Scope::MC_MARK_EMBEDDER_PROLOGUE],
+ current_.scopes[Scope::MC_MARK_EMBEDDER_TRACING],
current_.scopes[Scope::MC_PROLOGUE], current_.scopes[Scope::MC_SWEEP],
current_.scopes[Scope::MC_SWEEP_CODE],
current_.scopes[Scope::MC_SWEEP_MAP],
@@ -801,10 +805,11 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE],
current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE],
current_.scopes[Scope::MC_INCREMENTAL_SWEEPING],
- current_.scopes[Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE],
- current_.scopes[Scope::MC_INCREMENTAL_WRAPPER_TRACING],
+ current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE],
+ current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_TRACING],
current_
- .incremental_marking_scopes[Scope::MC_INCREMENTAL_WRAPPER_TRACING]
+ .incremental_marking_scopes
+ [Scope::MC_INCREMENTAL_EMBEDDER_TRACING]
.longest_step,
current_
.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE_BODY]
@@ -1060,7 +1065,7 @@ void GCTracer::FetchBackgroundCounters(int first_global_scope,
int last_background_scope) {
DCHECK_EQ(last_global_scope - first_global_scope,
last_background_scope - first_background_scope);
- base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ base::MutexGuard guard(&background_counter_mutex_);
int background_mc_scopes = last_background_scope - first_background_scope + 1;
for (int i = 0; i < background_mc_scopes; i++) {
current_.scopes[first_global_scope + i] +=
@@ -1085,7 +1090,7 @@ void GCTracer::FetchBackgroundCounters(int first_global_scope,
void GCTracer::AddBackgroundScopeSample(
BackgroundScope::ScopeId scope, double duration,
RuntimeCallCounter* runtime_call_counter) {
- base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
+ base::MutexGuard guard(&background_counter_mutex_);
BackgroundCounter& counter = background_counter_[scope];
counter.total_duration_ms += duration;
if (runtime_call_counter) {
@@ -1093,7 +1098,7 @@ void GCTracer::AddBackgroundScopeSample(
}
}
-void GCTracer::RecordGCPhasesHistograms(HistogramTimer* gc_timer) {
+void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
Counters* counters = heap_->isolate()->counters();
if (gc_timer == counters->gc_finalize()) {
DCHECK_EQ(Scope::FIRST_TOP_MC_SCOPE, Scope::MC_CLEAR);
@@ -1120,5 +1125,54 @@ void GCTracer::RecordGCPhasesHistograms(HistogramTimer* gc_timer) {
}
}
+void GCTracer::RecordGCSumCounters(double atomic_pause_duration) {
+ base::MutexGuard guard(&background_counter_mutex_);
+
+ const double overall_duration =
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
+ .duration +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_SWEEPING]
+ .duration +
+ incremental_marking_duration_ +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
+ .duration +
+ atomic_pause_duration;
+ const double background_duration =
+ background_counter_[BackgroundScope::MC_BACKGROUND_EVACUATE_COPY]
+ .total_duration_ms +
+ background_counter_
+ [BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS]
+ .total_duration_ms +
+ background_counter_[BackgroundScope::MC_BACKGROUND_MARKING]
+ .total_duration_ms +
+ background_counter_[BackgroundScope::MC_BACKGROUND_SWEEPING]
+ .total_duration_ms;
+
+ const double marking_duration =
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_START]
+ .duration +
+ incremental_marking_duration_ +
+ current_.incremental_marking_scopes[Scope::MC_INCREMENTAL_FINALIZE]
+ .duration +
+ current_.scopes[Scope::MC_MARK];
+ const double marking_background_duration =
+ background_counter_[BackgroundScope::MC_BACKGROUND_MARKING]
+ .total_duration_ms;
+
+ // UMA.
+ heap_->isolate()->counters()->gc_mark_compactor()->AddSample(
+ static_cast<int>(overall_duration));
+
+ // Emit trace event counters.
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "V8.GCMarkCompactorSummary", TRACE_EVENT_SCOPE_THREAD,
+ "duration", overall_duration, "background_duration",
+ background_duration);
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "V8.GCMarkCompactorMarkingSummary",
+ TRACE_EVENT_SCOPE_THREAD, "duration", marking_duration,
+ "background_duration", marking_background_duration);
+}
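For intuition, the overall duration recorded here is simply the incremental phases plus the atomic pause, while the marking duration drops the sweeping component and adds the atomic MC_MARK scope. With made-up numbers (milliseconds):

    // Hypothetical cycle, all values in ms.
    double incremental_start = 0.4, incremental_sweeping = 0.3;
    double incremental_steps = 12.0, incremental_finalize = 1.1;
    double atomic_pause = 7.5;
    double overall = incremental_start + incremental_sweeping + incremental_steps +
                     incremental_finalize + atomic_pause;  // 21.3, sampled into UMA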
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/gc-tracer.h b/chromium/v8/src/heap/gc-tracer.h
index bf49586d57b..eb86cd1ab68 100644
--- a/chromium/v8/src/heap/gc-tracer.h
+++ b/chromium/v8/src/heap/gc-tracer.h
@@ -321,7 +321,7 @@ class V8_EXPORT_PRIVATE GCTracer {
void AddBackgroundScopeSample(BackgroundScope::ScopeId scope, double duration,
RuntimeCallCounter* runtime_call_counter);
- void RecordGCPhasesHistograms(HistogramTimer* gc_timer);
+ void RecordGCPhasesHistograms(TimedHistogram* gc_timer);
private:
FRIEND_TEST(GCTracer, AverageSpeed);
@@ -338,6 +338,7 @@ class V8_EXPORT_PRIVATE GCTracer {
FRIEND_TEST(GCTracerTest, IncrementalScope);
FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
FRIEND_TEST(GCTracerTest, MutatorUtilization);
+ FRIEND_TEST(GCTracerTest, RecordGCSumHistograms);
FRIEND_TEST(GCTracerTest, RecordMarkCompactHistograms);
FRIEND_TEST(GCTracerTest, RecordScavengerHistograms);
@@ -359,6 +360,11 @@ class V8_EXPORT_PRIVATE GCTracer {
void RecordMutatorUtilization(double mark_compactor_end_time,
double mark_compactor_duration);
+ // Overall time spent in mark compact within a given GC cycle. Exact
+ // accounting of events within a GC is not necessary, which is why the
+ // recording takes place at the end of the atomic pause.
+ void RecordGCSumCounters(double atomic_pause_duration);
+
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
void PrintNVP() const;
diff --git a/chromium/v8/src/heap/heap-inl.h b/chromium/v8/src/heap/heap-inl.h
index 65b791a42fd..c1f4e8ce96f 100644
--- a/chromium/v8/src/heap/heap-inl.h
+++ b/chromium/v8/src/heap/heap-inl.h
@@ -15,12 +15,12 @@
#include "src/base/atomic-utils.h"
#include "src/base/platform/platform.h"
-#include "src/counters-inl.h"
#include "src/feedback-vector.h"
// TODO(mstarzinger): There is one more include to remove in order to no longer
// leak heap internals to users of this interface!
#include "src/heap/spaces-inl.h"
+#include "src/isolate-data.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/msan.h"
@@ -28,8 +28,7 @@
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/descriptor-array.h"
-#include "src/objects/literal-objects.h"
-#include "src/objects/microtask-queue-inl.h"
+#include "src/objects/literal-objects-inl.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
#include "src/profiler/heap-profiler.h"
@@ -55,27 +54,83 @@ HeapObject* AllocationResult::ToObjectChecked() {
return HeapObject::cast(object_);
}
-#define ROOT_ACCESSOR(type, name, CamelName) \
- type* Heap::name() { return type::cast(roots_[RootIndex::k##CamelName]); }
+Isolate* Heap::isolate() {
+ return reinterpret_cast<Isolate*>(
+ reinterpret_cast<intptr_t>(this) -
+ reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
+}
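+
+// Editor's note: Heap::isolate() above is the classic container_of trick: it
+// recovers the owning Isolate from the Heap's own address by subtracting the
+// offset of the heap member inside Isolate, with that offset computed on a
+// dummy pointer. A hedged, self-contained sketch of the same pattern follows;
+// Container and Member are illustrative types, not V8's, and offsetof would
+// be the standard-sanctioned way to obtain the same offset.

    #include <cstdint>

    struct Member { int payload; };

    struct Container {
      int other_field;
      Member member;
    };

    // Recover the enclosing Container from a pointer to its `member` field by
    // subtracting the member's offset, mirroring the dummy-pointer arithmetic
    // used by Heap::isolate() above.
    inline Container* ContainerFromMember(Member* m) {
      const std::uintptr_t offset =
          reinterpret_cast<std::uintptr_t>(
              &reinterpret_cast<Container*>(16)->member) - 16;
      return reinterpret_cast<Container*>(
          reinterpret_cast<std::uintptr_t>(m) - offset);
    }
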
+
+int64_t Heap::external_memory() {
+ return isolate()->isolate_data()->external_memory_;
+}
+
+void Heap::update_external_memory(int64_t delta) {
+ isolate()->isolate_data()->external_memory_ += delta;
+}
+
+void Heap::update_external_memory_concurrently_freed(intptr_t freed) {
+ external_memory_concurrently_freed_ += freed;
+}
+
+void Heap::account_external_memory_concurrently_freed() {
+ isolate()->isolate_data()->external_memory_ -=
+ external_memory_concurrently_freed_;
+ external_memory_concurrently_freed_ = 0;
+}
+
+RootsTable& Heap::roots_table() { return isolate()->roots_table(); }
+
+// TODO(jkummerow): Drop std::remove_pointer after the migration to ObjectPtr.
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ Type Heap::name() { \
+ return std::remove_pointer<Type>::type::cast( \
+ roots_table()[RootIndex::k##CamelName]); \
+ }
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
#define ROOT_ACCESSOR(type, name, CamelName) \
- void Heap::set_##name(type* value) { \
+ void Heap::set_##name(type value) { \
/* The deserializer makes use of the fact that these common roots are */ \
/* never in new space and never on a page that is being compacted. */ \
- DCHECK(!deserialization_complete() || \
- RootCanBeWrittenAfterInitialization(RootIndex::k##CamelName)); \
- DCHECK_IMPLIES(static_cast<int>(RootIndex::k##CamelName) < kOldSpaceRoots, \
- !InNewSpace(value)); \
- roots_[RootIndex::k##CamelName] = value; \
+ DCHECK_IMPLIES(deserialization_complete(), \
+ !RootsTable::IsImmortalImmovable(RootIndex::k##CamelName)); \
+ DCHECK_IMPLIES(RootsTable::IsImmortalImmovable(RootIndex::k##CamelName), \
+ IsImmovable(HeapObject::cast(value))); \
+ roots_table()[RootIndex::k##CamelName] = value; \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
+void Heap::SetRootCodeStubs(SimpleNumberDictionary value) {
+ roots_table()[RootIndex::kCodeStubs] = value;
+}
+
+void Heap::SetRootMaterializedObjects(FixedArray objects) {
+ roots_table()[RootIndex::kMaterializedObjects] = objects;
+}
+
+void Heap::SetRootScriptList(Object* value) {
+ roots_table()[RootIndex::kScriptList] = value;
+}
+
+void Heap::SetRootStringTable(StringTable value) {
+ roots_table()[RootIndex::kStringTable] = value;
+}
+
+void Heap::SetRootNoScriptSharedFunctionInfos(Object* value) {
+ roots_table()[RootIndex::kNoScriptSharedFunctionInfos] = value;
+}
+
+void Heap::SetMessageListeners(TemplateList value) {
+ roots_table()[RootIndex::kMessageListeners] = value;
+}
+
PagedSpace* Heap::paged_space(int idx) {
DCHECK_NE(idx, LO_SPACE);
DCHECK_NE(idx, NEW_SPACE);
+ DCHECK_NE(idx, CODE_LO_SPACE);
+ DCHECK_NE(idx, NEW_LO_SPACE);
return static_cast<PagedSpace*>(space_[idx]);
}
@@ -118,24 +173,19 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
}
#endif
#ifdef DEBUG
- isolate_->counters()->objs_since_last_full()->Increment();
- isolate_->counters()->objs_since_last_young()->Increment();
+ IncrementObjectCounters();
#endif
bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
- bool new_large_object = FLAG_young_generation_large_objects &&
- size_in_bytes > kMaxNewSpaceHeapObjectSize;
+
HeapObject* object = nullptr;
AllocationResult allocation;
if (NEW_SPACE == space) {
if (large_object) {
- space = LO_SPACE;
+ // TODO(hpayer): Implement a LO tenuring strategy.
+ space = FLAG_young_generation_large_objects ? NEW_LO_SPACE : LO_SPACE;
} else {
- if (new_large_object) {
- allocation = new_lo_space_->AllocateRaw(size_in_bytes);
- } else {
- allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
- }
+ allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
if (allocation.To(&object)) {
OnAllocationEvent(object, size_in_bytes);
}
@@ -146,7 +196,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
// Here we only allocate in the old generation.
if (OLD_SPACE == space) {
if (large_object) {
- allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+ allocation = lo_space_->AllocateRaw(size_in_bytes);
} else {
allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
}
@@ -154,11 +204,16 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
if (size_in_bytes <= code_space()->AreaSize()) {
allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
} else {
- allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
+ allocation = code_lo_space_->AllocateRaw(size_in_bytes);
}
} else if (LO_SPACE == space) {
DCHECK(large_object);
- allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+ allocation = lo_space_->AllocateRaw(size_in_bytes);
+ } else if (NEW_LO_SPACE == space) {
+ DCHECK(FLAG_young_generation_large_objects);
+ allocation = new_lo_space_->AllocateRaw(size_in_bytes);
+ } else if (CODE_LO_SPACE == space) {
+ allocation = code_lo_space_->AllocateRaw(size_in_bytes);
} else if (MAP_SPACE == space) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
} else if (RO_SPACE == space) {
@@ -274,14 +329,13 @@ void Heap::UpdateAllocationsHash(uint32_t value) {
StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}
-
-void Heap::RegisterExternalString(String* string) {
+void Heap::RegisterExternalString(String string) {
DCHECK(string->IsExternalString());
DCHECK(!string->IsThinString());
external_string_table_.AddString(string);
}
-void Heap::UpdateExternalString(String* string, size_t old_payload,
+void Heap::UpdateExternalString(String string, size_t old_payload,
size_t new_payload) {
DCHECK(string->IsExternalString());
Page* page = Page::FromHeapObject(string);
@@ -294,10 +348,10 @@ void Heap::UpdateExternalString(String* string, size_t old_payload,
ExternalBackingStoreType::kExternalString, new_payload - old_payload);
}
-void Heap::FinalizeExternalString(String* string) {
+void Heap::FinalizeExternalString(String string) {
DCHECK(string->IsExternalString());
Page* page = Page::FromHeapObject(string);
- ExternalString* ext_string = ExternalString::cast(string);
+ ExternalString ext_string = ExternalString::cast(string);
page->DecrementExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
@@ -305,8 +359,7 @@ void Heap::FinalizeExternalString(String* string) {
v8::String::ExternalStringResourceBase** resource_addr =
reinterpret_cast<v8::String::ExternalStringResourceBase**>(
- reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
- kHeapObjectTag);
+ string->address() + ExternalString::kResourceOffset);
// Dispose of the C++ object if it has not already been disposed.
if (*resource_addr != nullptr) {
@@ -324,7 +377,7 @@ bool Heap::InNewSpace(Object* object) {
}
// static
-bool Heap::InNewSpace(MaybeObject* object) {
+bool Heap::InNewSpace(MaybeObject object) {
HeapObject* heap_object;
return object->GetHeapObject(&heap_object) && InNewSpace(heap_object);
}
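
The MaybeObject overloads above (and the InFromSpace/InToSpace ones below) all share one shape: extract the strong-or-weak heap object if the value holds one, then apply the corresponding HeapObject predicate. A simplified sketch with std::optional standing in for MaybeObject; all types here are illustrative, not V8's.

    #include <optional>

    struct HeapObjSketch { bool in_new_space = false; };

    // Stand-in for MaybeObject: it may reference a heap object, or hold a
    // Smi/cleared value, in which case there is nothing to check.
    using MaybeObjSketch = std::optional<HeapObjSketch*>;

    bool InNewSpaceSketch(const MaybeObjSketch& maybe) {
      HeapObjSketch* obj = maybe.value_or(nullptr);
      return obj != nullptr && obj->in_new_space;
    }
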
@@ -346,13 +399,28 @@ bool Heap::InNewSpace(HeapObject* heap_object) {
}
// static
+bool Heap::InNewSpace(HeapObjectPtr heap_object) {
+ bool result = MemoryChunk::FromHeapObject(heap_object)->InNewSpace();
+#ifdef DEBUG
+ // If in NEW_SPACE, then check we're either not in the middle of GC or the
+ // object is in to-space.
+ if (result) {
+ // If the object is in NEW_SPACE, then it's not in RO_SPACE so this is safe.
+ Heap* heap = Heap::FromWritableHeapObject(&heap_object);
+ DCHECK(heap->gc_state_ != NOT_IN_GC || InToSpace(heap_object));
+ }
+#endif
+ return result;
+}
+
+// static
bool Heap::InFromSpace(Object* object) {
DCHECK(!HasWeakHeapObjectTag(object));
return object->IsHeapObject() && InFromSpace(HeapObject::cast(object));
}
// static
-bool Heap::InFromSpace(MaybeObject* object) {
+bool Heap::InFromSpace(MaybeObject object) {
HeapObject* heap_object;
return object->GetHeapObject(&heap_object) && InFromSpace(heap_object);
}
@@ -370,7 +438,7 @@ bool Heap::InToSpace(Object* object) {
}
// static
-bool Heap::InToSpace(MaybeObject* object) {
+bool Heap::InToSpace(MaybeObject object) {
HeapObject* heap_object;
return object->GetHeapObject(&heap_object) && InToSpace(heap_object);
}
@@ -380,20 +448,17 @@ bool Heap::InToSpace(HeapObject* heap_object) {
return MemoryChunk::FromHeapObject(heap_object)->IsFlagSet(Page::IN_TO_SPACE);
}
+// static
+bool Heap::InToSpace(HeapObjectPtr heap_object) {
+ return MemoryChunk::FromHeapObject(heap_object)->IsFlagSet(Page::IN_TO_SPACE);
+}
+
bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }
bool Heap::InReadOnlySpace(Object* object) {
return read_only_space_->Contains(object);
}
-bool Heap::InNewSpaceSlow(Address address) {
- return new_space_->ContainsSlow(address);
-}
-
-bool Heap::InOldSpaceSlow(Address address) {
- return old_space_->ContainsSlow(address);
-}
-
// static
Heap* Heap::FromWritableHeapObject(const HeapObject* obj) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
@@ -407,6 +472,19 @@ Heap* Heap::FromWritableHeapObject(const HeapObject* obj) {
return heap;
}
+// static
+Heap* Heap::FromWritableHeapObject(const HeapObjectPtr* obj) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*obj);
+ // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
+ // find a heap. The exception is when the ReadOnlySpace is writeable, during
+ // bootstrapping, so explicitly allow this case.
+ SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
+ static_cast<ReadOnlySpace*>(chunk->owner())->writable());
+ Heap* heap = chunk->heap();
+ SLOW_DCHECK(heap != nullptr);
+ return heap;
+}
+
bool Heap::ShouldBePromoted(Address old_address) {
Page* page = Page::FromAddress(old_address);
Address age_mark = new_space_->age_mark();
@@ -415,12 +493,11 @@ bool Heap::ShouldBePromoted(Address old_address) {
}
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
- CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
- static_cast<size_t>(byte_size / kPointerSize));
+ CopyWords(dst, src, static_cast<size_t>(byte_size / kPointerSize));
}
template <Heap::FindMementoMode mode>
-AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
+AllocationMemento* Heap::FindAllocationMemento(Map map, HeapObject* object) {
Address object_address = object->address();
Address memento_address = object_address + object->SizeFromMap(map);
Address last_memento_word_address = memento_address + kPointerSize;
@@ -429,7 +506,7 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
return nullptr;
}
HeapObject* candidate = HeapObject::FromAddress(memento_address);
- Map* candidate_map = candidate->map();
+ Map candidate_map = candidate->map();
// This fast check may peek at an uninitialized word. However, the slow check
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
@@ -480,7 +557,7 @@ AllocationMemento* Heap::FindAllocationMemento(Map* map, HeapObject* object) {
UNREACHABLE();
}
-void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
+void Heap::UpdateAllocationSite(Map map, HeapObject* object,
PretenuringFeedbackMap* pretenuring_feedback) {
DCHECK_NE(pretenuring_feedback, &global_pretenuring_feedback_);
DCHECK(
@@ -503,13 +580,7 @@ void Heap::UpdateAllocationSite(Map* map, HeapObject* object,
(*pretenuring_feedback)[reinterpret_cast<AllocationSite*>(key)]++;
}
-Isolate* Heap::isolate() {
- return reinterpret_cast<Isolate*>(
- reinterpret_cast<intptr_t>(this) -
- reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
-}
-
-void Heap::ExternalStringTable::AddString(String* string) {
+void Heap::ExternalStringTable::AddString(String string) {
DCHECK(string->IsExternalString());
DCHECK(!Contains(string));
@@ -527,7 +598,8 @@ Oddball* Heap::ToBoolean(bool condition) {
uint64_t Heap::HashSeed() {
uint64_t seed;
- hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed), kInt64Size);
+ ReadOnlyRoots(this).hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed),
+ kInt64Size);
DCHECK(FLAG_randomize_hashes || seed == 0);
return seed;
}
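
HashSeed above reads the 64-bit seed out of the hash_seed byte array with copy_out. The equivalent byte-to-integer move in plain C++ is a memcpy, sketched below; SeedFromBytes is an illustrative helper, not a V8 accessor.

    #include <cstdint>
    #include <cstring>

    // Reassemble a uint64_t from its raw byte representation without breaking
    // aliasing rules; copy_out above does the analogous byte-wise copy out of
    // the ByteArray.
    std::uint64_t SeedFromBytes(const unsigned char bytes[8]) {
      std::uint64_t seed;
      std::memcpy(&seed, bytes, sizeof(seed));
      return seed;
    }
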
@@ -576,6 +648,13 @@ void Heap::IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
// trigger garbage collections.
}
+bool Heap::IsWithinLargeObject(Address address) {
+ if (new_lo_space()->FindPage(address) || lo_space()->FindPage(address) ||
+ code_lo_space()->FindPage(address))
+ return true;
+ return false;
+}
+
void Heap::DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount) {
base::CheckedDecrement(&backing_store_bytes_, amount);
@@ -595,12 +674,11 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
if (heap_->write_protect_code_memory()) {
heap_->increment_code_space_memory_modification_scope_depth();
heap_->code_space()->SetReadAndWritable();
- LargePage* page = heap_->lo_space()->first_page();
+ LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
- if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndWritable();
- }
+ DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetReadAndWritable();
page = page->next_page();
}
}
@@ -610,12 +688,11 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
if (heap_->write_protect_code_memory()) {
heap_->decrement_code_space_memory_modification_scope_depth();
heap_->code_space()->SetReadAndExecutable();
- LargePage* page = heap_->lo_space()->first_page();
+ LargePage* page = heap_->code_lo_space()->first_page();
while (page != nullptr) {
- if (page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
- CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
- page->SetReadAndExecutable();
- }
+ DCHECK(page->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
+ CHECK(heap_->memory_allocator()->IsMemoryChunkExecutable(page));
+ page->SetReadAndExecutable();
page = page->next_page();
}
}
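
The CodeSpaceMemoryModificationScope constructor and destructor above follow the usual RAII discipline, now walking code_lo_space() pages directly instead of filtering executable pages out of lo_space(): construction makes the code pages writable, destruction restores read+execute, so every exit path re-protects them. A minimal sketch of that pattern; CodeRegion and WritableCodeScope are illustrative, not V8 classes.

    // Illustrative stand-in for a memory region whose protection can be flipped.
    struct CodeRegion {
      bool writable = false;
      void SetReadAndWritable() { writable = true; }
      void SetReadAndExecutable() { writable = false; }
    };

    class WritableCodeScope {
     public:
      explicit WritableCodeScope(CodeRegion* region) : region_(region) {
        region_->SetReadAndWritable();    // opened: code can be patched
      }
      ~WritableCodeScope() {
        region_->SetReadAndExecutable();  // restored on every exit path
      }

     private:
      CodeRegion* region_;
    };
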
@@ -646,8 +723,7 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (scope_active_) {
DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
- (chunk_->owner()->identity() == LO_SPACE &&
- chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)));
+ (chunk_->owner()->identity() == CODE_LO_SPACE));
chunk_->SetReadAndWritable();
}
}
diff --git a/chromium/v8/src/heap/heap-write-barrier-inl.h b/chromium/v8/src/heap/heap-write-barrier-inl.h
index b20e65d1f18..ee1ef5b1c61 100644
--- a/chromium/v8/src/heap/heap-write-barrier-inl.h
+++ b/chromium/v8/src/heap/heap-write-barrier-inl.h
@@ -12,7 +12,9 @@
#include "src/globals.h"
#include "src/objects-inl.h"
+#include "src/objects/heap-object.h"
#include "src/objects/maybe-object-inl.h"
+#include "src/objects/slots.h"
namespace v8 {
namespace internal {
@@ -72,7 +74,7 @@ inline void MarkingBarrierInternal(HeapObject* object, Address slot,
} // namespace heap_internals
-inline void WriteBarrierForCode(Code* host, RelocInfo* rinfo, Object* value) {
+inline void WriteBarrierForCode(Code host, RelocInfo* rinfo, Object* value) {
DCHECK(!HasWeakHeapObjectTag(value));
if (!value->IsHeapObject()) return;
HeapObject* object = HeapObject::cast(value);
@@ -80,28 +82,47 @@ inline void WriteBarrierForCode(Code* host, RelocInfo* rinfo, Object* value) {
MarkingBarrierForCode(host, rinfo, object);
}
-inline void WriteBarrierForCode(Code* host) {
+inline void WriteBarrierForCode(Code host) {
Heap::WriteBarrierForCodeSlow(host);
}
-inline void GenerationalBarrier(HeapObject* object, Object** slot,
+inline void GenerationalBarrier(HeapObject* object, ObjectSlot slot,
+ Object* value) {
+ DCHECK(!HasWeakHeapObjectTag(*slot));
+ DCHECK(!HasWeakHeapObjectTag(value));
+ if (!value->IsHeapObject()) return;
+ heap_internals::GenerationalBarrierInternal(object, slot.address(),
+ HeapObject::cast(value));
+}
+
+inline void GenerationalBarrier(HeapObject* object, MaybeObjectSlot slot,
+ MaybeObject value) {
+ HeapObject* value_heap_object;
+ if (!value->GetHeapObject(&value_heap_object)) return;
+ heap_internals::GenerationalBarrierInternal(object, slot.address(),
+ value_heap_object);
+}
+
+inline void GenerationalBarrier(HeapObjectPtr* object, ObjectSlot slot,
Object* value) {
DCHECK(!HasWeakHeapObjectTag(*slot));
DCHECK(!HasWeakHeapObjectTag(value));
if (!value->IsHeapObject()) return;
heap_internals::GenerationalBarrierInternal(
- object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
+ reinterpret_cast<HeapObject*>(object->ptr()), slot.address(),
+ HeapObject::cast(value));
}
-inline void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
- MaybeObject* value) {
+inline void GenerationalBarrier(HeapObjectPtr* object, MaybeObjectSlot slot,
+ MaybeObject value) {
HeapObject* value_heap_object;
if (!value->GetHeapObject(&value_heap_object)) return;
heap_internals::GenerationalBarrierInternal(
- object, reinterpret_cast<Address>(slot), value_heap_object);
+ reinterpret_cast<HeapObject*>(object->ptr()), slot.address(),
+ value_heap_object);
}
-inline void GenerationalBarrierForElements(Heap* heap, FixedArray* array,
+inline void GenerationalBarrierForElements(Heap* heap, FixedArray array,
int offset, int length) {
heap_internals::MemoryChunk* array_chunk =
heap_internals::MemoryChunk::FromHeapObject(array);
@@ -110,7 +131,7 @@ inline void GenerationalBarrierForElements(Heap* heap, FixedArray* array,
Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
}
-inline void GenerationalBarrierForCode(Code* host, RelocInfo* rinfo,
+inline void GenerationalBarrierForCode(Code host, RelocInfo* rinfo,
HeapObject* object) {
heap_internals::MemoryChunk* object_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
@@ -118,20 +139,39 @@ inline void GenerationalBarrierForCode(Code* host, RelocInfo* rinfo,
Heap::GenerationalBarrierForCodeSlow(host, rinfo, object);
}
-inline void MarkingBarrier(HeapObject* object, Object** slot, Object* value) {
- DCHECK_IMPLIES(slot != nullptr, !HasWeakHeapObjectTag(*slot));
+inline void MarkingBarrier(HeapObject* object, ObjectSlot slot, Object* value) {
+ DCHECK_IMPLIES(slot.address() != kNullAddress, !HasWeakHeapObjectTag(*slot));
+ DCHECK(!HasWeakHeapObjectTag(value));
+ if (!value->IsHeapObject()) return;
+ heap_internals::MarkingBarrierInternal(object, slot.address(),
+ HeapObject::cast(value));
+}
+
+inline void MarkingBarrier(HeapObject* object, MaybeObjectSlot slot,
+ MaybeObject value) {
+ HeapObject* value_heap_object;
+ if (!value->GetHeapObject(&value_heap_object)) return;
+ heap_internals::MarkingBarrierInternal(object, slot.address(),
+ value_heap_object);
+}
+
+inline void MarkingBarrier(HeapObjectPtr* object, ObjectSlot slot,
+ Object* value) {
+ DCHECK_IMPLIES(slot.address() != kNullAddress, !HasWeakHeapObjectTag(*slot));
DCHECK(!HasWeakHeapObjectTag(value));
if (!value->IsHeapObject()) return;
heap_internals::MarkingBarrierInternal(
- object, reinterpret_cast<Address>(slot), HeapObject::cast(value));
+ reinterpret_cast<HeapObject*>(object->ptr()), slot.address(),
+ HeapObject::cast(value));
}
-inline void MarkingBarrier(HeapObject* object, MaybeObject** slot,
- MaybeObject* value) {
+inline void MarkingBarrier(HeapObjectPtr* object, MaybeObjectSlot slot,
+ MaybeObject value) {
HeapObject* value_heap_object;
if (!value->GetHeapObject(&value_heap_object)) return;
heap_internals::MarkingBarrierInternal(
- object, reinterpret_cast<Address>(slot), value_heap_object);
+ reinterpret_cast<HeapObject*>(object->ptr()), slot.address(),
+ value_heap_object);
}
inline void MarkingBarrierForElements(Heap* heap, HeapObject* object) {
@@ -142,7 +182,7 @@ inline void MarkingBarrierForElements(Heap* heap, HeapObject* object) {
Heap::MarkingBarrierForElementsSlow(heap, object);
}
-inline void MarkingBarrierForCode(Code* host, RelocInfo* rinfo,
+inline void MarkingBarrierForCode(Code host, RelocInfo* rinfo,
HeapObject* object) {
DCHECK(!HasWeakHeapObjectTag(object));
heap_internals::MemoryChunk* object_chunk =
diff --git a/chromium/v8/src/heap/heap-write-barrier.h b/chromium/v8/src/heap/heap-write-barrier.h
index 4eaeaae8a4e..7063ccd52cf 100644
--- a/chromium/v8/src/heap/heap-write-barrier.h
+++ b/chromium/v8/src/heap/heap-write-barrier.h
@@ -5,6 +5,8 @@
#ifndef V8_HEAP_HEAP_WRITE_BARRIER_H_
#define V8_HEAP_HEAP_WRITE_BARRIER_H_
+#include "include/v8-internal.h"
+
namespace v8 {
namespace internal {
@@ -12,8 +14,11 @@ class Code;
class FixedArray;
class Heap;
class HeapObject;
+class HeapObjectPtr;
class MaybeObject;
+class MaybeObjectSlot;
class Object;
+class ObjectSlot;
class RelocInfo;
// Note: In general it is preferred to use the macros defined in
@@ -27,23 +32,38 @@ class RelocInfo;
} while (false)
// Combined write barriers.
-void WriteBarrierForCode(Code* host, RelocInfo* rinfo, Object* value);
-void WriteBarrierForCode(Code* host);
+void WriteBarrierForCode(Code host, RelocInfo* rinfo, Object* value);
+void WriteBarrierForCode(Code host);
// Generational write barrier.
-void GenerationalBarrier(HeapObject* object, Object** slot, Object* value);
-void GenerationalBarrier(HeapObject* object, MaybeObject** slot,
- MaybeObject* value);
-void GenerationalBarrierForElements(Heap* heap, FixedArray* array, int offset,
+void GenerationalBarrier(HeapObject* object, ObjectSlot slot, Object* value);
+void GenerationalBarrier(HeapObject* object, MaybeObjectSlot slot,
+ MaybeObject value);
+// This takes a HeapObjectPtr* (as opposed to a plain HeapObjectPtr)
+// to keep the WRITE_BARRIER macro syntax-compatible with the HeapObject*
+// version above.
+// TODO(3770): This should probably take a HeapObjectPtr eventually.
+void GenerationalBarrier(HeapObjectPtr* object, ObjectSlot slot, Object* value);
+void GenerationalBarrier(HeapObjectPtr* object, MaybeObjectSlot slot,
+ MaybeObject value);
+void GenerationalBarrierForElements(Heap* heap, FixedArray array, int offset,
int length);
-void GenerationalBarrierForCode(Code* host, RelocInfo* rinfo,
+void GenerationalBarrierForCode(Code host, RelocInfo* rinfo,
HeapObject* object);
// Marking write barrier.
-void MarkingBarrier(HeapObject* object, Object** slot, Object* value);
-void MarkingBarrier(HeapObject* object, MaybeObject** slot, MaybeObject* value);
+void MarkingBarrier(HeapObject* object, ObjectSlot slot, Object* value);
+void MarkingBarrier(HeapObject* object, MaybeObjectSlot slot,
+ MaybeObject value);
+// This takes a HeapObjectPtr* (as opposed to a plain HeapObjectPtr)
+// to keep the WRITE_BARRIER macro syntax-compatible with the HeapObject*
+// version above.
+// TODO(3770): This should probably take a HeapObjectPtr eventually.
+void MarkingBarrier(HeapObjectPtr* object, ObjectSlot slot, Object* value);
+void MarkingBarrier(HeapObjectPtr* object, MaybeObjectSlot slot,
+ MaybeObject value);
void MarkingBarrierForElements(Heap* heap, HeapObject* object);
-void MarkingBarrierForCode(Code* host, RelocInfo* rinfo, HeapObject* object);
+void MarkingBarrierForCode(Code host, RelocInfo* rinfo, HeapObject* object);
} // namespace internal
} // namespace v8
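
The header above only widens the overload set (ObjectSlot/MaybeObjectSlot, HeapObject*/HeapObjectPtr*); the rule the barriers implement is unchanged: a store of a young-generation object into an old-generation host must be recorded so the scavenger can later find and update the slot. A hedged sketch of that rule with simplified stand-in types, none of them V8's.

    #include <vector>

    struct Obj { bool in_young_generation = false; };

    struct RememberedSet {
      std::vector<Obj**> old_to_new_slots;
      void Record(Obj** slot) { old_to_new_slots.push_back(slot); }
    };

    // Record only old -> new references; other stores need no bookkeeping here.
    void GenerationalBarrierSketch(RememberedSet* rs, Obj* host, Obj** slot,
                                   Obj* value) {
      if (value == nullptr) return;  // Smi-like values never need a barrier
      if (value->in_young_generation && !host->in_young_generation) {
        rs->Record(slot);
      }
    }
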
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index b509d211425..98a3ec8cbcf 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -10,7 +10,6 @@
#include "src/accessors.h"
#include "src/api-inl.h"
#include "src/assembler-inl.h"
-#include "src/ast/context-slot-cache.h"
#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
@@ -46,14 +45,16 @@
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
-#include "src/instruction-stream.h"
#include "src/interpreter/interpreter.h"
+#include "src/microtask-queue.h"
#include "src/objects/data-handler.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/slots-inl.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
+#include "src/snapshot/embedded-data.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
@@ -91,12 +92,12 @@ void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}
-void Heap::SetSerializedObjects(FixedArray* objects) {
+void Heap::SetSerializedObjects(FixedArray objects) {
DCHECK(isolate()->serializer_enabled());
set_serialized_objects(objects);
}
-void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
+void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) {
DCHECK(isolate()->serializer_enabled());
set_serialized_global_proxy_sizes(sizes);
}
@@ -110,8 +111,8 @@ Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
const Heap::GCCallbackTuple& other) = default;
struct Heap::StrongRootsList {
- Object** start;
- Object** end;
+ ObjectSlot start;
+ ObjectSlot end;
StrongRootsList* next;
};
@@ -129,7 +130,8 @@ class IdleScavengeObserver : public AllocationObserver {
};
Heap::Heap()
- : initial_max_old_generation_size_(max_old_generation_size_),
+ : isolate_(isolate()),
+ initial_max_old_generation_size_(max_old_generation_size_),
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
memory_pressure_level_(MemoryPressureLevel::kNone),
@@ -148,9 +150,8 @@ Heap::Heap()
}
size_t Heap::MaxReserved() {
- const double kFactor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
- return static_cast<size_t>(
- (2 * max_semi_space_size_ + max_old_generation_size_) * kFactor);
+ return static_cast<size_t>(2 * max_semi_space_size_ +
+ max_old_generation_size_);
}
size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
@@ -176,7 +177,7 @@ size_t Heap::OldGenerationCapacity() {
space = spaces.next()) {
total += space->Capacity();
}
- return total + lo_space_->SizeOfObjects();
+ return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
}
size_t Heap::CommittedOldGenerationMemory() {
@@ -188,14 +189,13 @@ size_t Heap::CommittedOldGenerationMemory() {
space = spaces.next()) {
total += space->CommittedMemory();
}
- return total + lo_space_->Size();
+ return total + lo_space_->Size() + code_lo_space_->Size();
}
-size_t Heap::CommittedMemoryOfHeapAndUnmapper() {
+size_t Heap::CommittedMemoryOfUnmapper() {
if (!HasBeenSetUp()) return 0;
- return CommittedMemory() +
- memory_allocator()->unmapper()->CommittedBufferedMemory();
+ return memory_allocator()->unmapper()->CommittedBufferedMemory();
}
size_t Heap::CommittedMemory() {
@@ -240,6 +240,8 @@ size_t Heap::Available() {
for (SpaceIterator it(this); it.has_next();) {
total += it.next()->Available();
}
+
+ total += memory_allocator()->Available();
return total;
}
@@ -363,6 +365,15 @@ void Heap::PrintShortHeapStatistics() {
lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
+ "Code large object space, used: %6" PRIuS
+ " KB"
+ ", available: %6" PRIuS
+ " KB"
+ ", committed: %6" PRIuS " KB\n",
+ code_lo_space_->SizeOfObjects() / KB,
+ code_lo_space_->Available() / KB,
+ code_lo_space_->CommittedMemory() / KB);
+ PrintIsolate(isolate_,
"All spaces, used: %6" PRIuS
" KB"
", available: %6" PRIuS
@@ -371,11 +382,11 @@ void Heap::PrintShortHeapStatistics() {
this->SizeOfObjects() / KB, this->Available() / KB,
this->CommittedMemory() / KB);
PrintIsolate(isolate_,
- "Unmapper buffering %d chunks of committed: %6" PRIuS " KB\n",
- memory_allocator()->unmapper()->NumberOfChunks(),
- CommittedMemoryOfHeapAndUnmapper() / KB);
+ "Unmapper buffering %zu chunks of committed: %6" PRIuS " KB\n",
+ memory_allocator()->unmapper()->NumberOfCommittedChunks(),
+ CommittedMemoryOfUnmapper() / KB);
PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
- external_memory_ / KB);
+ isolate()->isolate_data()->external_memory_ / KB);
PrintIsolate(isolate_, "Backing store memory: %6" PRIuS " KB\n",
backing_store_bytes_ / KB);
PrintIsolate(isolate_, "External memory global %zu KB\n",
@@ -429,9 +440,9 @@ bool Heap::IsRetainingPathTarget(HeapObject* object,
RetainingPathOption* option) {
WeakArrayList* targets = retaining_path_targets();
int length = targets->length();
- MaybeObject* object_to_check = HeapObjectReference::Weak(object);
+ MaybeObject object_to_check = HeapObjectReference::Weak(object);
for (int i = 0; i < length; i++) {
- MaybeObject* target = targets->Get(i);
+ MaybeObject target = targets->Get(i);
DCHECK(target->IsWeakOrCleared());
if (target == object_to_check) {
DCHECK(retaining_path_target_option_.count(i));
@@ -597,6 +608,8 @@ const char* Heap::GetSpaceName(int idx) {
return "large_object_space";
case NEW_LO_SPACE:
return "new_large_object_space";
+ case CODE_LO_SPACE:
+ return "code_large_object_space";
case RO_SPACE:
return "read_only_space";
default:
@@ -605,18 +618,6 @@ const char* Heap::GetSpaceName(int idx) {
return nullptr;
}
-void Heap::SetRootCodeStubs(SimpleNumberDictionary* value) {
- roots_[RootIndex::kCodeStubs] = value;
-}
-
-void Heap::RepairFreeListsAfterDeserialization() {
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
- space->RepairFreeListsAfterDeserialization();
- }
-}
-
void Heap::MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback) {
AllocationSite* site = nullptr;
@@ -827,14 +828,8 @@ void Heap::ProcessPretenuringFeedback() {
}
}
-void Heap::InvalidateCodeEmbeddedObjects(Code* code) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
- CodePageMemoryModificationScope modification_scope(chunk);
- code->InvalidateEmbeddedObjects(this);
-}
-
-void Heap::InvalidateCodeDeoptimizationData(Code* code) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
+void Heap::InvalidateCodeDeoptimizationData(Code code) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(code->ptr());
CodePageMemoryModificationScope modification_scope(chunk);
code->set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
}
@@ -941,6 +936,38 @@ void Heap::GarbageCollectionEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
}
+
+ if (FLAG_harmony_weak_refs) {
+ // TODO(marja): (spec): The exact condition on when to schedule the cleanup
+ // task is unclear. This version schedules the cleanup task for a factory
+ // whenever the GC has discovered new dirty WeakCells for it (at that point
+ // it might have leftover dirty WeakCells since an earlier invocation of the
+ // cleanup function didn't iterate through them). See
+ // https://github.com/tc39/proposal-weakrefs/issues/34
+ HandleScope handle_scope(isolate());
+ while (
+ !isolate()->heap()->dirty_js_weak_factories()->IsUndefined(isolate())) {
+ // Enqueue one microtask per JSWeakFactory.
+ Handle<JSWeakFactory> weak_factory(
+ JSWeakFactory::cast(isolate()->heap()->dirty_js_weak_factories()),
+ isolate());
+ isolate()->heap()->set_dirty_js_weak_factories(weak_factory->next());
+ weak_factory->set_next(ReadOnlyRoots(isolate()).undefined_value());
+ Handle<Context> context(weak_factory->native_context(), isolate());
+ // GC has no native context, but we use the creation context of the
+ // JSWeakFactory for the EnqueueTask operation. This is consistent with the
+ // Promise implementation, assuming the JSWeakFactory creation context is the
+ // "caller's context" in promise functions. An alternative would be to use
+ // the native context of the cleanup function. This difference shouldn't
+ // be observable from JavaScript, since we enter the native context of the
+ // cleanup function before calling it. TODO(marja): Revisit when the spec
+ // clarifies this. See also
+ // https://github.com/tc39/proposal-weakrefs/issues/38 .
+ Handle<WeakFactoryCleanupJobTask> task =
+ isolate()->factory()->NewWeakFactoryCleanupJobTask(weak_factory);
+ isolate()->EnqueueMicrotask(task);
+ }
+ }
}
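
The loop above drains the dirty_js_weak_factories list and schedules exactly one cleanup microtask per JSWeakFactory, unlinking each factory before enqueuing it. A hedged sketch of that drain-and-enqueue shape; DirtyFactory and MicrotaskQueue are illustrative stand-ins, not V8 types.

    #include <vector>

    struct DirtyFactory {
      DirtyFactory* next = nullptr;  // intrusive "dirty" list link
    };

    struct MicrotaskQueue {
      std::vector<DirtyFactory*> pending;
      void EnqueueCleanupTask(DirtyFactory* f) { pending.push_back(f); }
    };

    // Pop every factory off the dirty list and schedule one task for each.
    void DrainDirtyFactories(DirtyFactory** head, MicrotaskQueue* queue) {
      while (*head != nullptr) {
        DirtyFactory* factory = *head;
        *head = factory->next;   // unlink first, as the loop above does
        factory->next = nullptr;
        queue->EnqueueCleanupTask(factory);
      }
    }
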
class GCCallbacksScope {
@@ -982,10 +1009,12 @@ void Heap::HandleGCRequest() {
void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
+ DCHECK(FLAG_idle_time_scavenge);
+ DCHECK_NOT_NULL(scavenge_job_);
scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
}
-HistogramTimer* Heap::GCTypePriorityTimer(GarbageCollector collector) {
+TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) {
if (IsYoungGenerationCollector(collector)) {
if (isolate_->IsIsolateInBackground()) {
return isolate_->counters()->gc_scavenger_background();
@@ -1013,7 +1042,7 @@ HistogramTimer* Heap::GCTypePriorityTimer(GarbageCollector collector) {
}
}
-HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
+TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) {
if (IsYoungGenerationCollector(collector)) {
return isolate_->counters()->gc_scavenger();
} else {
@@ -1177,8 +1206,9 @@ void Heap::ReportExternalMemoryPressure() {
static_cast<GCCallbackFlags>(
kGCCallbackFlagSynchronousPhantomCallbackProcessing |
kGCCallbackFlagCollectAllExternalMemory);
- if (external_memory_ >
- (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
+ if (isolate()->isolate_data()->external_memory_ >
+ (isolate()->isolate_data()->external_memory_at_last_mark_compact_ +
+ external_memory_hard_limit())) {
CollectAllGarbage(
kReduceMemoryFootprintMask,
GarbageCollectionReason::kExternalMemoryPressure,
@@ -1200,10 +1230,12 @@ void Heap::ReportExternalMemoryPressure() {
// Incremental marking is turned on and has already been started.
const double kMinStepSize = 5;
const double kMaxStepSize = 10;
- const double ms_step =
- Min(kMaxStepSize,
- Max(kMinStepSize, static_cast<double>(external_memory_) /
- external_memory_limit_ * kMinStepSize));
+ const double ms_step = Min(
+ kMaxStepSize,
+ Max(kMinStepSize,
+ static_cast<double>(isolate()->isolate_data()->external_memory_) /
+ isolate()->isolate_data()->external_memory_limit_ *
+ kMinStepSize));
const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
// Extend the gc callback flags with external memory flags.
current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
@@ -1276,17 +1308,17 @@ bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollectionPrologue();
{
- HistogramTimer* gc_type_timer = GCTypeTimer(collector);
- HistogramTimerScope histogram_timer_scope(gc_type_timer);
+ TimedHistogram* gc_type_timer = GCTypeTimer(collector);
+ TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_);
TRACE_EVENT0("v8", gc_type_timer->name());
- HistogramTimer* gc_type_priority_timer = GCTypePriorityTimer(collector);
- OptionalHistogramTimerScopeMode mode =
+ TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector);
+ OptionalTimedHistogramScopeMode mode =
isolate_->IsMemorySavingsModeActive()
- ? OptionalHistogramTimerScopeMode::DONT_TAKE_TIME
- : OptionalHistogramTimerScopeMode::TAKE_TIME;
- OptionalHistogramTimerScope histogram_timer_priority_scope(
- gc_type_priority_timer, mode);
+ ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME
+ : OptionalTimedHistogramScopeMode::TAKE_TIME;
+ OptionalTimedHistogramScope histogram_timer_priority_scope(
+ gc_type_priority_timer, isolate_, mode);
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
@@ -1388,42 +1420,47 @@ void Heap::StartIdleIncrementalMarking(
gc_callback_flags);
}
-void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
- int len, WriteBarrierMode mode) {
+void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
+ WriteBarrierMode mode) {
if (len == 0) return;
DCHECK(array->map() != ReadOnlyRoots(this).fixed_cow_array_map());
- Object** dst = array->data_start() + dst_index;
- Object** src = array->data_start() + src_index;
+ ObjectSlot dst = array->RawFieldOfElementAt(dst_index);
+ ObjectSlot src = array->RawFieldOfElementAt(src_index);
if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
if (dst < src) {
for (int i = 0; i < len; i++) {
- base::AsAtomicPointer::Relaxed_Store(
- dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
+ dst.Relaxed_Store(src.Relaxed_Load());
+ ++dst;
+ ++src;
}
} else {
- for (int i = len - 1; i >= 0; i--) {
- base::AsAtomicPointer::Relaxed_Store(
- dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
+ // Copy backwards.
+ dst += len - 1;
+ src += len - 1;
+ for (int i = 0; i < len; i++) {
+ dst.Relaxed_Store(src.Relaxed_Load());
+ --dst;
+ --src;
}
}
} else {
- MemMove(dst, src, len * kPointerSize);
+ MemMove(dst.ToVoidPtr(), src.ToVoidPtr(), len * kPointerSize);
}
if (mode == SKIP_WRITE_BARRIER) return;
FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
}
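
MoveElements above now copies slot by slot (with relaxed atomic accesses while concurrent marking is running) and picks the copy direction so an overlapping source range is never overwritten before it is read. A minimal sketch of that direction rule over plain words, without the atomics; illustrative only.

    #include <cstdint>

    // Copy len words between possibly overlapping ranges: forwards when the
    // destination starts below the source, backwards otherwise.
    void MoveOverlappingWords(std::intptr_t* dst, const std::intptr_t* src,
                              int len) {
      if (dst < src) {
        for (int i = 0; i < len; i++) dst[i] = src[i];
      } else {
        for (int i = len - 1; i >= 0; i--) dst[i] = src[i];
      }
    }
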
-
#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
public:
explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) {
+ for (ObjectSlot p = start; p < end; ++p) {
DCHECK(!HasWeakHeapObjectTag(*p));
if ((*p)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*p);
@@ -1433,8 +1470,8 @@ class StringTableVerifier : public ObjectVisitor {
}
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
UNREACHABLE();
}
@@ -1499,7 +1536,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
AllocationResult allocation;
int size = chunk.size;
DCHECK_LE(static_cast<size_t>(size),
- MemoryAllocator::PageAreaSize(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
static_cast<AllocationSpace>(space)));
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRawUnaligned(size);
@@ -1604,6 +1641,11 @@ bool Heap::PerformGarbageCollection(
{
GCCallbacksScope scope(this);
+ // Temporarily override any embedder stack state as callbacks may create their
+ // own state on the stack and recursively trigger GC.
+ EmbedderStackStateScope embedder_scope(
+ local_embedder_heap_tracer(),
+ EmbedderHeapTracer::EmbedderStackState::kUnknown);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
@@ -1670,15 +1712,35 @@ bool Heap::PerformGarbageCollection(
isolate_->counters()->objs_since_last_young()->Set(0);
- gc_post_processing_depth_++;
{
- AllowHeapAllocation allow_allocation;
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
+ // First round weak callbacks are not supposed to allocate and trigger
+ // nested GCs.
freed_global_handles =
- isolate_->global_handles()->PostGarbageCollectionProcessing(
- collector, gc_callback_flags);
+ isolate_->global_handles()->InvokeFirstPassWeakCallbacks();
+ }
+
+ if (collector == MARK_COMPACTOR) {
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE);
+ // TraceEpilogue may trigger operations that invalidate global handles. It
+ // has to be called *after* all other operations that potentially touch and
+ // reset global handles. It is also still part of the main garbage
+ // collection pause and thus needs to be called *before* any operation that
+ // can potentially trigger recursive garbage collection.
+ local_embedder_heap_tracer()->TraceEpilogue();
+ }
+
+ {
+ TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
+ gc_post_processing_depth_++;
+ {
+ AllowHeapAllocation allow_allocation;
+ freed_global_handles +=
+ isolate_->global_handles()->PostGarbageCollectionProcessing(
+ collector, gc_callback_flags);
+ }
+ gc_post_processing_depth_--;
}
- gc_post_processing_depth_--;
isolate_->eternal_handles()->PostGarbageCollectionProcessing();
@@ -1691,8 +1753,11 @@ bool Heap::PerformGarbageCollection(
size_t old_gen_size = OldGenerationSizeOfObjects();
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
- external_memory_at_last_mark_compact_ = external_memory_;
- external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
+ isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
+ isolate()->isolate_data()->external_memory_;
+ isolate()->isolate_data()->external_memory_limit_ =
+ isolate()->isolate_data()->external_memory_ +
+ kExternalAllocationSoftLimit;
double max_factor =
heap_controller()->MaxGrowingFactor(max_old_generation_size_);
@@ -1824,7 +1889,6 @@ void Heap::MarkCompactEpilogue() {
void Heap::MarkCompactPrologue() {
TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
- isolate_->context_slot_cache()->Clear();
isolate_->descriptor_lookup_cache()->Clear();
RegExpResultsCache::Clear(string_split_cache());
RegExpResultsCache::Clear(regexp_multiple_cache());
@@ -1855,7 +1919,7 @@ void Heap::CheckNewSpaceExpansionCriteria() {
void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
- base::LockGuard<base::Mutex> guard(relocation_mutex());
+ base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
@@ -1898,7 +1962,7 @@ void Heap::EvacuateYoungGeneration() {
void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
- base::LockGuard<base::Mutex> guard(relocation_mutex());
+ base::MutexGuard guard(relocation_mutex());
ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to
@@ -1951,7 +2015,7 @@ void Heap::ComputeFastPromotionMode() {
void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
if (unprotected_memory_chunks_registry_enabled_) {
- base::LockGuard<base::Mutex> guard(&unprotected_memory_chunks_mutex_);
+ base::MutexGuard guard(&unprotected_memory_chunks_mutex_);
if (unprotected_memory_chunks_.insert(chunk).second) {
chunk->SetReadAndWritable();
}
@@ -1976,37 +2040,37 @@ void Heap::ProtectUnprotectedMemoryChunks() {
unprotected_memory_chunks_.clear();
}
-bool Heap::ExternalStringTable::Contains(HeapObject* obj) {
+bool Heap::ExternalStringTable::Contains(String string) {
for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- if (new_space_strings_[i] == obj) return true;
+ if (new_space_strings_[i] == string) return true;
}
for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- if (old_space_strings_[i] == obj) return true;
+ if (old_space_strings_[i] == string) return true;
}
return false;
}
-String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
+String Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+ ObjectSlot p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
- String* string = String::cast(*p);
+ String string = String::cast(*p);
if (!string->IsExternalString()) {
// Original external string has been internalized.
DCHECK(string->IsThinString());
- return nullptr;
+ return String();
}
heap->FinalizeExternalString(string);
- return nullptr;
+ return String();
}
// String is still reachable.
- String* new_string = String::cast(first_word.ToForwardingAddress());
+ String new_string = String::cast(first_word.ToForwardingAddress());
if (new_string->IsThinString()) {
// Filtering Thin strings out of the external string table.
- return nullptr;
+ return String();
} else if (new_string->IsExternalString()) {
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
@@ -2017,16 +2081,16 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
}
// Internalization can replace external strings with non-external strings.
- return new_string->IsExternalString() ? new_string : nullptr;
+ return new_string->IsExternalString() ? new_string : String();
}
void Heap::ExternalStringTable::VerifyNewSpace() {
#ifdef DEBUG
- std::set<String*> visited_map;
+ std::set<String> visited_map;
std::map<MemoryChunk*, size_t> size_map;
ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
for (size_t i = 0; i < new_space_strings_.size(); ++i) {
- String* obj = String::cast(new_space_strings_[i]);
+ String obj = String::cast(new_space_strings_[i]);
MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
DCHECK(mc->InNewSpace());
DCHECK(heap_->InNewSpace(obj));
@@ -2045,12 +2109,12 @@ void Heap::ExternalStringTable::VerifyNewSpace() {
void Heap::ExternalStringTable::Verify() {
#ifdef DEBUG
- std::set<String*> visited_map;
+ std::set<String> visited_map;
std::map<MemoryChunk*, size_t> size_map;
ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
VerifyNewSpace();
for (size_t i = 0; i < old_space_strings_.size(); ++i) {
- String* obj = String::cast(old_space_strings_[i]);
+ String obj = String::cast(old_space_strings_[i]);
MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
DCHECK(!mc->InNewSpace());
DCHECK(!heap_->InNewSpace(obj));
@@ -2071,20 +2135,20 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
Heap::ExternalStringTableUpdaterCallback updater_func) {
if (new_space_strings_.empty()) return;
- Object** start = new_space_strings_.data();
- Object** end = start + new_space_strings_.size();
- Object** last = start;
+ ObjectSlot start(new_space_strings_.data());
+ ObjectSlot end(new_space_strings_.data() + new_space_strings_.size());
+ ObjectSlot last = start;
- for (Object** p = start; p < end; ++p) {
- String* target = updater_func(heap_, p);
+ for (ObjectSlot p = start; p < end; ++p) {
+ String target = updater_func(heap_, p);
- if (target == nullptr) continue;
+ if (target.is_null()) continue;
DCHECK(target->IsExternalString());
if (InNewSpace(target)) {
// String is still in new space. Update the table entry.
- *last = target;
+ last.store(target);
++last;
} else {
// String got promoted. Move it to the old string list.
@@ -2092,8 +2156,8 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
}
}
- DCHECK_LE(last, end);
- new_space_strings_.resize(static_cast<size_t>(last - start));
+ DCHECK(last <= end);
+ new_space_strings_.resize(last - start);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyNewSpace();
@@ -2111,18 +2175,20 @@ void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
if (!new_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
- new_space_strings_.data(),
- new_space_strings_.data() + new_space_strings_.size());
+ v->VisitRootPointers(
+ Root::kExternalStringsTable, nullptr,
+ ObjectSlot(new_space_strings_.data()),
+ ObjectSlot(new_space_strings_.data() + new_space_strings_.size()));
}
}
void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
IterateNewSpaceStrings(v);
if (!old_space_strings_.empty()) {
- v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
- old_space_strings_.data(),
- old_space_strings_.data() + old_space_strings_.size());
+ v->VisitRootPointers(
+ Root::kExternalStringsTable, nullptr,
+ ObjectSlot(old_space_strings_.data()),
+ ObjectSlot(old_space_strings_.data() + old_space_strings_.size()));
}
}
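
UpdateNewSpaceReferences above compacts new_space_strings_ in place: survivors are written back through a trailing cursor and the vector is resized to the number kept, while promoted strings move to the old-space list. A hedged sketch of that partition-and-shrink pattern over a plain vector; StrSketch and the two vectors are illustrative, not V8's storage.

    #include <vector>

    struct StrSketch { bool still_in_new_space = false; };

    // Keep new-space survivors in place (preserving order), move promoted
    // strings to the old-space list, then shrink to the number kept.
    void PartitionStrings(std::vector<StrSketch*>* new_space,
                          std::vector<StrSketch*>* old_space) {
      size_t last = 0;
      for (StrSketch* s : *new_space) {
        if (s == nullptr) continue;  // dropped entries simply disappear
        if (s->still_in_new_space) {
          (*new_space)[last++] = s;
        } else {
          old_space->push_back(s);
        }
      }
      new_space->resize(last);
    }
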
@@ -2134,9 +2200,9 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
void Heap::ExternalStringTable::UpdateReferences(
Heap::ExternalStringTableUpdaterCallback updater_func) {
if (old_space_strings_.size() > 0) {
- Object** start = old_space_strings_.data();
- Object** end = start + old_space_strings_.size();
- for (Object** p = start; p < end; ++p) *p = updater_func(heap_, p);
+ ObjectSlot start(old_space_strings_.data());
+ ObjectSlot end(old_space_strings_.data() + old_space_strings_.size());
+ for (ObjectSlot p = start; p < end; ++p) p.store(updater_func(heap_, p));
}
UpdateNewSpaceReferences(updater_func);
@@ -2160,7 +2226,8 @@ void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
- Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
+ Object* head =
+ VisitWeakList2<Context>(this, native_contexts_list(), retainer);
// Update the head of the list of contexts.
set_native_contexts_list(head);
}
@@ -2244,9 +2311,9 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
explicit ExternalStringTableVisitorAdapter(
Isolate* isolate, v8::ExternalResourceVisitor* visitor)
: isolate_(isolate), visitor_(visitor) {}
- void VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) {
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) {
DCHECK((*p)->IsExternalString());
visitor_->VisitExternalString(
Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
@@ -2361,13 +2428,6 @@ void Heap::CreateFixedStubs() {
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate());
-
- // Create stubs that should be there, so we don't unexpectedly have to
- // create them if we need them during the creation of another stub.
- // Stub creation mixes raw pointers and handles in an unsafe manner so
- // we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime(isolate());
-
// gcc-4.4 has problem generating correct code of following snippet:
// { JSEntryStub stub;
// js_entry_code_ = *stub.GetCode();
@@ -2381,44 +2441,6 @@ void Heap::CreateFixedStubs() {
Heap::CreateJSRunMicrotasksEntryStub();
}
-bool Heap::RootCanBeWrittenAfterInitialization(RootIndex root_index) {
- switch (root_index) {
- case RootIndex::kNumberStringCache:
- case RootIndex::kCodeStubs:
- case RootIndex::kScriptList:
- case RootIndex::kMaterializedObjects:
- case RootIndex::kDetachedContexts:
- case RootIndex::kRetainedMaps:
- case RootIndex::kRetainingPathTargets:
- case RootIndex::kFeedbackVectorsForProfilingTools:
- case RootIndex::kNoScriptSharedFunctionInfos:
- case RootIndex::kSerializedObjects:
- case RootIndex::kSerializedGlobalProxySizes:
- case RootIndex::kPublicSymbolTable:
- case RootIndex::kApiSymbolTable:
- case RootIndex::kApiPrivateSymbolTable:
- case RootIndex::kMessageListeners:
-// Smi values
-#define SMI_ENTRY(type, name, Name) case RootIndex::k##Name:
- SMI_ROOT_LIST(SMI_ENTRY)
-#undef SMI_ENTRY
- // String table
- case RootIndex::kStringTable:
- return true;
-
- default:
- return false;
- }
-}
-
-bool Heap::RootCanBeTreatedAsConstant(RootIndex root_index) {
- bool can_be = !RootCanBeWrittenAfterInitialization(root_index) &&
- !InNewSpace(root(root_index));
- DCHECK_IMPLIES(can_be, IsImmovable(HeapObject::cast(root(root_index))));
- return can_be;
-}
-
-
void Heap::FlushNumberStringCache() {
// Flush the number to string cache.
int len = number_string_cache()->length();
@@ -2434,11 +2456,11 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(RootIndex::kOnePointerFillerMap)),
+ Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
SKIP_WRITE_BARRIER);
} else if (size == 2 * kPointerSize) {
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(RootIndex::kTwoPointerFillerMap)),
+ Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
SKIP_WRITE_BARRIER);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
Memory<Address>(addr + kPointerSize) =
@@ -2447,7 +2469,7 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
} else {
DCHECK_GT(size, 2 * kPointerSize);
filler->set_map_after_allocation(
- reinterpret_cast<Map*>(root(RootIndex::kFreeSpaceMap)),
+ Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
SKIP_WRITE_BARRIER);
FreeSpace::cast(filler)->relaxed_write_size(size);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
@@ -2461,7 +2483,7 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are nullptr.
- DCHECK((filler->map() == nullptr && !deserialization_complete_) ||
+ DCHECK((filler->map().is_null() && !deserialization_complete_) ||
filler->map()->IsMap());
return filler;
}
@@ -2475,7 +2497,7 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
Address address = object->address();
- if (lo_space()->Contains(object)) return false;
+ if (IsLargeObject(object)) return false;
// We can move the object start if the page was already swept.
return Page::FromAddress(address)->SweepingDone();
@@ -2483,7 +2505,19 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
bool Heap::IsImmovable(HeapObject* object) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
+ return chunk->NeverEvacuate() || IsLargeObject(object);
+}
+
+bool Heap::IsLargeObject(HeapObject* object) {
+ return lo_space()->Contains(object) || code_lo_space()->Contains(object) ||
+ new_lo_space()->Contains(object);
+}
+
+bool Heap::IsInYoungGeneration(HeapObject* object) {
+ if (MemoryChunk::FromHeapObject(object)->IsInNewLargeObjectSpace()) {
+ return !object->map_word().IsForwardingAddress();
+ }
+ return Heap::InNewSpace(object);
}
#ifdef ENABLE_SLOW_DCHECKS
@@ -2491,18 +2525,18 @@ namespace {
class LeftTrimmerVerifierRootVisitor : public RootVisitor {
public:
- explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase* to_check)
+ explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase to_check)
: to_check_(to_check) {}
- void VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) override {
- for (Object** p = start; p < end; ++p) {
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) {
DCHECK_NE(*p, to_check_);
}
}
private:
- FixedArrayBase* to_check_;
+ FixedArrayBase to_check_;
DISALLOW_COPY_AND_ASSIGN(LeftTrimmerVerifierRootVisitor);
};
@@ -2520,25 +2554,25 @@ bool MayContainRecordedSlots(HeapObject* object) {
}
} // namespace
-FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
- int elements_to_trim) {
+FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
+ int elements_to_trim) {
if (elements_to_trim == 0) {
// This simplifies reasoning in the rest of the function.
return object;
}
- CHECK_NOT_NULL(object);
+ CHECK(!object.is_null());
DCHECK(CanMoveObjectStart(object));
// Add custom visitor to concurrent marker if new left-trimmable type
// is added.
DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
const int bytes_to_trim = elements_to_trim * element_size;
- Map* map = object->map();
+ Map map = object->map();
- // For now this trick is only applied to objects in new and paged space.
- // In large object space the object's start must coincide with chunk
- // and thus the trick is just not applicable.
- DCHECK(!lo_space()->Contains(object));
+ // For now this trick is only applied to fixed arrays which may be in new
+ // space or old space. In a large object space the object's start must
+ // coincide with the chunk and thus the trick is just not applicable.
+ DCHECK(!IsLargeObject(object));
DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
@@ -2570,7 +2604,7 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
RELAXED_WRITE_FIELD(object, bytes_to_trim + kPointerSize,
Smi::FromInt(len - elements_to_trim));
- FixedArrayBase* new_object =
+ FixedArrayBase new_object =
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
// Remove recorded slots for the new map and length offset.
@@ -2603,6 +2637,7 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Make sure the stack or other roots (e.g., Handles) don't contain pointers
// to the original FixedArray (which is now the filler object).
LeftTrimmerVerifierRootVisitor root_visitor(object);
+ ReadOnlyRoots(this).Iterate(&root_visitor);
IterateRoots(&root_visitor, VISIT_ALL);
}
#endif // ENABLE_SLOW_DCHECKS
@@ -2610,7 +2645,7 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
return new_object;
}
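
LeftTrimFixedArray above never copies the surviving elements: it writes a filler over the trimmed prefix, re-materializes the map and length immediately before the surviving payload, and returns the object at its new start address. A hedged sketch of just the address and length arithmetic, using plain integers rather than V8's object layout.

    #include <cstdint>

    struct LeftTrimResult {
      std::uintptr_t filler_start;  // old start; a filler of filler_size bytes goes here
      int filler_size;
      std::uintptr_t new_start;     // where the trimmed array now begins
      int new_length;
    };

    LeftTrimResult LeftTrimSketch(std::uintptr_t old_start, int old_length,
                                  int elements_to_trim, int element_size) {
      const int bytes_to_trim = elements_to_trim * element_size;
      return LeftTrimResult{old_start, bytes_to_trim,
                            old_start + bytes_to_trim,
                            old_length - elements_to_trim};
    }
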
-void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
+void Heap::RightTrimFixedArray(FixedArrayBase object, int elements_to_trim) {
const int len = object->length();
DCHECK_LE(elements_to_trim, len);
DCHECK_GE(elements_to_trim, 0);
@@ -2639,12 +2674,12 @@ void Heap::RightTrimWeakFixedArray(WeakFixedArray* object,
// collection: When marking, we record the weak slots, and shrinking
// invalidates them.
DCHECK_EQ(gc_state(), MARK_COMPACT);
- CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
- elements_to_trim * kPointerSize);
+ CreateFillerForArray<WeakFixedArray*>(object, elements_to_trim,
+ elements_to_trim * kPointerSize);
}
template <typename T>
-void Heap::CreateFillerForArray(T* object, int elements_to_trim,
+void Heap::CreateFillerForArray(T object, int elements_to_trim,
int bytes_to_trim) {
DCHECK(object->IsFixedArrayBase() || object->IsByteArray() ||
object->IsWeakFixedArray());
@@ -2679,10 +2714,8 @@ void Heap::CreateFillerForArray(T* object, int elements_to_trim,
   // Technically in new space this write might be omitted (except for
   // debug mode, which iterates through the heap), but to play it safe
   // we still do it.
- // We do not create a filler for objects in large object space.
- // TODO(hpayer): We should shrink the large object page if the size
- // of the object changed significantly.
- if (!lo_space()->Contains(object)) {
+ // We do not create a filler for objects in a large object space.
+ if (!IsLargeObject(object)) {
HeapObject* filler =
CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
DCHECK_NOT_NULL(filler);
@@ -2931,7 +2964,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// object space for side effects.
IncrementalMarking::MarkingState* marking_state =
incremental_marking()->marking_state();
- for (int i = OLD_SPACE; i < Serializer<>::kNumberOfSpaces; i++) {
+ for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
@@ -2946,9 +2979,6 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
}
}
}
- // We potentially deserialized wrappers which require registering with the
- // embedder as the marker will not find them.
- local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
// Large object space doesn't use reservations, so it needs custom handling.
for (HeapObject* object : large_objects) {
@@ -2984,26 +3014,26 @@ void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
// Helper class for collecting slot addresses.
class SlotCollectingVisitor final : public ObjectVisitor {
public:
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
+ VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
- for (MaybeObject** p = start; p < end; p++) {
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
slots_.push_back(p);
}
}
int number_of_slots() { return static_cast<int>(slots_.size()); }
- MaybeObject** slot(int i) { return slots_[i]; }
+ MaybeObjectSlot slot(int i) { return slots_[i]; }
private:
- std::vector<MaybeObject**> slots_;
+ std::vector<MaybeObjectSlot> slots_;
};
-void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
+void Heap::VerifyObjectLayoutChange(HeapObject* object, Map new_map) {
if (!FLAG_verify_heap) return;
// Check that Heap::NotifyObjectLayout was called for object transitions
@@ -3026,7 +3056,7 @@ void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
object->set_map_word(old_map_word);
DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
for (int i = 0; i < new_visitor.number_of_slots(); i++) {
- DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
+ DCHECK(new_visitor.slot(i) == old_visitor.slot(i));
}
}
} else {
@@ -3211,8 +3241,8 @@ void Heap::CollectGarbageOnMemoryPressure() {
double end = MonotonicallyIncreasingTimeInMs();
// Estimate how much memory we can free.
- int64_t potential_garbage =
- (CommittedMemory() - SizeOfObjects()) + external_memory_;
+ int64_t potential_garbage = (CommittedMemory() - SizeOfObjects()) +
+ isolate()->isolate_data()->external_memory_;
// If we can potentially free large amount of memory, then start GC right
// away instead of waiting for memory reducer.
if (potential_garbage >= kGarbageThresholdInBytes &&
@@ -3257,7 +3287,7 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
void Heap::EagerlyFreeExternalMemory() {
for (Page* page : *old_space()) {
if (!page->SweepingDone()) {
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
if (!page->SweepingDone()) {
ArrayBufferTracker::FreeDead(
page, mark_compact_collector()->non_atomic_marking_state());
@@ -3314,7 +3344,7 @@ void Heap::CollectCodeStatistics() {
// somehow ends up in those spaces, we would miss it here.
CodeStatistics::CollectCodeStatistics(code_space_, isolate());
CodeStatistics::CollectCodeStatistics(old_space_, isolate());
- CodeStatistics::CollectCodeStatistics(lo_space_, isolate());
+ CodeStatistics::CollectCodeStatistics(code_lo_space_, isolate());
}
#ifdef DEBUG
@@ -3397,18 +3427,8 @@ bool Heap::Contains(HeapObject* value) {
return HasBeenSetUp() &&
(new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
code_space_->Contains(value) || map_space_->Contains(value) ||
- lo_space_->Contains(value) || read_only_space_->Contains(value));
-}
-
-bool Heap::ContainsSlow(Address addr) {
- if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
- return false;
- }
- return HasBeenSetUp() &&
- (new_space_->ToSpaceContainsSlow(addr) ||
- old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
- map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr) ||
- read_only_space_->Contains(addr));
+ lo_space_->Contains(value) || read_only_space_->Contains(value) ||
+ code_lo_space_->Contains(value) || new_lo_space_->Contains(value));
}
bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
@@ -3428,6 +3448,8 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
return map_space_->Contains(value);
case LO_SPACE:
return lo_space_->Contains(value);
+ case CODE_LO_SPACE:
+ return code_lo_space_->Contains(value);
case NEW_LO_SPACE:
return new_lo_space_->Contains(value);
case RO_SPACE:
@@ -3453,6 +3475,8 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
return map_space_->ContainsSlow(addr);
case LO_SPACE:
return lo_space_->ContainsSlow(addr);
+ case CODE_LO_SPACE:
+ return code_lo_space_->ContainsSlow(addr);
case NEW_LO_SPACE:
return new_lo_space_->ContainsSlow(addr);
case RO_SPACE:
@@ -3469,6 +3493,7 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
case MAP_SPACE:
case LO_SPACE:
case NEW_LO_SPACE:
+ case CODE_LO_SPACE:
case RO_SPACE:
return true;
default:
@@ -3476,23 +3501,6 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
}
}
-bool Heap::RootIsImmortalImmovable(RootIndex root_index) {
- switch (root_index) {
-#define IMMORTAL_IMMOVABLE_ROOT(name) case RootIndex::k##name:
- IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
-#undef IMMORTAL_IMMOVABLE_ROOT
-#define INTERNALIZED_STRING(_, name, value) case RootIndex::k##name:
- INTERNALIZED_STRING_LIST_GENERATOR(INTERNALIZED_STRING, /* not used */)
-#undef INTERNALIZED_STRING
-#define STRING_TYPE(NAME, size, name, Name) case RootIndex::k##Name##Map:
- STRING_TYPE_LIST(STRING_TYPE)
-#undef STRING_TYPE
- return true;
- default:
- return false;
- }
-}
-
#ifdef VERIFY_HEAP
class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
public:
@@ -3500,14 +3508,14 @@ class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
: VerifyPointersVisitor(heap) {}
protected:
- void VerifyPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VerifyPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
if (host != nullptr) {
CHECK(heap_->InReadOnlySpace(host->map()));
}
VerifyPointersVisitor::VerifyPointers(host, start, end);
- for (MaybeObject** current = start; current < end; current++) {
+ for (MaybeObjectSlot current = start; current < end; ++current) {
HeapObject* object;
if ((*current)->GetHeapObject(&object)) {
CHECK(heap_->InReadOnlySpace(object));
@@ -3538,6 +3546,8 @@ void Heap::Verify() {
code_space_->Verify(isolate(), &no_dirty_regions_visitor);
lo_space_->Verify(isolate());
+ code_lo_space_->Verify(isolate());
+ new_lo_space_->Verify(isolate());
VerifyReadOnlyPointersVisitor read_only_visitor(this);
read_only_space_->Verify(isolate(), &read_only_visitor);
@@ -3549,29 +3559,33 @@ class SlotVerifyingVisitor : public ObjectVisitor {
std::set<std::pair<SlotType, Address> >* typed)
: untyped_(untyped), typed_(typed) {}
- virtual bool ShouldHaveBeenRecorded(HeapObject* host,
- MaybeObject* target) = 0;
+ virtual bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject target) = 0;
+ // TODO(3770): Drop this after the migration.
+ bool ShouldHaveBeenRecorded(Code host, MaybeObject target) {
+ return ShouldHaveBeenRecorded(reinterpret_cast<HeapObject*>(host.ptr()),
+ target);
+ }
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
#ifdef DEBUG
- for (Object** slot = start; slot < end; slot++) {
+ for (ObjectSlot slot = start; slot < end; ++slot) {
DCHECK(!HasWeakHeapObjectTag(*slot));
}
#endif // DEBUG
- VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+ VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
- for (MaybeObject** slot = start; slot < end; slot++) {
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot slot = start; slot < end; ++slot) {
if (ShouldHaveBeenRecorded(host, *slot)) {
- CHECK_GT(untyped_->count(reinterpret_cast<Address>(slot)), 0);
+ CHECK_GT(untyped_->count(slot.address()), 0);
}
}
}
- void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(
@@ -3581,7 +3595,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
}
}
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
Object* target = rinfo->target_object();
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
@@ -3604,7 +3618,7 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
std::set<std::pair<SlotType, Address>>* typed)
: SlotVerifyingVisitor(untyped, typed) {}
- bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject* target) override {
+ bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject target) override {
DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InNewSpace(target),
Heap::InToSpace(target));
return target->IsStrongOrWeak() && Heap::InNewSpace(target) &&
@@ -3616,14 +3630,15 @@ template <RememberedSetType direction>
void CollectSlots(MemoryChunk* chunk, Address start, Address end,
std::set<Address>* untyped,
std::set<std::pair<SlotType, Address> >* typed) {
- RememberedSet<direction>::Iterate(chunk,
- [start, end, untyped](Address slot) {
- if (start <= slot && slot < end) {
- untyped->insert(slot);
- }
- return KEEP_SLOT;
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<direction>::Iterate(
+ chunk,
+ [start, end, untyped](MaybeObjectSlot slot) {
+ if (start <= slot.address() && slot.address() < end) {
+ untyped->insert(slot.address());
+ }
+ return KEEP_SLOT;
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
RememberedSet<direction>::IterateTyped(
chunk, [start, end, typed](SlotType type, Address host, Address slot) {
if (start <= slot && slot < end) {
@@ -3683,30 +3698,29 @@ void Heap::ZapFromSpace() {
void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
#ifdef DEBUG
- for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
- reinterpret_cast<Object**>(start_address)[i] = Smi::FromInt(kCodeZapValue);
+ DCHECK(IsAligned(start_address, kIntSize));
+ for (int i = 0; i < size_in_bytes / kIntSize; i++) {
+ Memory<int>(start_address + i * kIntSize) = kCodeZapValue;
}
#endif
}
-Code* Heap::builtin(int index) {
+Code Heap::builtin(int index) {
DCHECK(Builtins::IsBuiltinId(index));
- // Code::cast cannot be used here since we access builtins
- // during the marking phase of mark sweep. See IC::Clear.
- return reinterpret_cast<Code*>(builtins_[index]);
+ return Code::cast(ObjectPtr(isolate()->builtins_table()[index]));
}
Address Heap::builtin_address(int index) {
DCHECK(Builtins::IsBuiltinId(index) || index == Builtins::builtin_count);
- return reinterpret_cast<Address>(&builtins_[index]);
+ return reinterpret_cast<Address>(&isolate()->builtins_table()[index]);
}
-void Heap::set_builtin(int index, HeapObject* builtin) {
+void Heap::set_builtin(int index, Code builtin) {
DCHECK(Builtins::IsBuiltinId(index));
- DCHECK(Internals::HasHeapObjectTag(builtin));
+ DCHECK(Internals::HasHeapObjectTag(builtin.ptr()));
// The given builtin may be completely uninitialized thus we cannot check its
// type here.
- builtins_[index] = builtin;
+ isolate()->builtins_table()[index] = builtin.ptr();
}
void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
@@ -3719,7 +3733,7 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
v->VisitRootPointer(Root::kStringTable, nullptr,
- &roots_[RootIndex::kStringTable]);
+ ObjectSlot(&roots_table()[RootIndex::kStringTable]));
v->Synchronize(VisitorSynchronization::kStringTable);
if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
mode != VISIT_FOR_SERIALIZATION) {
@@ -3734,8 +3748,9 @@ void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
void Heap::IterateSmiRoots(RootVisitor* v) {
// Acquire execution access since we are going to read stack limit values.
ExecutionAccess access(isolate());
- v->VisitRootPointers(Root::kSmiRootList, nullptr, roots_.smi_roots_begin(),
- roots_.smi_roots_end());
+ v->VisitRootPointers(Root::kSmiRootList, nullptr,
+ roots_table().smi_roots_begin(),
+ roots_table().smi_roots_end());
v->Synchronize(VisitorSynchronization::kSmiRootList);
}
@@ -3750,17 +3765,17 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
void VisitRootPointer(Root root, const char* description,
- Object** p) override {
+ ObjectSlot p) override {
FixHandle(p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) FixHandle(p);
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) FixHandle(p);
}
private:
- inline void FixHandle(Object** p) {
+ inline void FixHandle(ObjectSlot p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* current = reinterpret_cast<HeapObject*>(*p);
const MapWord map_word = current->map_word();
@@ -3781,7 +3796,7 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
DCHECK(current->IsFixedArrayBase());
#endif // DEBUG
- *p = nullptr;
+ p.store(nullptr);
}
}
@@ -3792,13 +3807,9 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
mode == VISIT_ALL_IN_MINOR_MC_MARK ||
mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
- // Garbage collection can skip over the read-only roots.
- const bool isGC = mode != VISIT_ALL && mode != VISIT_FOR_SERIALIZATION &&
- mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION;
- Object** start =
- isGC ? roots_.read_only_roots_end() : roots_.strong_roots_begin();
- v->VisitRootPointers(Root::kStrongRootList, nullptr, start,
- roots_.strong_roots_end());
+ v->VisitRootPointers(Root::kStrongRootList, nullptr,
+ roots_table().strong_roots_begin(),
+ roots_table().strong_roots_end());
v->Synchronize(VisitorSynchronization::kStrongRootList);
isolate_->bootstrapper()->Iterate(v);
@@ -3826,8 +3837,17 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
if (!isMinorGC) {
IterateBuiltins(v);
v->Synchronize(VisitorSynchronization::kBuiltins);
- isolate_->interpreter()->IterateDispatchTable(v);
- v->Synchronize(VisitorSynchronization::kDispatchTable);
+
+ // The dispatch table is set up directly from the builtins using
+    // InitializeDispatchTable, so there is no need to iterate to create it.
+ if (mode != VISIT_FOR_SERIALIZATION) {
+ // Currently we iterate the dispatch table to update pointers to possibly
+ // moved Code objects for bytecode handlers.
+ // TODO(v8:6666): Remove iteration once builtins are embedded (and thus
+ // immovable) in every build configuration.
+ isolate_->interpreter()->IterateDispatchTable(v);
+ v->Synchronize(VisitorSynchronization::kDispatchTable);
+ }
}
// Iterate over global handles.
@@ -3837,7 +3857,6 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// global handles need to be added manually.
break;
case VISIT_ONLY_STRONG:
- case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
isolate_->global_handles()->IterateStrongRoots(v);
break;
case VISIT_ALL_IN_SCAVENGE:
@@ -3849,7 +3868,6 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
case VISIT_ALL_IN_MINOR_MC_UPDATE:
// Global handles are processed manually by the minor MC.
break;
- case VISIT_ALL_BUT_READ_ONLY:
case VISIT_ALL_IN_SWEEP_NEWSPACE:
case VISIT_ALL:
isolate_->global_handles()->IterateAllRoots(v);
@@ -3878,11 +3896,21 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
}
v->Synchronize(VisitorSynchronization::kStrongRoots);
- // Iterate over the partial snapshot cache unless serializing.
+ // Iterate over pending Microtasks stored in MicrotaskQueues.
+ MicrotaskQueue* default_microtask_queue = isolate_->default_microtask_queue();
+ if (default_microtask_queue) {
+ MicrotaskQueue* microtask_queue = default_microtask_queue;
+ do {
+ microtask_queue->IterateMicrotasks(v);
+ microtask_queue = microtask_queue->next();
+ } while (microtask_queue != default_microtask_queue);
+ }
+
+ // Iterate over the partial snapshot cache unless serializing or
+ // deserializing.
if (mode != VISIT_FOR_SERIALIZATION) {
SerializerDeserializer::Iterate(isolate_, v);
- // We don't do a v->Synchronize call here because the serializer and the
- // deserializer are deliberately out of sync here.
+ v->Synchronize(VisitorSynchronization::kPartialSnapshotCache);
}
}
@@ -3892,7 +3920,8 @@ void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
void Heap::IterateBuiltins(RootVisitor* v) {
for (int i = 0; i < Builtins::builtin_count; i++) {
- v->VisitRootPointer(Root::kBuiltins, Builtins::name(i), &builtins_[i]);
+ v->VisitRootPointer(Root::kBuiltins, Builtins::name(i),
+ ObjectSlot(builtin_address(i)));
}
}
@@ -4031,6 +4060,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->map_space_size = map_space_->SizeOfObjects();
*stats->map_space_capacity = map_space_->Capacity();
*stats->lo_space_size = lo_space_->Size();
+ *stats->code_lo_space_size = code_lo_space_->Size();
isolate_->global_handles()->RecordStats(stats);
*stats->memory_allocator_size = memory_allocator()->Size();
*stats->memory_allocator_capacity =
@@ -4072,9 +4102,14 @@ size_t Heap::OldGenerationSizeOfObjects() {
}
uint64_t Heap::PromotedExternalMemorySize() {
- if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
- return static_cast<uint64_t>(external_memory_ -
- external_memory_at_last_mark_compact_);
+ IsolateData* isolate_data = isolate()->isolate_data();
+ if (isolate_data->external_memory_ <=
+ isolate_data->external_memory_at_last_mark_compact_) {
+ return 0;
+ }
+ return static_cast<uint64_t>(
+ isolate_data->external_memory_ -
+ isolate_data->external_memory_at_last_mark_compact_);
}
bool Heap::ShouldOptimizeForLoadTime() {
@@ -4155,7 +4190,8 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (FLAG_stress_marking > 0) {
double gained_since_last_gc =
PromotedSinceLastGC() +
- (external_memory_ - external_memory_at_last_mark_compact_);
+ (isolate()->isolate_data()->external_memory_ -
+ isolate()->isolate_data()->external_memory_at_last_mark_compact_);
double size_before_gc =
OldGenerationObjectsAndPromotedExternalMemorySize() -
gained_since_last_gc;
@@ -4296,7 +4332,7 @@ HeapObject* Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
// TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
// parameter and just do what's necessary.
HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
- AllocationResult alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
+ AllocationResult alloc = code_lo_space()->AllocateRaw(size);
HeapObject* result;
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
@@ -4306,7 +4342,7 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
for (int i = 0; i < 2; i++) {
CollectGarbage(alloc.RetrySpace(),
GarbageCollectionReason::kAllocationFailure);
- alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
+ alloc = code_lo_space()->AllocateRaw(size);
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -4316,7 +4352,7 @@ HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
{
AlwaysAllocateScope scope(isolate());
- alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
+ alloc = code_lo_space()->AllocateRaw(size);
}
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
@@ -4360,7 +4396,7 @@ void Heap::SetUp() {
new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
mark_compact_collector_->weak_objects());
- if (FLAG_concurrent_marking) {
+ if (FLAG_concurrent_marking || FLAG_parallel_marking) {
MarkCompactCollector::MarkingWorklist* marking_worklist =
mark_compact_collector_->marking_worklist();
concurrent_marking_ = new ConcurrentMarking(
@@ -4385,6 +4421,7 @@ void Heap::SetUp() {
space_[MAP_SPACE] = map_space_ = new MapSpace(this);
space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
space_[NEW_LO_SPACE] = new_lo_space_ = new NewLargeObjectSpace(this);
+ space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
i++) {
@@ -4404,7 +4441,6 @@ void Heap::SetUp() {
live_object_stats_ = new ObjectStats(this);
dead_object_stats_ = new ObjectStats(this);
}
- scavenge_job_ = new ScavengeJob();
local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer(isolate());
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
@@ -4419,9 +4455,12 @@ void Heap::SetUp() {
}
#endif // ENABLE_MINOR_MC
- idle_scavenge_observer_ = new IdleScavengeObserver(
- *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
- new_space()->AddAllocationObserver(idle_scavenge_observer_);
+ if (FLAG_idle_time_scavenge) {
+ scavenge_job_ = new ScavengeJob();
+ idle_scavenge_observer_ = new IdleScavengeObserver(
+ *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
+ new_space()->AddAllocationObserver(idle_scavenge_observer_);
+ }
SetGetExternallyAllocatedMemoryInBytesCallback(
DefaultGetExternallyAllocatedMemoryInBytesCallback);
@@ -4438,11 +4477,10 @@ void Heap::SetUp() {
}
write_protect_code_memory_ = FLAG_write_protect_code_memory;
-
- external_reference_table_.Init(isolate_);
}
void Heap::InitializeHashSeed() {
+ DCHECK(!deserialization_complete_);
uint64_t new_hash_seed;
if (FLAG_hash_seed == 0) {
int64_t rnd = isolate()->random_number_generator()->NextInt64();
@@ -4450,7 +4488,8 @@ void Heap::InitializeHashSeed() {
} else {
new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
}
- hash_seed()->copy_in(0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
+ ReadOnlyRoots(this).hash_seed()->copy_in(
+ 0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
}
void Heap::SetStackLimits() {
@@ -4461,15 +4500,15 @@ void Heap::SetStackLimits() {
// Set up the special root array entries containing the stack limits.
// These are actually addresses, but the tag makes the GC ignore it.
- roots_[RootIndex::kStackLimit] = reinterpret_cast<Object*>(
+ roots_table()[RootIndex::kStackLimit] = reinterpret_cast<Object*>(
(isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
- roots_[RootIndex::kRealStackLimit] = reinterpret_cast<Object*>(
+ roots_table()[RootIndex::kRealStackLimit] = reinterpret_cast<Object*>(
(isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
void Heap::ClearStackLimits() {
- roots_[RootIndex::kStackLimit] = Smi::kZero;
- roots_[RootIndex::kRealStackLimit] = Smi::kZero;
+ roots_table()[RootIndex::kStackLimit] = Smi::kZero;
+ roots_table()[RootIndex::kRealStackLimit] = Smi::kZero;
}
int Heap::NextAllocationTimeout(int current_timeout) {
@@ -4529,25 +4568,12 @@ EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
return local_embedder_heap_tracer()->remote_tracer();
}
-void Heap::TracePossibleWrapper(JSObject* js_object) {
- DCHECK(js_object->IsApiWrapper());
- if (js_object->GetEmbedderFieldCount() >= 2 &&
- js_object->GetEmbedderField(0) &&
- js_object->GetEmbedderField(0) != ReadOnlyRoots(this).undefined_value() &&
- js_object->GetEmbedderField(1) != ReadOnlyRoots(this).undefined_value()) {
- DCHECK_EQ(0,
- reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2);
- local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
- reinterpret_cast<void*>(js_object->GetEmbedderField(0)),
- reinterpret_cast<void*>(js_object->GetEmbedderField(1))));
- }
-}
-
-void Heap::RegisterExternallyReferencedObject(Object** object) {
+void Heap::RegisterExternallyReferencedObject(Address* location) {
+  // The embedder is not aware of whether numbers are materialized as heap
+  // objects or just passed around as Smis.
- if (!(*object)->IsHeapObject()) return;
- HeapObject* heap_object = HeapObject::cast(*object);
+ ObjectPtr object(*location);
+ if (!object->IsHeapObject()) return;
+ HeapObject* heap_object = HeapObject::cast(object);
DCHECK(Contains(heap_object));
if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
incremental_marking()->WhiteToGreyAndPush(heap_object);
@@ -4582,9 +4608,13 @@ void Heap::TearDown() {
}
}
- new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
- delete idle_scavenge_observer_;
- idle_scavenge_observer_ = nullptr;
+ if (FLAG_idle_time_scavenge) {
+ new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
+ delete idle_scavenge_observer_;
+ idle_scavenge_observer_ = nullptr;
+ delete scavenge_job_;
+ scavenge_job_ = nullptr;
+ }
if (FLAG_stress_marking > 0) {
RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
@@ -4655,9 +4685,6 @@ void Heap::TearDown() {
delete local_embedder_heap_tracer_;
local_embedder_heap_tracer_ = nullptr;
- delete scavenge_job_;
- scavenge_job_ = nullptr;
-
isolate_->global_handles()->TearDown();
external_string_table_.TearDown();
@@ -4760,7 +4787,7 @@ Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
// fill in the new array.
int copy_to = 0;
for (int i = 0; i < array->length(); i++) {
- MaybeObject* element = array->Get(i);
+ MaybeObject element = array->Get(i);
if (element->IsCleared()) continue;
new_array->Set(copy_to++, element);
}
@@ -4834,14 +4861,14 @@ void Heap::CompactRetainedMaps(WeakArrayList* retained_maps) {
int new_number_of_disposed_maps = 0;
// This loop compacts the array by removing cleared weak cells.
for (int i = 0; i < length; i += 2) {
- MaybeObject* maybe_object = retained_maps->Get(i);
+ MaybeObject maybe_object = retained_maps->Get(i);
if (maybe_object->IsCleared()) {
continue;
}
DCHECK(maybe_object->IsWeak());
- MaybeObject* age = retained_maps->Get(i + 1);
+ MaybeObject age = retained_maps->Get(i + 1);
DCHECK(age->IsSmi());
if (i != new_length) {
retained_maps->Set(new_length, maybe_object);
@@ -4868,11 +4895,10 @@ void Heap::FatalProcessOutOfMemory(const char* location) {
class PrintHandleVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++)
- PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
- reinterpret_cast<void*>(*p));
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p)
+ PrintF(" handle %p to %p\n", p.ToVoidPtr(), reinterpret_cast<void*>(*p));
}
};
@@ -4891,8 +4917,8 @@ class CheckHandleCountVisitor : public RootVisitor {
~CheckHandleCountVisitor() override {
CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
handle_count_ += end - start;
}
@@ -4920,25 +4946,23 @@ Address Heap::store_buffer_overflow_function_address() {
return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
}
-void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
- Address slot_addr = reinterpret_cast<Address>(slot);
- Page* page = Page::FromAddress(slot_addr);
+void Heap::ClearRecordedSlot(HeapObject* object, ObjectSlot slot) {
+ Page* page = Page::FromAddress(slot.address());
if (!page->InNewSpace()) {
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
- store_buffer()->DeleteEntry(slot_addr);
+ store_buffer()->DeleteEntry(slot.address());
}
}
#ifdef DEBUG
-void Heap::VerifyClearedSlot(HeapObject* object, Object** slot) {
+void Heap::VerifyClearedSlot(HeapObject* object, ObjectSlot slot) {
if (InNewSpace(object)) return;
- Address slot_addr = reinterpret_cast<Address>(slot);
- Page* page = Page::FromAddress(slot_addr);
+ Page* page = Page::FromAddress(slot.address());
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
- CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr));
+ CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
// Old to old slots are filtered with invalidated slots.
- CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr),
+ CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
page->RegisteredObjectWithInvalidatedSlots(object));
}
#endif
@@ -5027,21 +5051,19 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
explicit MarkingVisitor(UnreachableObjectsFilter* filter)
: filter_(filter) {}
- void VisitPointers(HeapObject* host, Object** start,
- Object** end) override {
- MarkPointers(reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
+ MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
MarkPointers(start, end);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- MarkPointers(reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
+ MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
}
void TransitiveClosure() {
@@ -5053,9 +5075,9 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
private:
- void MarkPointers(MaybeObject** start, MaybeObject** end) {
+ void MarkPointers(MaybeObjectSlot start, MaybeObjectSlot end) {
// Treat weak references as strong.
- for (MaybeObject** p = start; p < end; p++) {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
HeapObject* heap_object;
if ((*p)->GetHeapObject(&heap_object)) {
if (filter_->MarkAsReachable(heap_object)) {
@@ -5072,7 +5094,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
void MarkReachableObjects() {
MarkingVisitor visitor(this);
- heap_->IterateRoots(&visitor, VISIT_ALL_BUT_READ_ONLY);
+ heap_->IterateRoots(&visitor, VISIT_ALL);
visitor.TransitiveClosure();
}
@@ -5083,8 +5105,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
HeapIterator::HeapIterator(Heap* heap,
HeapIterator::HeapObjectsFiltering filtering)
- : no_heap_allocation_(),
- heap_(heap),
+ : heap_(heap),
filtering_(filtering),
filter_(nullptr),
space_iterator_(nullptr),
@@ -5230,7 +5251,7 @@ void Heap::RememberUnmappedPage(Address page, bool compacted) {
remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}
-void Heap::RegisterStrongRoots(Object** start, Object** end) {
+void Heap::RegisterStrongRoots(ObjectSlot start, ObjectSlot end) {
StrongRootsList* list = new StrongRootsList();
list->next = strong_roots_list_;
list->start = start;
@@ -5238,8 +5259,7 @@ void Heap::RegisterStrongRoots(Object** start, Object** end) {
strong_roots_list_ = list;
}
-
-void Heap::UnregisterStrongRoots(Object** start) {
+void Heap::UnregisterStrongRoots(ObjectSlot start) {
StrongRootsList* prev = nullptr;
StrongRootsList* list = strong_roots_list_;
while (list != nullptr) {
@@ -5258,10 +5278,53 @@ void Heap::UnregisterStrongRoots(Object** start) {
}
}
-void Heap::SetBuiltinsConstantsTable(FixedArray* cache) {
+void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
set_builtins_constants_table(cache);
}
+void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
+ DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code->builtin_index());
+ set_interpreter_entry_trampoline_for_profiling(code);
+}
+
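The setter defined just above installs the template copy of the interpreter entry trampoline described by the accompanying comment in heap.h (see FLAG_interpreted_frames_native_stack later in this patch). A minimal usage sketch, not taken from this patch; `isolate` and `trampoline_copy` are illustrative names, and producing the on-heap copy itself is not shown:

    // `trampoline_copy` must be a full copy of the builtin that still reports
    // Builtins::kInterpreterEntryTrampoline from builtin_index(), otherwise
    // the DCHECK in the setter above fires.
    isolate->heap()->SetInterpreterEntryTrampolineForProfiling(trampoline_copy);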
+void Heap::AddDirtyJSWeakFactory(
+ JSWeakFactory* weak_factory,
+ std::function<void(HeapObject* object, ObjectSlot slot, Object* target)>
+ gc_notify_updated_slot) {
+ DCHECK(dirty_js_weak_factories()->IsUndefined(isolate()) ||
+ dirty_js_weak_factories()->IsJSWeakFactory());
+ DCHECK(weak_factory->next()->IsUndefined(isolate()));
+ DCHECK(!weak_factory->scheduled_for_cleanup());
+ weak_factory->set_scheduled_for_cleanup(true);
+ weak_factory->set_next(dirty_js_weak_factories());
+ gc_notify_updated_slot(
+ weak_factory,
+ HeapObject::RawField(weak_factory, JSWeakFactory::kNextOffset),
+ dirty_js_weak_factories());
+ set_dirty_js_weak_factories(weak_factory);
+ // Roots are rescanned after objects are moved, so no need to record a slot
+ // for the root pointing to the first JSWeakFactory.
+}
+
+void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
+ DCHECK(FLAG_harmony_weak_refs);
+ DCHECK(weak_refs_keep_during_job()->IsUndefined() ||
+ weak_refs_keep_during_job()->IsOrderedHashSet());
+ Handle<OrderedHashSet> table;
+ if (weak_refs_keep_during_job()->IsUndefined(isolate())) {
+ table = isolate()->factory()->NewOrderedHashSet();
+ } else {
+ table =
+ handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
+ }
+ table = OrderedHashSet::Add(isolate(), table, target);
+ set_weak_refs_keep_during_job(*table);
+}
+
+void Heap::ClearKeepDuringJobSet() {
+ set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
+}
+
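The two functions above implement the KeepDuringJob behaviour of the JS weak refs proposal: a WeakRef's target stays strongly reachable until the current job has run to completion, and the accumulated set is dropped at the microtask checkpoint. A minimal call-pattern sketch, not taken from this patch; `heap` and `target` are illustrative names for a Heap* and the Handle<JSReceiver> being wrapped:

    // On WeakRef creation, keep the target alive for the rest of the job.
    heap->AddKeepDuringJobTarget(target);
    // At the microtask checkpoint, once the job has finished, drop the whole
    // set so the targets become collectable again.
    heap->ClearKeepDuringJobSet();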
size_t Heap::NumberOfTrackedHeapObjectTypes() {
return ObjectStats::OBJECT_STATS_COUNT;
}
@@ -5310,7 +5373,7 @@ size_t Heap::NumberOfNativeContexts() {
Object* context = native_contexts_list();
while (!context->IsUndefined(isolate())) {
++result;
- Context* native_context = Context::cast(context);
+ Context native_context = Context::cast(context);
context = native_context->next_context_link();
}
return result;
@@ -5343,28 +5406,28 @@ const char* AllocationSpaceName(AllocationSpace space) {
return nullptr;
}
-void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
- Object** end) {
- VerifyPointers(host, reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+void VerifyPointersVisitor::VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) {
+ VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
-void VerifyPointersVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) {
+void VerifyPointersVisitor::VisitPointers(HeapObject* host,
+ MaybeObjectSlot start,
+ MaybeObjectSlot end) {
VerifyPointers(host, start, end);
}
void VerifyPointersVisitor::VisitRootPointers(Root root,
const char* description,
- Object** start, Object** end) {
- VerifyPointers(nullptr, reinterpret_cast<MaybeObject**>(start),
- reinterpret_cast<MaybeObject**>(end));
+ ObjectSlot start,
+ ObjectSlot end) {
+ VerifyPointers(nullptr, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
- MaybeObject** start,
- MaybeObject** end) {
- for (MaybeObject** current = start; current < end; current++) {
+ MaybeObjectSlot start,
+ MaybeObjectSlot end) {
+ for (MaybeObjectSlot current = start; current < end; ++current) {
HeapObject* object;
if ((*current)->GetHeapObject(&object)) {
CHECK(heap_->Contains(object));
@@ -5376,8 +5439,8 @@ void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
}
void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
+ ObjectSlot start, ObjectSlot end) {
+ for (ObjectSlot current = start; current < end; ++current) {
CHECK((*current)->IsSmi());
}
}
@@ -5408,6 +5471,7 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
case LO_SPACE:
+ case CODE_LO_SPACE:
case NEW_LO_SPACE:
case RO_SPACE:
return false;
@@ -5439,7 +5503,7 @@ void AllocationObserver::AllocationStep(int bytes_allocated,
namespace {
-Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
+Map GcSafeMapOfCodeSpaceObject(HeapObject* object) {
MapWord map_word = object->map_word();
return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
: map_word.ToMap();
@@ -5449,17 +5513,17 @@ int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
}
-Code* GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
- Code* code = reinterpret_cast<Code*>(object);
- DCHECK_NOT_NULL(code);
+Code GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
+ Code code = Code::unchecked_cast(object);
+ DCHECK(!code.is_null());
DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
return code;
}
} // namespace
-bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
- Map* map = GcSafeMapOfCodeSpaceObject(code);
+bool Heap::GcSafeCodeContains(Code code, Address addr) {
+ Map map = GcSafeMapOfCodeSpaceObject(code);
DCHECK(map == ReadOnlyRoots(this).code_map());
if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
Address start = code->address();
@@ -5467,12 +5531,12 @@ bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
return start <= addr && addr < end;
}
-Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
- Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
- if (code != nullptr) return code;
+Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
+ Code code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
+ if (!code.is_null()) return code;
// Check if the inner pointer points into a large object chunk.
- LargePage* large_page = lo_space()->FindPage(inner_pointer);
+ LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
if (large_page != nullptr) {
return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
}
@@ -5498,13 +5562,14 @@ Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
HeapObject* obj = HeapObject::FromAddress(addr);
int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
Address next_addr = addr + obj_size;
- if (next_addr > inner_pointer)
+ if (next_addr > inner_pointer) {
return GcSafeCastToCode(this, obj, inner_pointer);
+ }
addr = next_addr;
}
}
-void Heap::WriteBarrierForCodeSlow(Code* code) {
+void Heap::WriteBarrierForCodeSlow(Code code) {
for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
!it.done(); it.next()) {
GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
@@ -5518,19 +5583,19 @@ void Heap::GenerationalBarrierSlow(HeapObject* object, Address slot,
heap->store_buffer()->InsertEntry(slot);
}
-void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray* array,
+void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
int offset, int length) {
for (int i = 0; i < length; i++) {
if (!InNewSpace(array->get(offset + i))) continue;
heap->store_buffer()->InsertEntry(
- reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
+ array->RawFieldOfElementAt(offset + i).address());
}
}
-void Heap::GenerationalBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
+void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
HeapObject* object) {
DCHECK(InNewSpace(object));
- Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
+ Page* source_page = Page::FromAddress(host.ptr());
RelocInfo::Mode rmode = rinfo->rmode();
Address addr = rinfo->pc();
SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
@@ -5543,15 +5608,15 @@ void Heap::GenerationalBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
slot_type = OBJECT_SLOT;
}
}
- RememberedSet<OLD_TO_NEW>::InsertTyped(
- source_page, reinterpret_cast<Address>(host), slot_type, addr);
+ RememberedSet<OLD_TO_NEW>::InsertTyped(source_page, host.ptr(), slot_type,
+ addr);
}
void Heap::MarkingBarrierSlow(HeapObject* object, Address slot,
HeapObject* value) {
Heap* heap = Heap::FromWritableHeapObject(object);
- heap->incremental_marking()->RecordWriteSlow(
- object, reinterpret_cast<HeapObjectReference**>(slot), value);
+ heap->incremental_marking()->RecordWriteSlow(object, HeapObjectSlot(slot),
+ value);
}
void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject* object) {
@@ -5561,7 +5626,7 @@ void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject* object) {
}
}
-void Heap::MarkingBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
+void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
HeapObject* object) {
Heap* heap = Heap::FromWritableHeapObject(host);
DCHECK(heap->incremental_marking()->IsMarking());
@@ -5604,5 +5669,12 @@ void Heap::SetEmbedderStackStateForNextFinalizaton(
stack_state);
}
+#ifdef DEBUG
+void Heap::IncrementObjectCounters() {
+ isolate_->counters()->objs_since_last_full()->Increment();
+ isolate_->counters()->objs_since_last_young()->Increment();
+}
+#endif // DEBUG
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/heap.h b/chromium/v8/src/heap/heap.h
index c99f0d424e6..0cc919f6785 100644
--- a/chromium/v8/src/heap/heap.h
+++ b/chromium/v8/src/heap/heap.h
@@ -19,13 +19,13 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
-#include "src/external-reference-table.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/smi.h"
#include "src/objects/string-table.h"
-#include "src/roots.h"
#include "src/visitors.h"
namespace v8 {
@@ -52,130 +52,10 @@ class JSArrayBuffer;
class ExternalString;
using v8::MemoryPressureLevel;
-// Adapts PRIVATE_SYMBOL_LIST_GERNATOR entry to IMMORTAL_IMMOVABLE_ROOT_LIST
-// entry
-#define PRIVATE_SYMBOL_LIST_TO_IMMORTAL_IMMOVABLE_LIST_ADAPTER(V, name) V(name)
-
-// Heap roots that are known to be immortal immovable, for which we can safely
-// skip write barriers. This list is not complete and has omissions.
-#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
- V(ArgumentsMarker) \
- V(ArgumentsMarkerMap) \
- V(ArrayBufferNeuteringProtector) \
- V(ArrayIteratorProtector) \
- V(AwaitContextMap) \
- V(BigIntMap) \
- V(BlockContextMap) \
- V(ObjectBoilerplateDescriptionMap) \
- V(BooleanMap) \
- V(ByteArrayMap) \
- V(BytecodeArrayMap) \
- V(CatchContextMap) \
- V(CellMap) \
- V(CodeMap) \
- V(DebugEvaluateContextMap) \
- V(DescriptorArrayMap) \
- V(EphemeronHashTableMap) \
- V(EmptyByteArray) \
- V(EmptyDescriptorArray) \
- V(EmptyFixedArray) \
- V(EmptyFixedFloat32Array) \
- V(EmptyFixedFloat64Array) \
- V(EmptyFixedInt16Array) \
- V(EmptyFixedInt32Array) \
- V(EmptyFixedInt8Array) \
- V(EmptyFixedUint16Array) \
- V(EmptyFixedUint32Array) \
- V(EmptyFixedUint8Array) \
- V(EmptyFixedUint8ClampedArray) \
- V(EmptyOrderedHashMap) \
- V(EmptyOrderedHashSet) \
- V(EmptyPropertyCell) \
- V(EmptyScopeInfo) \
- V(EmptyScript) \
- V(EmptySloppyArgumentsElements) \
- V(EmptySlowElementDictionary) \
- V(EvalContextMap) \
- V(Exception) \
- V(FalseValue) \
- V(FixedArrayMap) \
- V(FixedCOWArrayMap) \
- V(FixedDoubleArrayMap) \
- V(ForeignMap) \
- V(FreeSpaceMap) \
- V(FunctionContextMap) \
- V(GlobalDictionaryMap) \
- V(GlobalPropertyCellMap) \
- V(HashTableMap) \
- V(HeapNumberMap) \
- V(HoleNanValue) \
- V(InfinityValue) \
- V(IsConcatSpreadableProtector) \
- V(JSMessageObjectMap) \
- V(JsConstructEntryCode) \
- V(JsEntryCode) \
- V(ManyClosuresCell) \
- V(ManyClosuresCellMap) \
- V(MetaMap) \
- V(MinusInfinityValue) \
- V(MinusZeroValue) \
- V(ModuleContextMap) \
- V(ModuleInfoMap) \
- V(MutableHeapNumberMap) \
- V(NameDictionaryMap) \
- V(NanValue) \
- V(NativeContextMap) \
- V(NoClosuresCellMap) \
- V(NoElementsProtector) \
- V(NullMap) \
- V(NullValue) \
- V(NumberDictionaryMap) \
- V(OneClosureCellMap) \
- V(OnePointerFillerMap) \
- V(OptimizedOut) \
- V(OrderedHashMapMap) \
- V(OrderedHashSetMap) \
- V(PreParsedScopeDataMap) \
- V(PropertyArrayMap) \
- V(ScopeInfoMap) \
- V(ScriptContextMap) \
- V(ScriptContextTableMap) \
- V(SelfReferenceMarker) \
- V(SharedFunctionInfoMap) \
- V(SimpleNumberDictionaryMap) \
- V(SloppyArgumentsElementsMap) \
- V(SmallOrderedHashMapMap) \
- V(SmallOrderedHashSetMap) \
- V(ArraySpeciesProtector) \
- V(TypedArraySpeciesProtector) \
- V(PromiseSpeciesProtector) \
- V(StaleRegister) \
- V(StringIteratorProtector) \
- V(StringLengthProtector) \
- V(StringTableMap) \
- V(SymbolMap) \
- V(TerminationException) \
- V(TheHoleMap) \
- V(TheHoleValue) \
- V(TransitionArrayMap) \
- V(TrueValue) \
- V(TwoPointerFillerMap) \
- V(UndefinedMap) \
- V(UndefinedValue) \
- V(UninitializedMap) \
- V(UninitializedValue) \
- V(UncompiledDataWithoutPreParsedScopeMap) \
- V(UncompiledDataWithPreParsedScopeMap) \
- V(WeakFixedArrayMap) \
- V(WeakArrayListMap) \
- V(WithContextMap) \
- V(empty_string) \
- PRIVATE_SYMBOL_LIST_GENERATOR( \
- PRIVATE_SYMBOL_LIST_TO_IMMORTAL_IMMOVABLE_LIST_ADAPTER, V)
-
class AllocationObserver;
class ArrayBufferCollector;
class ArrayBufferTracker;
+class CodeLargeObjectSpace;
class ConcurrentMarking;
class GCIdleTimeAction;
class GCIdleTimeHandler;
@@ -183,10 +63,12 @@ class GCIdleTimeHeapState;
class GCTracer;
class HeapController;
class HeapObjectAllocationTracker;
+class HeapObjectPtr;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
+class JSWeakFactory;
class LocalEmbedderHeapTracer;
class MemoryAllocator;
class MemoryReducer;
@@ -202,11 +84,10 @@ class ScavengerCollector;
class Space;
class StoreBuffer;
class StressScavengeObserver;
+class TimedHistogram;
class TracePossibleWrapperReporter;
class WeakObjectRetainer;
-typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-
enum ArrayStorageAllocationMode {
DONT_INITIALIZE_ARRAY_ELEMENTS,
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
@@ -271,7 +152,16 @@ class AllocationResult {
}
// Implicit constructor from Object*.
+ // TODO(3770): This constructor should go away eventually, replaced by
+ // the ObjectPtr alternative below.
AllocationResult(Object* object) // NOLINT
+ : object_(ObjectPtr(object->ptr())) {
+ // AllocationResults can't return Smis, which are used to represent
+ // failure and the space to retry in.
+ CHECK(!object->IsSmi());
+ }
+
+ AllocationResult(ObjectPtr object) // NOLINT
: object_(object) {
// AllocationResults can't return Smis, which are used to represent
// failure and the space to retry in.
@@ -284,18 +174,27 @@ class AllocationResult {
inline HeapObject* ToObjectChecked();
inline AllocationSpace RetrySpace();
- template <typename T>
+ template <typename T, typename = typename std::enable_if<
+ std::is_base_of<Object, T>::value>::type>
bool To(T** obj) {
if (IsRetry()) return false;
*obj = T::cast(object_);
return true;
}
+ template <typename T, typename = typename std::enable_if<
+ std::is_base_of<ObjectPtr, T>::value>::type>
+ bool To(T* obj) {
+ if (IsRetry()) return false;
+ *obj = T::cast(object_);
+ return true;
+ }
+
private:
explicit AllocationResult(AllocationSpace space)
: object_(Smi::FromInt(static_cast<int>(space))) {}
- Object* object_;
+ ObjectPtr object_;
};
STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
@@ -364,11 +263,8 @@ class Heap {
static const int kNoGCFlags = 0;
static const int kReduceMemoryFootprintMask = 1;
- // The roots that have an index less than this are always in old space.
- static const int kOldSpaceRoots = 0x20;
-
// The minimum size of a HeapObject on the heap.
- static const int kMinObjectSizeInWords = 2;
+ static const int kMinObjectSizeInTaggedWords = 2;
static const int kMinPromotedPercentForFastPromotionMode = 90;
@@ -394,15 +290,9 @@ class Heap {
void FatalProcessOutOfMemory(const char* location);
- V8_EXPORT_PRIVATE static bool RootIsImmortalImmovable(RootIndex root_index);
-
// Checks whether the space is valid.
static bool IsValidAllocationSpace(AllocationSpace space);
- // Generated code can embed direct references to non-writable roots if
- // they are in new space.
- static bool RootCanBeWrittenAfterInitialization(RootIndex root_index);
-
// Zapping is needed for verify heap, and always done in debug builds.
static inline bool ShouldZapGarbage() {
#ifdef DEBUG
@@ -448,20 +338,20 @@ class Heap {
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
- V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code* host);
+ V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host);
V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject* object,
Address slot,
HeapObject* value);
V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
- Heap* heap, FixedArray* array, int offset, int length);
+ Heap* heap, FixedArray array, int offset, int length);
V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
- Code* host, RelocInfo* rinfo, HeapObject* value);
+ Code host, RelocInfo* rinfo, HeapObject* value);
V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject* object,
Address slot,
HeapObject* value);
V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow(
Heap* heap, HeapObject* object);
- V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code* host,
+ V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host,
RelocInfo* rinfo,
HeapObject* value);
V8_EXPORT_PRIVATE static bool PageFlagsAreConsistent(HeapObject* object);
@@ -475,12 +365,9 @@ class Heap {
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();
- // FreeSpace objects have a null map after deserialization. Update the map.
- void RepairFreeListsAfterDeserialization();
-
// Move len elements within a given array from src_index index to dst_index
// index.
- void MoveElements(FixedArray* array, int dst_index, int src_index, int len,
+ void MoveElements(FixedArray array, int dst_index, int src_index, int len,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Initialize a filler object to keep the ability to iterate over the heap
@@ -495,18 +382,23 @@ class Heap {
ClearFreedMemoryMode::kDontClearFreedMemory);
template <typename T>
- void CreateFillerForArray(T* object, int elements_to_trim, int bytes_to_trim);
+ void CreateFillerForArray(T object, int elements_to_trim, int bytes_to_trim);
bool CanMoveObjectStart(HeapObject* object);
- static bool IsImmovable(HeapObject* object);
+ bool IsImmovable(HeapObject* object);
+
+ bool IsLargeObject(HeapObject* object);
+ inline bool IsWithinLargeObject(Address address);
+
+ bool IsInYoungGeneration(HeapObject* object);
// Trim the given array from the left. Note that this relocates the object
// start and hence is only valid if there is only a single reference to it.
- FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
+ FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj, int elements_to_trim);
// Trim the given array from the right.
- void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
+ void RightTrimFixedArray(FixedArrayBase obj, int elements_to_trim);
void RightTrimWeakFixedArray(WeakFixedArray* obj, int elements_to_trim);
// Converts the given boolean condition to JavaScript boolean value.
@@ -526,7 +418,9 @@ class Heap {
Object* allocation_sites_list() { return allocation_sites_list_; }
// Used in CreateAllocationSiteStub and the (de)serializer.
- Object** allocation_sites_list_address() { return &allocation_sites_list_; }
+ Address allocation_sites_list_address() {
+ return reinterpret_cast<Address>(&allocation_sites_list_);
+ }
   // Traverse all the allocation sites [nested_site and weak_next] in the list
   // and for each, call the visitor.
@@ -588,7 +482,7 @@ class Heap {
// If an object has an AllocationMemento trailing it, return it, otherwise
// return nullptr;
template <FindMementoMode mode>
- inline AllocationMemento* FindAllocationMemento(Map* map, HeapObject* object);
+ inline AllocationMemento* FindAllocationMemento(Map map, HeapObject* object);
// Returns false if not able to reserve.
bool ReserveSpace(Reservation* reservations, std::vector<Address>* maps);
@@ -632,25 +526,18 @@ class Heap {
inline int NextDebuggingId();
inline int GetNextTemplateSerialNumber();
- void SetSerializedObjects(FixedArray* objects);
- void SetSerializedGlobalProxySizes(FixedArray* sizes);
+ void SetSerializedObjects(FixedArray objects);
+ void SetSerializedGlobalProxySizes(FixedArray sizes);
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
- int64_t external_memory() { return external_memory_; }
- void update_external_memory(int64_t delta) { external_memory_ += delta; }
-
- void update_external_memory_concurrently_freed(intptr_t freed) {
- external_memory_concurrently_freed_ += freed;
- }
-
- void account_external_memory_concurrently_freed() {
- external_memory_ -= external_memory_concurrently_freed_;
- external_memory_concurrently_freed_ = 0;
- }
+ V8_INLINE int64_t external_memory();
+ V8_INLINE void update_external_memory(int64_t delta);
+ V8_INLINE void update_external_memory_concurrently_freed(intptr_t freed);
+ V8_INLINE void account_external_memory_concurrently_freed();
size_t backing_store_bytes() const { return backing_store_bytes_; }
@@ -737,6 +624,7 @@ class Heap {
CodeSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
+ CodeLargeObjectSpace* code_lo_space() { return code_lo_space_; }
NewLargeObjectSpace* new_lo_space() { return new_lo_space_; }
ReadOnlySpace* read_only_space() { return read_only_space_; }
@@ -771,79 +659,24 @@ class Heap {
// ===========================================================================
// Root set access. ==========================================================
// ===========================================================================
- friend class ReadOnlyRoots;
- public:
- RootsTable& roots_table() { return roots_; }
+ // Shortcut to the roots table stored in the Isolate.
+ V8_INLINE RootsTable& roots_table();
// Heap root getters.
-#define ROOT_ACCESSOR(type, name, CamelName) inline type* name();
+#define ROOT_ACCESSOR(type, name, CamelName) inline type name();
MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
- Object* root(RootIndex index) { return roots_[index]; }
- Handle<Object> root_handle(RootIndex index) {
- return Handle<Object>(&roots_[index]);
- }
-
- bool IsRootHandleLocation(Object** handle_location, RootIndex* index) const {
- return roots_.IsRootHandleLocation(handle_location, index);
- }
-
- template <typename T>
- bool IsRootHandle(Handle<T> handle, RootIndex* index) const {
- return roots_.IsRootHandle(handle, index);
- }
-
- // Generated code can embed this address to get access to the roots.
- Object** roots_array_start() { return roots_.roots_; }
-
- ExternalReferenceTable* external_reference_table() {
- DCHECK(external_reference_table_.is_initialized());
- return &external_reference_table_;
- }
-
- static constexpr int roots_to_external_reference_table_offset() {
- return kRootsExternalReferenceTableOffset;
- }
-
- static constexpr int roots_to_builtins_offset() {
- return kRootsBuiltinsOffset;
- }
-
- static constexpr int root_register_addressable_end_offset() {
- return kRootRegisterAddressableEndOffset;
- }
-
- Address root_register_addressable_end() {
- return reinterpret_cast<Address>(roots_array_start()) +
- kRootRegisterAddressableEndOffset;
- }
-
// Sets the stub_cache_ (only used when expanding the dictionary).
- void SetRootCodeStubs(SimpleNumberDictionary* value);
-
- void SetRootMaterializedObjects(FixedArray* objects) {
- roots_[RootIndex::kMaterializedObjects] = objects;
- }
-
- void SetRootScriptList(Object* value) {
- roots_[RootIndex::kScriptList] = value;
- }
-
- void SetRootStringTable(StringTable* value) {
- roots_[RootIndex::kStringTable] = value;
- }
-
- void SetRootNoScriptSharedFunctionInfos(Object* value) {
- roots_[RootIndex::kNoScriptSharedFunctionInfos] = value;
- }
-
- void SetMessageListeners(TemplateList* value) {
- roots_[RootIndex::kMessageListeners] = value;
- }
-
- // Set the stack limit in the roots_ array. Some architectures generate
+ V8_INLINE void SetRootCodeStubs(SimpleNumberDictionary value);
+ V8_INLINE void SetRootMaterializedObjects(FixedArray objects);
+ V8_INLINE void SetRootScriptList(Object* value);
+ V8_INLINE void SetRootStringTable(StringTable value);
+ V8_INLINE void SetRootNoScriptSharedFunctionInfos(Object* value);
+ V8_INLINE void SetMessageListeners(TemplateList value);
+
+ // Set the stack limit in the roots table. Some architectures generate
// code that looks here, because it is faster than loading from the static
// jslimit_/real_jslimit_ variable in the StackGuard.
void SetStackLimits();
@@ -852,13 +685,27 @@ class Heap {
// snapshot blob, we need to reset it before serializing.
void ClearStackLimits();
- // Generated code can treat direct references to this root as constant.
- bool RootCanBeTreatedAsConstant(RootIndex root_index);
+ void RegisterStrongRoots(ObjectSlot start, ObjectSlot end);
+ void UnregisterStrongRoots(ObjectSlot start);
+
+ void SetBuiltinsConstantsTable(FixedArray cache);
- void RegisterStrongRoots(Object** start, Object** end);
- void UnregisterStrongRoots(Object** start);
+ // A full copy of the interpreter entry trampoline, used as a template to
+ // create copies of the builtin at runtime. The copies are used to create
+ // better profiling information for ticks in bytecode execution. Note that
+ // this is always a copy of the full builtin, i.e. not the off-heap
+ // trampoline.
+ // See also: FLAG_interpreted_frames_native_stack.
+ void SetInterpreterEntryTrampolineForProfiling(Code code);
- void SetBuiltinsConstantsTable(FixedArray* cache);
+ // Add weak_factory into the dirty_js_weak_factories list.
+ void AddDirtyJSWeakFactory(
+ JSWeakFactory* weak_factory,
+ std::function<void(HeapObject* object, ObjectSlot slot, Object* target)>
+ gc_notify_updated_slot);
+
+ void AddKeepDuringJobTarget(Handle<JSReceiver> target);
+ void ClearKeepDuringJobSet();
// ===========================================================================
// Inline allocation. ========================================================
@@ -916,15 +763,22 @@ class Heap {
// Builtins. =================================================================
// ===========================================================================
- Code* builtin(int index);
+ Code builtin(int index);
Address builtin_address(int index);
- void set_builtin(int index, HeapObject* builtin);
+ void set_builtin(int index, Code builtin);
// ===========================================================================
// Iterators. ================================================================
// ===========================================================================
+ // None of these methods iterate over the read-only roots. To do this, use
+ // ReadOnlyRoots::Iterate. Read-only root iteration is not necessary for
+ // garbage collection and is usually only performed as part of
+ // (de)serialization or heap verification.
+
+ // Iterates over the strong roots and the weak roots.
void IterateRoots(RootVisitor* v, VisitMode mode);
+ // Iterates over the strong roots.
void IterateStrongRoots(RootVisitor* v, VisitMode mode);
// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
@@ -951,11 +805,11 @@ class Heap {
static intptr_t store_buffer_mask_constant();
static Address store_buffer_overflow_function_address();
- void ClearRecordedSlot(HeapObject* object, Object** slot);
+ void ClearRecordedSlot(HeapObject* object, ObjectSlot slot);
void ClearRecordedSlotRange(Address start, Address end);
#ifdef DEBUG
- void VerifyClearedSlot(HeapObject* object, Object** slot);
+ void VerifyClearedSlot(HeapObject* object, ObjectSlot slot);
#endif
// ===========================================================================
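Most of this patch replaces raw Object** slot parameters with ObjectSlot, a typed wrapper around an Address. A rough, much-simplified sketch of what such a wrapper can look like (hypothetical, not V8's real slot classes), supporting the operations used in these files (address(), dereference, increment, comparison):

    #include <cstdint>

    class Object;                // opaque heap object type
    using Address = uintptr_t;   // mirrors V8's Address typedef

    // A slot is the address of a word in the heap that holds an Object*.
    class ObjectSlot {
     public:
      ObjectSlot() = default;
      explicit ObjectSlot(Address ptr) : ptr_(ptr) {}

      Address address() const { return ptr_; }
      Object* operator*() const {
        return *reinterpret_cast<Object* const*>(ptr_);
      }
      void store(Object* value) const {
        *reinterpret_cast<Object**>(ptr_) = value;
      }

      ObjectSlot& operator++() { ptr_ += sizeof(Object*); return *this; }
      bool operator<(ObjectSlot other) const { return ptr_ < other.ptr_; }

     private:
      Address ptr_ = 0;
    };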
@@ -1009,7 +863,7 @@ class Heap {
// This function checks that either
// - the map transition is safe,
// - or it was communicated to GC using NotifyObjectLayoutChange.
- void VerifyObjectLayoutChange(HeapObject* object, Map* new_map);
+ void VerifyObjectLayoutChange(HeapObject* object, Map new_map);
#endif
// ===========================================================================
@@ -1022,13 +876,9 @@ class Heap {
void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
void SetInterpreterEntryReturnPCOffset(int pc_offset);
- // Invalidates references in the given {code} object that are directly
- // embedded within the instruction stream. Mutates write-protected code.
- void InvalidateCodeEmbeddedObjects(Code* code);
-
// Invalidates references in the given {code} object that are referenced
// transitively from the deoptimization data. Mutates write-protected code.
- void InvalidateCodeDeoptimizationData(Code* code);
+ void InvalidateCodeDeoptimizationData(Code code);
void DeoptMarkedAllocationSites();
@@ -1045,8 +895,7 @@ class Heap {
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
EmbedderHeapTracer* GetEmbedderHeapTracer() const;
- void TracePossibleWrapper(JSObject* js_object);
- void RegisterExternallyReferencedObject(Object** object);
+ void RegisterExternallyReferencedObject(Address* location);
void SetEmbedderStackStateForNextFinalizaton(
EmbedderHeapTracer::EmbedderStackState stack_state);
@@ -1055,19 +904,19 @@ class Heap {
// ===========================================================================
// Registers an external string.
- inline void RegisterExternalString(String* string);
+ inline void RegisterExternalString(String string);
// Called when a string's resource is changed. The size of the payload is sent
// as argument of the method.
- inline void UpdateExternalString(String* string, size_t old_payload,
+ inline void UpdateExternalString(String string, size_t old_payload,
size_t new_payload);
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
- inline void FinalizeExternalString(String* string);
+ inline void FinalizeExternalString(String string);
- static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
- Heap* heap, Object** pointer);
+ static String UpdateNewSpaceReferenceInExternalStringTableEntry(
+ Heap* heap, ObjectSlot pointer);
// ===========================================================================
// Methods checking/returning the space of a given object/address. ===========
@@ -1075,14 +924,16 @@ class Heap {
// Returns whether the object resides in new space.
static inline bool InNewSpace(Object* object);
- static inline bool InNewSpace(MaybeObject* object);
+ static inline bool InNewSpace(MaybeObject object);
static inline bool InNewSpace(HeapObject* heap_object);
+ static inline bool InNewSpace(HeapObjectPtr heap_object);
static inline bool InFromSpace(Object* object);
- static inline bool InFromSpace(MaybeObject* object);
+ static inline bool InFromSpace(MaybeObject object);
static inline bool InFromSpace(HeapObject* heap_object);
static inline bool InToSpace(Object* object);
- static inline bool InToSpace(MaybeObject* object);
+ static inline bool InToSpace(MaybeObject object);
static inline bool InToSpace(HeapObject* heap_object);
+ static inline bool InToSpace(HeapObjectPtr heap_object);
// Returns whether the object resides in old space.
inline bool InOldSpace(Object* object);
@@ -1100,14 +951,16 @@ class Heap {
// Slow methods that can be used for verification as they can also be used
// with off-heap Addresses.
- bool ContainsSlow(Address addr);
bool InSpaceSlow(Address addr, AllocationSpace space);
- inline bool InNewSpaceSlow(Address address);
- inline bool InOldSpaceSlow(Address address);
// Find the heap which owns this HeapObject. Should never be called for
// objects in RO space.
static inline Heap* FromWritableHeapObject(const HeapObject* obj);
+ // This takes a HeapObjectPtr* (as opposed to a plain HeapObjectPtr)
+ // to keep the WRITE_BARRIER macro syntax-compatible with the HeapObject*
+ // version above.
+ // TODO(3770): This should probably take a HeapObjectPtr eventually.
+ static inline Heap* FromWritableHeapObject(const HeapObjectPtr* obj);
// ===========================================================================
// Object statistics tracking. ===============================================
@@ -1176,9 +1029,8 @@ class Heap {
// Returns the capacity of the old generation.
size_t OldGenerationCapacity();
- // Returns the amount of memory currently committed for the heap and memory
- // held alive by the unmapper.
- size_t CommittedMemoryOfHeapAndUnmapper();
+ // Returns the amount of memory currently held alive by the unmapper.
+ size_t CommittedMemoryOfUnmapper();
// Returns the amount of memory currently committed for the heap.
size_t CommittedMemory();
@@ -1339,7 +1191,7 @@ class Heap {
// Updates the AllocationSite of a given {object}. The entry (including the
// count) is cached on the local pretenuring feedback.
inline void UpdateAllocationSite(
- Map* map, HeapObject* object,
+ Map map, HeapObject* object,
PretenuringFeedbackMap* pretenuring_feedback);
// Merges local pretenuring feedback into the global one. Note that this
@@ -1390,13 +1242,12 @@ class Heap {
// Stack frame support. ======================================================
// ===========================================================================
- // Returns the Code object for a given interior pointer. Returns nullptr if
- // {inner_pointer} is not contained within a Code object.
- Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
+ // Returns the Code object for a given interior pointer.
+ Code GcSafeFindCodeForInnerPointer(Address inner_pointer);
// Returns true if {addr} is contained within {code} and false otherwise.
// Mostly useful for debugging.
- bool GcSafeCodeContains(HeapObject* code, Address addr);
+ bool GcSafeCodeContains(Code code, Address addr);
// =============================================================================
#ifdef VERIFY_HEAP
@@ -1448,8 +1299,8 @@ class Heap {
private:
class SkipStoreBufferScope;
- typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
- Object** pointer);
+ typedef String (*ExternalStringTableUpdaterCallback)(Heap* heap,
+ ObjectSlot pointer);
// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
@@ -1459,8 +1310,8 @@ class Heap {
explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
// Registers an external string.
- inline void AddString(String* string);
- bool Contains(HeapObject* obj);
+ inline void AddString(String string);
+ bool Contains(String string);
void IterateAll(RootVisitor* v);
void IterateNewSpaceStrings(RootVisitor* v);
@@ -1564,8 +1415,7 @@ class Heap {
return 0;
}
-#define ROOT_ACCESSOR(type, name, CamelName) \
- inline void set_##name(type* value);
+#define ROOT_ACCESSOR(type, name, CamelName) inline void set_##name(type value);
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
@@ -1697,8 +1547,8 @@ class Heap {
// - GCFinalizeMC: finalization of incremental full GC
// - GCFinalizeMCReduceMemory: finalization of incremental full GC with
// memory reduction
- HistogramTimer* GCTypeTimer(GarbageCollector collector);
- HistogramTimer* GCTypePriorityTimer(GarbageCollector collector);
+ TimedHistogram* GCTypeTimer(GarbageCollector collector);
+ TimedHistogram* GCTypePriorityTimer(GarbageCollector collector);
// ===========================================================================
// Pretenuring. ==============================================================
@@ -1882,7 +1732,7 @@ class Heap {
HeapObject* AllocateRawCodeInLargeObjectSpace(int size);
// Allocates a heap object based on the map.
- V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map* map,
+ V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
AllocationSpace space);
// Takes a code object and checks if it is on memory which is not subject to
@@ -1894,7 +1744,7 @@ class Heap {
V8_WARN_UNUSED_RESULT AllocationResult
AllocatePartialMap(InstanceType instance_type, int instance_size);
- void FinalizePartialMap(Map* map);
+ void FinalizePartialMap(Map map);
// Allocate empty fixed typed array of given type.
V8_WARN_UNUSED_RESULT AllocationResult
@@ -1914,14 +1764,9 @@ class Heap {
bool IsRetainingPathTarget(HeapObject* object, RetainingPathOption* option);
void PrintRetainingPath(HeapObject* object, RetainingPathOption option);
- // The amount of external memory registered through the API.
- int64_t external_memory_ = 0;
-
- // The limit when to trigger memory pressure from the API.
- int64_t external_memory_limit_ = kExternalAllocationSoftLimit;
-
- // Caches the amount of external memory registered at the last MC.
- int64_t external_memory_at_last_mark_compact_ = 0;
+#ifdef DEBUG
+ void IncrementObjectCounters();
+#endif // DEBUG
// The amount of memory that has been freed concurrently.
std::atomic<intptr_t> external_memory_concurrently_freed_{0};
@@ -1930,29 +1775,6 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_ = nullptr;
- RootsTable roots_;
-
- // This table is accessed from builtin code compiled into the snapshot, and
- // thus its offset from roots_ must remain static. This is verified in
- // Isolate::Init() using runtime checks.
- static constexpr int kRootsExternalReferenceTableOffset =
- static_cast<int>(RootIndex::kRootListLength) * kPointerSize;
- ExternalReferenceTable external_reference_table_;
-
- // As external references above, builtins are accessed through an offset from
- // the roots register. Its offset from roots_ must remain static. This is
- // verified in Isolate::Init() using runtime checks.
- static constexpr int kRootsBuiltinsOffset =
- kRootsExternalReferenceTableOffset +
- ExternalReferenceTable::SizeInBytes();
- Object* builtins_[Builtins::builtin_count];
-
- // kRootRegister may be used to address any location that starts at the
- // Isolate and ends at this point. Fields past this point are not guaranteed
- // to live at a static offset from kRootRegister.
- static constexpr int kRootRegisterAddressableEndOffset =
- kRootsBuiltinsOffset + Builtins::builtin_count * kPointerSize;
-
size_t code_range_size_ = 0;
size_t max_semi_space_size_ = 8 * (kPointerSize / 4) * MB;
size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
@@ -1996,6 +1818,7 @@ class Heap {
CodeSpace* code_space_ = nullptr;
MapSpace* map_space_ = nullptr;
LargeObjectSpace* lo_space_ = nullptr;
+ CodeLargeObjectSpace* code_lo_space_ = nullptr;
NewLargeObjectSpace* new_lo_space_ = nullptr;
ReadOnlySpace* read_only_space_ = nullptr;
// Map from the space id to the space.
@@ -2227,6 +2050,7 @@ class Heap {
friend class ObjectStatsCollector;
friend class Page;
friend class PagedSpace;
+ friend class ReadOnlyRoots;
friend class Scavenger;
friend class ScavengerCollector;
friend class Space;
@@ -2267,21 +2091,22 @@ class HeapStats {
size_t* map_space_size; // 9
size_t* map_space_capacity; // 10
size_t* lo_space_size; // 11
- size_t* global_handle_count; // 12
- size_t* weak_global_handle_count; // 13
- size_t* pending_global_handle_count; // 14
- size_t* near_death_global_handle_count; // 15
- size_t* free_global_handle_count; // 16
- size_t* memory_allocator_size; // 17
- size_t* memory_allocator_capacity; // 18
- size_t* malloced_memory; // 19
- size_t* malloced_peak_memory; // 20
- size_t* objects_per_type; // 21
- size_t* size_per_type; // 22
- int* os_error; // 23
- char* last_few_messages; // 24
- char* js_stacktrace; // 25
- intptr_t* end_marker; // 26
+ size_t* code_lo_space_size; // 12
+ size_t* global_handle_count; // 13
+ size_t* weak_global_handle_count; // 14
+ size_t* pending_global_handle_count; // 15
+ size_t* near_death_global_handle_count; // 16
+ size_t* free_global_handle_count; // 17
+ size_t* memory_allocator_size; // 18
+ size_t* memory_allocator_capacity; // 19
+ size_t* malloced_memory; // 20
+ size_t* malloced_peak_memory; // 21
+ size_t* objects_per_type; // 22
+ size_t* size_per_type; // 23
+ int* os_error; // 24
+ char* last_few_messages; // 25
+ char* js_stacktrace; // 26
+ intptr_t* end_marker; // 27
};
@@ -2330,7 +2155,7 @@ class CodePageMemoryModificationScope {
// Disallow any GCs inside this scope, as a relocation of the underlying
// object would change the {MemoryChunk} that this scope targets.
- DisallowHeapAllocation no_heap_allocation_;
+ DISALLOW_HEAP_ALLOCATION(no_heap_allocation_);
};
// Visitor class to verify interior pointers in spaces that do not contain
@@ -2341,15 +2166,16 @@ class CodePageMemoryModificationScope {
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
public:
explicit VerifyPointersVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override;
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override;
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override;
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override;
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override;
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override;
protected:
- virtual void VerifyPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end);
+ virtual void VerifyPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end);
Heap* heap_;
};
@@ -2358,8 +2184,8 @@ class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
// Verify that all objects are Smis.
class VerifySmisVisitor : public RootVisitor {
public:
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override;
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override;
};
// Space iterator for iterating over all the paged spaces of the heap: Map
@@ -2421,7 +2247,7 @@ class HeapIterator {
private:
HeapObject* NextObject();
- DisallowHeapAllocation no_heap_allocation_;
+ DISALLOW_HEAP_ALLOCATION(no_heap_allocation_);
Heap* heap_;
HeapObjectsFiltering filtering_;
diff --git a/chromium/v8/src/heap/incremental-marking-inl.h b/chromium/v8/src/heap/incremental-marking-inl.h
index e19d62f4d40..14513f61b93 100644
--- a/chromium/v8/src/heap/incremental-marking-inl.h
+++ b/chromium/v8/src/heap/incremental-marking-inl.h
@@ -33,25 +33,23 @@ void IncrementalMarking::TransferColor(HeapObject* from, HeapObject* to) {
}
}
-void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
+void IncrementalMarking::RecordWrite(HeapObject* obj, ObjectSlot slot,
Object* value) {
- DCHECK_IMPLIES(slot != nullptr, !HasWeakHeapObjectTag(*slot));
+ DCHECK_IMPLIES(slot.address() != kNullAddress, !HasWeakHeapObjectTag(*slot));
DCHECK(!HasWeakHeapObjectTag(value));
if (IsMarking() && value->IsHeapObject()) {
- RecordWriteSlow(obj, reinterpret_cast<HeapObjectReference**>(slot),
- HeapObject::cast(value));
+ RecordWriteSlow(obj, HeapObjectSlot(slot), HeapObject::cast(value));
}
}
void IncrementalMarking::RecordMaybeWeakWrite(HeapObject* obj,
- MaybeObject** slot,
- MaybeObject* value) {
+ MaybeObjectSlot slot,
+ MaybeObject value) {
// When writing a weak reference, treat it as strong for the purposes of the
// marking barrier.
HeapObject* heap_object;
if (IsMarking() && value->GetHeapObject(&heap_object)) {
- RecordWriteSlow(obj, reinterpret_cast<HeapObjectReference**>(slot),
- heap_object);
+ RecordWriteSlow(obj, HeapObjectSlot(slot), heap_object);
}
}
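The barrier logic itself is unchanged by the slot-type migration: if incremental marking is active and the written value is (or may be) a heap object, the slot is handed to the slow path for recording. A standalone sketch of that fast-path shape, with toy types standing in for V8's:

    #include <cstdio>

    struct HeapObject {};

    class MarkingBarrier {
     public:
      void set_marking(bool on) { marking_ = on; }

      // Called after "*slot = value" has already been performed.
      void RecordWrite(HeapObject* host, HeapObject** slot, HeapObject* value) {
        // Fast path: bail out unless marking is active and the value is an
        // actual heap object worth recording.
        if (marking_ && value != nullptr) RecordWriteSlow(host, slot, value);
      }

     private:
      void RecordWriteSlow(HeapObject* host, HeapObject** slot,
                           HeapObject* value) {
        std::printf("record slot %p of host %p -> value %p\n",
                    static_cast<void*>(slot), static_cast<void*>(host),
                    static_cast<void*>(value));
      }

      bool marking_ = false;
    };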
diff --git a/chromium/v8/src/heap/incremental-marking-job.cc b/chromium/v8/src/heap/incremental-marking-job.cc
index 96eff0508ef..836b491d8f5 100644
--- a/chromium/v8/src/heap/incremental-marking-job.cc
+++ b/chromium/v8/src/heap/incremental-marking-job.cc
@@ -5,6 +5,7 @@
#include "src/heap/incremental-marking-job.h"
#include "src/base/platform/time.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
@@ -15,6 +16,29 @@
namespace v8 {
namespace internal {
+class IncrementalMarkingJob::Task : public CancelableTask {
+ public:
+ static void Step(Heap* heap,
+ EmbedderHeapTracer::EmbedderStackState stack_state);
+
+ Task(Isolate* isolate, IncrementalMarkingJob* job,
+ EmbedderHeapTracer::EmbedderStackState stack_state)
+ : CancelableTask(isolate),
+ isolate_(isolate),
+ job_(job),
+ stack_state_(stack_state) {}
+
+ // CancelableTask overrides.
+ void RunInternal() override;
+
+ Isolate* isolate() const { return isolate_; }
+
+ private:
+ Isolate* const isolate_;
+ IncrementalMarkingJob* const job_;
+ const EmbedderHeapTracer::EmbedderStackState stack_state_;
+};
+
void IncrementalMarkingJob::Start(Heap* heap) {
DCHECK(!heap->incremental_marking()->IsStopped());
ScheduleTask(heap);
@@ -26,19 +50,32 @@ void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
task_pending_ = true;
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
- taskrunner->PostTask(base::make_unique<Task>(heap->isolate(), this));
+ if (taskrunner->NonNestableTasksEnabled()) {
+ taskrunner->PostNonNestableTask(base::make_unique<Task>(
+ heap->isolate(), this,
+ EmbedderHeapTracer::EmbedderStackState::kEmpty));
+ } else {
+ taskrunner->PostTask(base::make_unique<Task>(
+ heap->isolate(), this,
+ EmbedderHeapTracer::EmbedderStackState::kUnknown));
+ }
}
}
-void IncrementalMarkingJob::Task::Step(Heap* heap) {
+void IncrementalMarkingJob::Task::Step(
+ Heap* heap, EmbedderHeapTracer::EmbedderStackState stack_state) {
const int kIncrementalMarkingDelayMs = 1;
double deadline =
heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
heap->incremental_marking()->AdvanceIncrementalMarking(
deadline, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::StepOrigin::kTask);
- heap->FinalizeIncrementalMarkingIfComplete(
- GarbageCollectionReason::kFinalizeMarkingViaTask);
+ {
+ EmbedderStackStateScope scope(heap->local_embedder_heap_tracer(),
+ stack_state);
+ heap->FinalizeIncrementalMarkingIfComplete(
+ GarbageCollectionReason::kFinalizeMarkingViaTask);
+ }
}
void IncrementalMarkingJob::Task::RunInternal() {
@@ -61,7 +98,7 @@ void IncrementalMarkingJob::Task::RunInternal() {
job_->task_pending_ = false;
if (!incremental_marking->IsStopped()) {
- Step(heap);
+ Step(heap, stack_state_);
if (!incremental_marking->IsStopped()) {
job_->ScheduleTask(heap);
}
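The scheduling branch above picks the reported stack state based on the task flavor: a non-nestable task will not run from a nested message loop, which is presumably why it can be tagged kEmpty, while a regular task keeps the conservative kUnknown. A condensed sketch of the same decision, with hypothetical stand-ins for v8::TaskRunner and the embedder stack state:

    #include <memory>

    enum class StackState { kUnknown, kEmpty };

    struct MarkingTask { StackState stack_state; };

    // Hypothetical subset of a task-runner interface.
    struct TaskRunner {
      virtual ~TaskRunner() = default;
      virtual bool NonNestableTasksEnabled() const = 0;
      virtual void PostTask(std::unique_ptr<MarkingTask> task) = 0;
      virtual void PostNonNestableTask(std::unique_ptr<MarkingTask> task) = 0;
    };

    void ScheduleMarkingTask(TaskRunner* runner) {
      if (runner->NonNestableTasksEnabled()) {
        // Never runs inside a nested loop: no frames expected on the stack.
        runner->PostNonNestableTask(
            std::make_unique<MarkingTask>(MarkingTask{StackState::kEmpty}));
      } else {
        // May run nested; stay conservative about what is on the stack.
        runner->PostTask(
            std::make_unique<MarkingTask>(MarkingTask{StackState::kUnknown}));
      }
    }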
diff --git a/chromium/v8/src/heap/incremental-marking-job.h b/chromium/v8/src/heap/incremental-marking-job.h
index 902989b6133..a2202c7504c 100644
--- a/chromium/v8/src/heap/incremental-marking-job.h
+++ b/chromium/v8/src/heap/incremental-marking-job.h
@@ -18,31 +18,18 @@ class Isolate;
// step and posts another task until the marking is completed.
class IncrementalMarkingJob {
public:
- class Task : public CancelableTask {
- public:
- explicit Task(Isolate* isolate, IncrementalMarkingJob* job)
- : CancelableTask(isolate), isolate_(isolate), job_(job) {}
- static void Step(Heap* heap);
- // CancelableTask overrides.
- void RunInternal() override;
+ IncrementalMarkingJob() = default;
- Isolate* isolate() { return isolate_; }
-
- private:
- Isolate* isolate_;
- IncrementalMarkingJob* job_;
- };
-
- IncrementalMarkingJob() : task_pending_(false) {}
-
- bool TaskPending() { return task_pending_; }
+ bool TaskPending() const { return task_pending_; }
void Start(Heap* heap);
void ScheduleTask(Heap* heap);
private:
- bool task_pending_;
+ class Task;
+
+ bool task_pending_ = false;
};
} // namespace internal
} // namespace v8
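Replacing the public nested Task class with a private forward declaration (class Task;) moves the definition into the .cc file shown above, keeping details such as the new EmbedderStackState member out of the header. A minimal sketch of that header-hygiene pattern with made-up names:

    // widget.h -- hypothetical illustration of the same move.
    class Widget {
     public:
      void Schedule();

     private:
      class Task;               // defined only in widget.cc
      bool task_pending_ = false;
    };

    // widget.cc would then contain:
    //   class Widget::Task { /* heavy dependencies live here */ };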
diff --git a/chromium/v8/src/heap/incremental-marking.cc b/chromium/v8/src/heap/incremental-marking.cc
index 239f416eaf3..c62513328dd 100644
--- a/chromium/v8/src/heap/incremental-marking.cc
+++ b/chromium/v8/src/heap/incremental-marking.cc
@@ -8,6 +8,7 @@
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/concurrent-marking.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
@@ -18,6 +19,7 @@
#include "src/heap/objects-visiting.h"
#include "src/heap/sweeper.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/slots-inl.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
#include "src/visitors.h"
@@ -45,7 +47,7 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
HeapObject* object = HeapObject::FromAddress(addr);
if (incremental_marking_.marking_state()->IsWhite(object) &&
!(Heap::InNewSpace(object) || heap->new_lo_space()->Contains(object))) {
- if (heap->lo_space()->Contains(object)) {
+ if (heap->IsLargeObject(object)) {
incremental_marking_.marking_state()->WhiteToBlack(object);
} else {
Page::FromAddress(addr)->CreateBlackArea(addr, addr + size);
@@ -95,26 +97,27 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
return is_compacting_ && need_recording;
}
-void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
- HeapObjectReference** slot,
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj, HeapObjectSlot slot,
Object* value) {
- if (BaseRecordWrite(obj, value) && slot != nullptr) {
+ if (BaseRecordWrite(obj, value) && slot.address() != kNullAddress) {
// Object is not going to be rescanned, so we need to record the slot.
heap_->mark_compact_collector()->RecordSlot(obj, slot,
HeapObject::cast(value));
}
}
-int IncrementalMarking::RecordWriteFromCode(HeapObject* obj, MaybeObject** slot,
+int IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
+ Address slot_address,
Isolate* isolate) {
DCHECK(obj->IsHeapObject());
+ MaybeObjectSlot slot(slot_address);
isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(obj, slot,
*slot);
// Called by RecordWriteCodeStubAssembler, which doesn't accept a void type
return 0;
}
-void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
+void IncrementalMarking::RecordWriteIntoCode(Code host, RelocInfo* rinfo,
HeapObject* value) {
DCHECK(IsMarking());
if (BaseRecordWrite(host, value)) {
@@ -134,7 +137,7 @@ bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
// Marking left-trimmable fixed array black is unsafe because left-trimming
// re-pushes only grey arrays onto the marking worklist.
- DCHECK(!obj->IsFixedArrayBase());
+ DCHECK(!obj->IsFixedArray() && !obj->IsFixedDoubleArray());
// Color the object black and push it into the bailout deque.
marking_state()->WhiteToGrey(obj);
if (marking_state()->GreyToBlack(obj)) {
@@ -217,17 +220,17 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
: heap_(incremental_marking->heap()) {}
void VisitRootPointer(Root root, const char* description,
- Object** p) override {
+ ObjectSlot p) override {
MarkObjectByPointer(p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
}
private:
- void MarkObjectByPointer(Object** p) {
+ void MarkObjectByPointer(ObjectSlot p) {
Object* obj = *p;
if (!obj->IsHeapObject()) return;
@@ -262,6 +265,10 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
for (LargePage* p : *heap_->lo_space()) {
p->SetOldGenerationPageFlags(false);
}
+
+ for (LargePage* p : *heap_->code_lo_space()) {
+ p->SetOldGenerationPageFlags(false);
+ }
}
@@ -288,6 +295,10 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
for (LargePage* p : *heap_->lo_space()) {
p->SetOldGenerationPageFlags(true);
}
+
+ for (LargePage* p : *heap_->code_lo_space()) {
+ p->SetOldGenerationPageFlags(true);
+ }
}
@@ -384,7 +395,7 @@ void IncrementalMarking::StartMarking() {
{
TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
+ GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
heap_->local_embedder_heap_tracer()->TracePrologue();
}
@@ -466,7 +477,7 @@ void IncrementalMarking::MarkRoots() {
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}
-bool IncrementalMarking::ShouldRetainMap(Map* map, int age) {
+bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
if (age == 0) {
// The map has aged. Do not retain this map.
return false;
@@ -495,14 +506,14 @@ void IncrementalMarking::RetainMaps() {
// We do not age and retain disposed maps to avoid memory leaks.
int number_of_disposed_maps = heap()->number_of_disposed_maps_;
for (int i = 0; i < length; i += 2) {
- MaybeObject* value = retained_maps->Get(i);
+ MaybeObject value = retained_maps->Get(i);
HeapObject* map_heap_object;
if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
continue;
}
- int age = Smi::ToInt(retained_maps->Get(i + 1)->cast<Smi>());
+ int age = retained_maps->Get(i + 1).ToSmi().value();
int new_age;
- Map* map = Map::cast(map_heap_object);
+ Map map = Map::cast(map_heap_object);
if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
marking_state()->IsWhite(map)) {
if (ShouldRetainMap(map, age)) {
@@ -565,7 +576,7 @@ void IncrementalMarking::FinalizeIncrementally() {
void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
if (!IsMarking()) return;
- Map* filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
+ Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
#ifdef ENABLE_MINOR_MC
MinorMarkCompactCollector::MarkingState* minor_marking_state =
@@ -574,7 +585,12 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
void* minor_marking_state = nullptr;
#endif // ENABLE_MINOR_MC
- marking_worklist()->Update([this, filler_map, minor_marking_state](
+ marking_worklist()->Update([
+#ifdef DEBUG
+ // this is referred to inside the DCHECK.
+ this,
+#endif
+ filler_map, minor_marking_state](
HeapObject* obj, HeapObject** out) -> bool {
DCHECK(obj->IsHeapObject());
// Only pointers to from space have to be updated.
@@ -631,7 +647,8 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
}
namespace {
-template <typename T>
+template <typename T, typename = typename std::enable_if<
+ std::is_base_of<HeapObject, T>::value>::type>
T* ForwardingAddress(T* heap_obj) {
MapWord map_word = heap_obj->map_word();
@@ -643,31 +660,45 @@ T* ForwardingAddress(T* heap_obj) {
return heap_obj;
}
}
+
+// TODO(3770): Replacement for the above.
+template <typename T, typename = typename std::enable_if<
+ std::is_base_of<HeapObjectPtr, T>::value>::type>
+T ForwardingAddress(T heap_obj) {
+ MapWord map_word = heap_obj->map_word();
+
+ if (map_word.IsForwardingAddress()) {
+ return T::cast(map_word.ToForwardingAddress());
+ } else if (Heap::InNewSpace(heap_obj)) {
+ return T();
+ } else {
+ return heap_obj;
+ }
+}
} // namespace
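The two ForwardingAddress templates coexist because each is SFINAE-constrained to a disjoint family of types: the original applies only when T derives from HeapObject (taken as T*), the new one only when T derives from HeapObjectPtr (taken by value). A minimal, self-contained illustration of that overload-selection pattern with toy types:

    #include <iostream>
    #include <type_traits>

    struct PointerFamily {};   // stands in for HeapObject (handled via T*)
    struct ValueFamily {};     // stands in for HeapObjectPtr (handled by value)

    template <typename T, typename = typename std::enable_if<
                              std::is_base_of<PointerFamily, T>::value>::type>
    const char* Forward(T* /*obj*/) {
      return "pointer-based overload";
    }

    template <typename T, typename = typename std::enable_if<
                              std::is_base_of<ValueFamily, T>::value>::type>
    const char* Forward(T /*obj*/) {
      return "value-based overload";
    }

    int main() {
      PointerFamily p;
      std::cout << Forward(&p) << "\n";             // pointer-based overload
      std::cout << Forward(ValueFamily{}) << "\n";  // value-based overload
    }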
void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
weak_objects_->weak_references.Update(
- [](std::pair<HeapObject*, HeapObjectReference**> slot_in,
- std::pair<HeapObject*, HeapObjectReference**>* slot_out) -> bool {
+ [](std::pair<HeapObject*, HeapObjectSlot> slot_in,
+ std::pair<HeapObject*, HeapObjectSlot>* slot_out) -> bool {
HeapObject* heap_obj = slot_in.first;
HeapObject* forwarded = ForwardingAddress(heap_obj);
if (forwarded) {
- ptrdiff_t distance_to_slot =
- reinterpret_cast<Address>(slot_in.second) -
- reinterpret_cast<Address>(slot_in.first);
+ ptrdiff_t distance_to_slot = slot_in.second.address() -
+ reinterpret_cast<Address>(slot_in.first);
Address new_slot =
reinterpret_cast<Address>(forwarded) + distance_to_slot;
slot_out->first = forwarded;
- slot_out->second = reinterpret_cast<HeapObjectReference**>(new_slot);
+ slot_out->second = HeapObjectSlot(new_slot);
return true;
}
return false;
});
weak_objects_->weak_objects_in_code.Update(
- [](std::pair<HeapObject*, Code*> slot_in,
- std::pair<HeapObject*, Code*>* slot_out) -> bool {
+ [](std::pair<HeapObject*, Code> slot_in,
+ std::pair<HeapObject*, Code>* slot_out) -> bool {
HeapObject* heap_obj = slot_in.first;
HeapObject* forwarded = ForwardingAddress(heap_obj);
@@ -680,10 +711,10 @@ void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
return false;
});
weak_objects_->ephemeron_hash_tables.Update(
- [](EphemeronHashTable* slot_in, EphemeronHashTable** slot_out) -> bool {
- EphemeronHashTable* forwarded = ForwardingAddress(slot_in);
+ [](EphemeronHashTable slot_in, EphemeronHashTable* slot_out) -> bool {
+ EphemeronHashTable forwarded = ForwardingAddress(slot_in);
- if (forwarded) {
+ if (!forwarded.is_null()) {
*slot_out = forwarded;
return true;
}
@@ -723,7 +754,7 @@ bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) {
return chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR);
}
-int IncrementalMarking::VisitObject(Map* map, HeapObject* obj) {
+int IncrementalMarking::VisitObject(Map map, HeapObject* obj) {
DCHECK(marking_state()->IsGrey(obj) || marking_state()->IsBlack(obj));
if (!marking_state()->GreyToBlack(obj)) {
// The object can already be black in these cases:
@@ -734,7 +765,8 @@ int IncrementalMarking::VisitObject(Map* map, HeapObject* obj) {
// unsafe layout change.
// 4. The object is materialized by the deoptimizer.
DCHECK(obj->IsHashTable() || obj->IsPropertyArray() ||
- obj->IsFixedArray() || obj->IsJSObject() || obj->IsString());
+ obj->IsFixedArray() || obj->IsContext() || obj->IsJSObject() ||
+ obj->IsString());
}
DCHECK(marking_state()->IsBlack(obj));
WhiteToGreyAndPush(map);
@@ -756,7 +788,7 @@ void IncrementalMarking::RevisitObject(HeapObject* obj) {
if (page->owner()->identity() == LO_SPACE) {
page->ResetProgressBar();
}
- Map* map = obj->map();
+ Map map = obj->map();
WhiteToGreyAndPush(map);
IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
marking_state());
@@ -785,39 +817,35 @@ intptr_t IncrementalMarking::ProcessMarkingWorklist(
int size = VisitObject(obj->map(), obj);
bytes_processed += size - unscanned_bytes_of_large_object_;
}
- // Report all found wrappers to the embedder. This is necessary as the
- // embedder could potentially invalidate wrappers as soon as V8 is done
- // with its incremental marking processing. Any cached wrappers could
- // result in broken pointers at this point.
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
return bytes_processed;
}
void IncrementalMarking::EmbedderStep(double duration_ms) {
- constexpr int kObjectsToProcessBeforeInterrupt = 100;
-
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
+ constexpr size_t kObjectsToProcessBeforeInterrupt = 500;
- const double deadline =
- heap_->MonotonicallyIncreasingTimeInMs() + duration_ms;
-
- HeapObject* object;
- int cnt = 0;
- while (marking_worklist()->embedder()->Pop(0, &object)) {
- heap_->TracePossibleWrapper(JSObject::cast(object));
- if (++cnt == kObjectsToProcessBeforeInterrupt) {
- cnt = 0;
- if (heap_->MonotonicallyIncreasingTimeInMs() > deadline) {
- break;
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
+ double deadline = heap_->MonotonicallyIncreasingTimeInMs() + duration_ms;
+ bool empty_worklist;
+ do {
+ {
+ LocalEmbedderHeapTracer::ProcessingScope scope(
+ heap_->local_embedder_heap_tracer());
+ HeapObject* object;
+ size_t cnt = 0;
+ empty_worklist = true;
+ while (marking_worklist()->embedder()->Pop(0, &object)) {
+ scope.TracePossibleWrapper(JSObject::cast(object));
+ if (++cnt == kObjectsToProcessBeforeInterrupt) {
+ cnt = 0;
+ empty_worklist = false;
+ break;
+ }
}
}
- }
-
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
- if (!heap_->local_embedder_heap_tracer()
- ->ShouldFinalizeIncrementalMarking()) {
heap_->local_embedder_heap_tracer()->Trace(deadline);
- }
+ } while (!empty_worklist &&
+ (heap_->MonotonicallyIncreasingTimeInMs() < deadline));
+ heap_->local_embedder_heap_tracer()->SetEmbedderWorklistEmpty(empty_worklist);
}
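The restructured EmbedderStep drains the embedder worklist in batches of kObjectsToProcessBeforeInterrupt objects, hands the remaining budget to the tracer after every batch, and repeats until either the worklist is empty or the deadline passes. The same shape in a small, self-contained sketch (generic worklist and callback, not V8's types):

    #include <chrono>
    #include <cstddef>
    #include <deque>

    // Toy version of the batched, deadline-bounded drain loop above.
    template <typename Item, typename Fn>
    bool DrainWithDeadline(std::deque<Item>* worklist, Fn process,
                           std::chrono::steady_clock::time_point deadline,
                           std::size_t batch_size = 500) {
      bool empty = false;
      do {
        std::size_t processed = 0;
        empty = true;
        while (!worklist->empty()) {
          process(worklist->front());
          worklist->pop_front();
          if (++processed == batch_size) {
            empty = false;  // interrupted mid-drain; there may be more work
            break;
          }
        }
        // The real code hands the remaining time budget to the embedder
        // tracer here (Trace(deadline)) before deciding whether to loop.
      } while (!empty && std::chrono::steady_clock::now() < deadline);
      return empty;  // mirrors SetEmbedderWorklistEmpty(empty_worklist)
    }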
void IncrementalMarking::Hurry() {
@@ -927,6 +955,11 @@ void IncrementalMarking::Epilogue() {
finalize_marking_completed_ = false;
}
+bool IncrementalMarking::ShouldDoEmbedderStep() {
+ return state_ == MARKING && FLAG_incremental_marking_wrappers &&
+ heap_->local_embedder_heap_tracer()->InUse();
+}
+
double IncrementalMarking::AdvanceIncrementalMarking(
double deadline_in_ms, CompletionAction completion_action,
StepOrigin step_origin) {
@@ -935,27 +968,22 @@ double IncrementalMarking::AdvanceIncrementalMarking(
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
DCHECK(!IsStopped());
- DCHECK_EQ(
- 0, heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
double remaining_time_in_ms = 0.0;
- intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
- kStepSizeInMs,
- heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
-
- const bool incremental_wrapper_tracing =
- state_ == MARKING && FLAG_incremental_marking_wrappers &&
- heap_->local_embedder_heap_tracer()->InUse();
do {
- if (incremental_wrapper_tracing && trace_wrappers_toggle_) {
+ if (ShouldDoEmbedderStep() && trace_wrappers_toggle_) {
EmbedderStep(kStepSizeInMs);
} else {
+ const intptr_t step_size_in_bytes =
+ GCIdleTimeHandler::EstimateMarkingStepSize(
+ kStepSizeInMs,
+ heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
Step(step_size_in_bytes, completion_action, step_origin);
}
trace_wrappers_toggle_ = !trace_wrappers_toggle_;
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
- } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
+ } while (remaining_time_in_ms > kStepSizeInMs && !IsComplete() &&
!marking_worklist()->IsEmpty());
return remaining_time_in_ms;
}
@@ -985,24 +1013,18 @@ size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
}
size_t IncrementalMarking::StepSizeToMakeProgress() {
- // We increase step size gradually based on the time passed in order to
- // leave marking work to standalone tasks. The ramp up duration and the
- // target step count are chosen based on benchmarks.
- const int kRampUpIntervalMs = 300;
const size_t kTargetStepCount = 256;
const size_t kTargetStepCountAtOOM = 32;
+ const size_t kMaxStepSizeInByte = 256 * KB;
size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
if (!heap()->CanExpandOldGeneration(oom_slack)) {
return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
}
- size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
- IncrementalMarking::kMinStepSizeInBytes);
- double time_passed_ms =
- heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
- double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
- return static_cast<size_t>(factor * step_size);
+ return Min(Max(initial_old_generation_size_ / kTargetStepCount,
+ IncrementalMarking::kMinStepSizeInBytes),
+ kMaxStepSizeInByte);
}
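To make the new clamp concrete, a worked example with the constants above (the old-generation sizes are illustrative only):

    // initial_old_generation_size_ = 96 MB, kTargetStepCount = 256:
    //   96 MB / 256 = 384 KB, which kMaxStepSizeInByte caps at 256 KB.
    // initial_old_generation_size_ = 16 MB:
    //   16 MB / 256 = 64 KB, which the Max() raises to kMinStepSizeInBytes
    //   only if it falls below that floor, and the 256 KB cap leaves alone.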
void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
@@ -1013,49 +1035,54 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
return;
}
- size_t bytes_to_process =
- StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
-
- if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) {
- HistogramTimerScope incremental_marking_scope(
- heap_->isolate()->counters()->gc_incremental_marking());
- TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
- TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
- // The first step after Scavenge will see many allocated bytes.
- // Cap the step size to distribute the marking work more uniformly.
- size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
- kMaxStepSizeInMs,
- heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
- bytes_to_process = Min(bytes_to_process, max_step_size);
- size_t bytes_processed = 0;
- if (FLAG_concurrent_marking) {
- bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
- StepOrigin::kV8, WorklistToProcess::kBailout);
- bytes_to_process = (bytes_processed >= bytes_to_process)
- ? 0
- : bytes_to_process - bytes_processed;
- size_t current_bytes_marked_concurrently =
- heap()->concurrent_marking()->TotalMarkedBytes();
- // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
- // short period of time when a concurrent marking task is finishing.
- if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
- bytes_marked_ahead_of_schedule_ +=
- current_bytes_marked_concurrently - bytes_marked_concurrently_;
- bytes_marked_concurrently_ = current_bytes_marked_concurrently;
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
+
+ if (ShouldDoEmbedderStep() && trace_wrappers_toggle_) {
+ EmbedderStep(kMaxStepSizeInMs);
+ } else {
+ size_t bytes_to_process =
+ StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
+ if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) {
+ // The first step after Scavenge will see many allocated bytes.
+ // Cap the step size to distribute the marking work more uniformly.
+ size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+ kMaxStepSizeInMs,
+ heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+ bytes_to_process = Min(bytes_to_process, max_step_size);
+ size_t bytes_processed = 0;
+ if (FLAG_concurrent_marking) {
+ bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
+ StepOrigin::kV8, WorklistToProcess::kBailout);
+ bytes_to_process = (bytes_processed >= bytes_to_process)
+ ? 0
+ : bytes_to_process - bytes_processed;
+ size_t current_bytes_marked_concurrently =
+ heap()->concurrent_marking()->TotalMarkedBytes();
+ // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
+ // short period of time when a concurrent marking task is finishing.
+ if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
+ bytes_marked_ahead_of_schedule_ +=
+ current_bytes_marked_concurrently - bytes_marked_concurrently_;
+ bytes_marked_concurrently_ = current_bytes_marked_concurrently;
+ }
}
+ if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
+ // Steps performed in tasks and concurrently have put us ahead of
+ // schedule. We skip processing of the marking deque here and thus shift
+ // marking time from inside V8 to standalone tasks.
+ bytes_marked_ahead_of_schedule_ -= bytes_to_process;
+ bytes_processed += bytes_to_process;
+ bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
+ }
+ bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD,
+ StepOrigin::kV8, WorklistToProcess::kAll);
+ bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
}
- if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
- // Steps performed in tasks and concurrently have put us ahead of
- // schedule. We skip processing of the marking deque here and thus shift
- // marking time from inside V8 to standalone tasks.
- bytes_marked_ahead_of_schedule_ -= bytes_to_process;
- bytes_processed += bytes_to_process;
- bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
- }
- bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD,
- StepOrigin::kV8, WorklistToProcess::kAll);
- bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
}
+ trace_wrappers_toggle_ = !trace_wrappers_toggle_;
}
size_t IncrementalMarking::Step(size_t bytes_to_process,
diff --git a/chromium/v8/src/heap/incremental-marking.h b/chromium/v8/src/heap/incremental-marking.h
index ee774c230f8..2d97a27bdad 100644
--- a/chromium/v8/src/heap/incremental-marking.h
+++ b/chromium/v8/src/heap/incremental-marking.h
@@ -177,11 +177,15 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t Step(size_t bytes_to_process, CompletionAction action,
StepOrigin step_origin,
WorklistToProcess worklist_to_process = WorklistToProcess::kAll);
+
+ bool ShouldDoEmbedderStep();
void EmbedderStep(double duration);
inline void RestartIfNotMarking();
- static int RecordWriteFromCode(HeapObject* obj, MaybeObject** slot,
+ // {slot_address} is a raw Address instead of a MaybeObjectSlot because
+ // this is called from generated code via ExternalReference.
+ static int RecordWriteFromCode(HeapObject* obj, Address slot_address,
Isolate* isolate);
// Record a slot for compaction. Returns false for objects that are
@@ -191,14 +195,13 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// cannot be interpreted correctly if the underlying object does not survive
// the incremental cycle (stays white).
V8_INLINE bool BaseRecordWrite(HeapObject* obj, Object* value);
- V8_INLINE void RecordWrite(HeapObject* obj, Object** slot, Object* value);
- V8_INLINE void RecordMaybeWeakWrite(HeapObject* obj, MaybeObject** slot,
- MaybeObject* value);
+ V8_INLINE void RecordWrite(HeapObject* obj, ObjectSlot slot, Object* value);
+ V8_INLINE void RecordMaybeWeakWrite(HeapObject* obj, MaybeObjectSlot slot,
+ MaybeObject value);
void RevisitObject(HeapObject* obj);
- void RecordWriteSlow(HeapObject* obj, HeapObjectReference** slot,
- Object* value);
- void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, HeapObject* value);
+ void RecordWriteSlow(HeapObject* obj, HeapObjectSlot slot, Object* value);
+ void RecordWriteIntoCode(Code host, RelocInfo* rinfo, HeapObject* value);
// Returns true if the function succeeds in transitioning the object
// from white to grey.
@@ -211,8 +214,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool IsCompacting() { return IsMarking() && is_compacting_; }
- void ActivateGeneratedStub(Code* stub);
-
void NotifyIncompleteScanOfObject(int unscanned_bytes) {
unscanned_bytes_of_large_object_ = unscanned_bytes;
}
@@ -259,7 +260,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinishBlackAllocation();
void MarkRoots();
- bool ShouldRetainMap(Map* map, int age);
+ bool ShouldRetainMap(Map map, int age);
// Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
// increase the chances of reusing the map transition tree in the future.
void RetainMaps();
@@ -280,7 +281,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
V8_INLINE bool IsFixedArrayWithProgressBar(HeapObject* object);
// Visits the object and returns its size.
- V8_INLINE int VisitObject(Map* map, HeapObject* obj);
+ V8_INLINE int VisitObject(Map map, HeapObject* obj);
void IncrementIdleMarkingDelayCounter();
diff --git a/chromium/v8/src/heap/item-parallel-job.cc b/chromium/v8/src/heap/item-parallel-job.cc
index b536ccc5d42..85dd55c593b 100644
--- a/chromium/v8/src/heap/item-parallel-job.cc
+++ b/chromium/v8/src/heap/item-parallel-job.cc
@@ -119,7 +119,7 @@ void ItemParallelJob::Run(const std::shared_ptr<Counters>& async_counters) {
// Wait for background tasks.
for (size_t i = 0; i < num_tasks; i++) {
if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
- CancelableTaskManager::kTaskAborted) {
+ TryAbortResult::kTaskAborted) {
pending_tasks_->Wait();
}
}
diff --git a/chromium/v8/src/heap/mark-compact-inl.h b/chromium/v8/src/heap/mark-compact-inl.h
index 449ca43e507..81ad9a45d72 100644
--- a/chromium/v8/src/heap/mark-compact-inl.h
+++ b/chromium/v8/src/heap/mark-compact-inl.h
@@ -10,6 +10,8 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/slots-inl.h"
namespace v8 {
namespace internal {
@@ -48,8 +50,8 @@ MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitBytecodeArray(Map* map,
- BytecodeArray* array) {
+ MarkingState>::VisitBytecodeArray(Map map,
+ BytecodeArray array) {
int size = BytecodeArray::BodyDescriptor::SizeOf(map, array);
BytecodeArray::BodyDescriptor::IterateBody(map, array, size, this);
array->MakeOlder();
@@ -59,8 +61,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitFixedArray(Map* map,
- FixedArray* object) {
+ MarkingState>::VisitFixedArray(Map map, FixedArray object) {
return (fixed_array_mode == FixedArrayVisitationMode::kRegular)
? Parent::VisitFixedArray(map, object)
: VisitFixedArrayIncremental(map, object);
@@ -71,10 +72,10 @@ template <FixedArrayVisitationMode fixed_array_mode,
template <typename T>
V8_INLINE int
MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitEmbedderTracingSubclass(Map* map,
- T* object) {
+ MarkingState>::VisitEmbedderTracingSubclass(Map map, T* object) {
if (heap_->local_embedder_heap_tracer()->InUse()) {
- heap_->TracePossibleWrapper(object);
+ marking_worklist()->embedder()->Push(MarkCompactCollectorBase::kMainThread,
+ object);
}
int size = T::BodyDescriptor::SizeOf(map, object);
T::BodyDescriptor::IterateBody(map, object, size, this);
@@ -84,14 +85,14 @@ MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitJSApiObject(Map* map, JSObject* object) {
+ MarkingState>::VisitJSApiObject(Map map, JSObject* object) {
return VisitEmbedderTracingSubclass(map, object);
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitJSArrayBuffer(Map* map,
+ MarkingState>::VisitJSArrayBuffer(Map map,
JSArrayBuffer* object) {
return VisitEmbedderTracingSubclass(map, object);
}
@@ -99,15 +100,14 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitJSDataView(Map* map,
- JSDataView* object) {
+ MarkingState>::VisitJSDataView(Map map, JSDataView* object) {
return VisitEmbedderTracingSubclass(map, object);
}
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitJSTypedArray(Map* map,
+ MarkingState>::VisitJSTypedArray(Map map,
JSTypedArray* object) {
return VisitEmbedderTracingSubclass(map, object);
}
@@ -115,16 +115,16 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
- VisitEphemeronHashTable(Map* map, EphemeronHashTable* table) {
+ VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
collector_->AddEphemeronHashTable(table);
for (int i = 0; i < table->Capacity(); i++) {
- Object** key_slot =
+ ObjectSlot key_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
HeapObject* key = HeapObject::cast(table->KeyAt(i));
collector_->RecordSlot(table, key_slot, key);
- Object** value_slot =
+ ObjectSlot value_slot =
table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
if (marking_state()->IsBlackOrGrey(key)) {
@@ -152,7 +152,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitMap(Map* map, Map* object) {
+ MarkingState>::VisitMap(Map map, Map object) {
// When map collection is enabled, we have to mark through the map's transitions
// and back pointers in a special way to make these links weak.
int size = Map::BodyDescriptor::SizeOf(map, object);
@@ -167,7 +167,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitTransitionArray(Map* map,
+ MarkingState>::VisitTransitionArray(Map map,
TransitionArray* array) {
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
@@ -177,8 +177,33 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+int MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitJSWeakCell(Map map,
+ JSWeakCell* weak_cell) {
+ if (weak_cell->target()->IsHeapObject()) {
+ HeapObject* target = HeapObject::cast(weak_cell->target());
+ if (marking_state()->IsBlackOrGrey(target)) {
+ // Record the slot inside the JSWeakCell, since the IterateBody below
+ // won't visit it.
+ ObjectSlot slot =
+ HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
+ collector_->RecordSlot(weak_cell, slot, target);
+ } else {
+ // JSWeakCell points to a potentially dead object. We have to process
+ // it when we know the liveness of the whole transitive closure.
+ collector_->AddWeakCell(weak_cell);
+ }
+ }
+ int size = JSWeakCell::BodyDescriptor::SizeOf(map, weak_cell);
+ JSWeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
+ return size;
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitPointer(HeapObject* host, Object** p) {
+ MarkingState>::VisitPointer(HeapObject* host,
+ ObjectSlot p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* target_object = HeapObject::cast(*p);
collector_->RecordSlot(host, p, target_object);
@@ -189,24 +214,21 @@ template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitPointer(HeapObject* host,
- MaybeObject** p) {
+ MaybeObjectSlot p) {
HeapObject* target_object;
if ((*p)->GetHeapObjectIfStrong(&target_object)) {
- collector_->RecordSlot(host, reinterpret_cast<HeapObjectReference**>(p),
- target_object);
+ collector_->RecordSlot(host, HeapObjectSlot(p), target_object);
MarkObject(host, target_object);
} else if ((*p)->GetHeapObjectIfWeak(&target_object)) {
if (marking_state()->IsBlackOrGrey(target_object)) {
// Weak references with live values are directly processed here to reduce
// the processing time of weak cells during the main GC pause.
- collector_->RecordSlot(host, reinterpret_cast<HeapObjectReference**>(p),
- target_object);
+ collector_->RecordSlot(host, HeapObjectSlot(p), target_object);
} else {
// If we do not know about the liveness of the values of weak cells, we have to
// process them when we know the liveness of the whole transitive
// closure.
- collector_->AddWeakReference(host,
- reinterpret_cast<HeapObjectReference**>(p));
+ collector_->AddWeakReference(host, HeapObjectSlot(p));
}
}
}
@@ -215,8 +237,9 @@ template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitPointers(HeapObject* host,
- Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
+ ObjectSlot start,
+ ObjectSlot end) {
+ for (ObjectSlot p = start; p < end; ++p) {
VisitPointer(host, p);
}
}
@@ -225,9 +248,9 @@ template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitPointers(HeapObject* host,
- MaybeObject** start,
- MaybeObject** end) {
- for (MaybeObject** p = start; p < end; p++) {
+ MaybeObjectSlot start,
+ MaybeObjectSlot end) {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
VisitPointer(host, p);
}
}
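The recurring change in this file, Object**/MaybeObject** parameters becoming ObjectSlot/MaybeObjectSlot, replaces raw pointer arithmetic with a small value type that wraps a slot address. As a rough, hypothetical illustration of the interface such a wrapper needs (load, store, address, increment) and of why the loops switch from p++ over pointers to ++p over slots, consider this minimal Slot template; it is not V8's actual ObjectSlot.

#include <cstdint>

// Hypothetical illustration only; V8's slot types carry a richer interface.
template <typename T>
class Slot {
 public:
  explicit Slot(T** location)
      : address_(reinterpret_cast<std::uintptr_t>(location)) {}

  T* operator*() const { return *reinterpret_cast<T* const*>(address_); }
  void store(T* value) const { *reinterpret_cast<T**>(address_) = value; }
  std::uintptr_t address() const { return address_; }

  Slot& operator++() { address_ += sizeof(T*); return *this; }  // next slot
  bool operator<(const Slot& other) const { return address_ < other.address_; }

 private:
  std::uintptr_t address_;
};

// Usage: iterate a [start, end) slot range, as the visitors above do.
template <typename T, typename F>
void VisitRange(Slot<T> start, Slot<T> end, F visit) {
  for (Slot<T> p = start; p < end; ++p) visit(p);
}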
@@ -235,7 +258,7 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitEmbeddedPointer(Code* host,
+ MarkingState>::VisitEmbeddedPointer(Code host,
RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* object = HeapObject::cast(rinfo->target_object());
@@ -250,10 +273,10 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::VisitCodeTarget(Code* host,
+ MarkingState>::VisitCodeTarget(Code host,
RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
collector_->RecordRelocSlot(host, rinfo, target);
MarkObject(host, target);
}
@@ -290,8 +313,8 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
- VisitFixedArrayIncremental(Map* map, FixedArray* object) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ VisitFixedArrayIncremental(Map map, FixedArray object) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
DCHECK(!FLAG_use_marking_progress_bar ||
@@ -306,7 +329,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
// Ensure that the object is either grey or black before pushing it
// into marking worklist.
marking_state()->WhiteToGrey(object);
- if (FLAG_concurrent_marking) {
+ if (FLAG_concurrent_marking || FLAG_parallel_marking) {
marking_worklist()->PushBailout(object);
} else {
marking_worklist()->Push(object);
@@ -336,7 +359,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
- MarkingState>::MarkMapContents(Map* map) {
+ MarkingState>::MarkMapContents(Map map) {
// Since descriptor arrays are potentially shared, ensure that only the
// descriptors that belong to this map are marked. The first time a non-empty
// descriptor array is marked, its header is also visited. The slot holding
@@ -345,15 +368,16 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
// just mark the entire descriptor array.
if (!map->is_prototype_map()) {
DescriptorArray* descriptors = map->instance_descriptors();
- if (MarkObjectWithoutPush(map, descriptors) && descriptors->length() > 0) {
- VisitPointers(descriptors, descriptors->GetFirstElementAddress(),
- descriptors->GetDescriptorEndSlot(0));
+ if (MarkObjectWithoutPush(map, descriptors)) {
+ VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
+ descriptors->GetDescriptorSlot(0));
}
int start = 0;
int end = map->NumberOfOwnDescriptors();
if (start < end) {
- VisitPointers(descriptors, descriptors->GetDescriptorStartSlot(start),
- descriptors->GetDescriptorEndSlot(end));
+ VisitPointers(descriptors,
+ MaybeObjectSlot(descriptors->GetDescriptorSlot(start)),
+ MaybeObjectSlot(descriptors->GetDescriptorSlot(end)));
}
}
@@ -401,20 +425,18 @@ void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
}
}
-void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
+void MarkCompactCollector::RecordSlot(HeapObject* object, ObjectSlot slot,
HeapObject* target) {
- RecordSlot(object, reinterpret_cast<HeapObjectReference**>(slot), target);
+ RecordSlot(object, HeapObjectSlot(slot), target);
}
-void MarkCompactCollector::RecordSlot(HeapObject* object,
- HeapObjectReference** slot,
+void MarkCompactCollector::RecordSlot(HeapObject* object, HeapObjectSlot slot,
HeapObject* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
- RememberedSet<OLD_TO_OLD>::Insert(source_page,
- reinterpret_cast<Address>(slot));
+ RememberedSet<OLD_TO_OLD>::Insert(source_page, slot.address());
}
}
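The RecordSlot overloads above keep their original gate — only slots whose target sits on an evacuation candidate page, written from a page that has not opted out of slot recording, go into the OLD_TO_OLD remembered set — while now taking the slot address from the typed slot. A compressed sketch of that gate, using hypothetical Page and remembered-set stand-ins rather than V8's real structures:

#include <cstdint>
#include <set>

// Hypothetical stand-ins for illustration.
struct Page {
  bool evacuation_candidate = false;
  bool skip_slot_recording = false;
  std::set<std::uintptr_t> old_to_old;  // recorded slot addresses on this page
};

// Record a slot only when it may need fixing later: the target will move
// (its page is an evacuation candidate) and the source page wants recording.
inline void RecordSlot(Page* source_page, Page* target_page,
                       std::uintptr_t slot_address) {
  if (target_page->evacuation_candidate &&
      !source_page->skip_slot_recording) {
    source_page->old_to_old.insert(slot_address);
  }
}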
@@ -485,14 +507,13 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
second_bit_index = 1u << (trailing_zeros + 1);
}
- Map* map = nullptr;
+ Map map;
if (current_cell_ & second_bit_index) {
// We found a black object. If the black object is within a black area,
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject* black_object = HeapObject::FromAddress(addr);
- map =
- base::AsAtomicPointer::Relaxed_Load(reinterpret_cast<Map**>(addr));
+ map = Map::cast(ObjectSlot(addr).Acquire_Load());
size = black_object->SizeFromMap(map);
Address end = addr + size - kPointerSize;
// One word filler objects do not borrow the second mark bit. We have
@@ -519,8 +540,7 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
object = black_object;
}
} else if ((mode == kGreyObjects || mode == kAllLiveObjects)) {
- map =
- base::AsAtomicPointer::Relaxed_Load(reinterpret_cast<Map**>(addr));
+ map = Map::cast(ObjectSlot(addr).Acquire_Load());
object = HeapObject::FromAddress(addr);
size = object->SizeFromMap(map);
}
diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc
index 6f46bc57bf8..7cd94b557ce 100644
--- a/chromium/v8/src/heap/mark-compact.cc
+++ b/chromium/v8/src/heap/mark-compact.cc
@@ -29,6 +29,9 @@
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/js-objects-inl.h"
+#include "src/objects/maybe-object.h"
+#include "src/objects/slots-inl.h"
#include "src/transitions-inl.h"
#include "src/utils-inl.h"
#include "src/v8.h"
@@ -44,7 +47,7 @@ const char* Marking::kImpossibleBitPattern = "01";
// The following has to hold in order for {MarkingState::MarkBitFrom} to not
// produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
-STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
+STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
// =============================================================================
// Verifiers
@@ -62,24 +65,25 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;
- virtual void VerifyPointers(Object** start, Object** end) = 0;
- virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
+ virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
+ virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual bool IsMarked(HeapObject* object) = 0;
virtual bool IsBlackOrGrey(HeapObject* object) = 0;
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
VerifyPointers(start, end);
}
@@ -87,6 +91,7 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
void VerifyMarking(NewSpace* new_space);
void VerifyMarking(PagedSpace* paged_space);
+ void VerifyMarking(LargeObjectSpace* lo_space);
Heap* heap_;
};
@@ -146,6 +151,15 @@ void MarkingVerifier::VerifyMarking(PagedSpace* space) {
}
}
+void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
+ LargeObjectIterator it(lo_space);
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ if (IsBlackOrGrey(obj)) {
+ obj->Iterate(this);
+ }
+ }
+}
+
class FullMarkingVerifier : public MarkingVerifier {
public:
explicit FullMarkingVerifier(Heap* heap)
@@ -159,13 +173,8 @@ class FullMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->old_space());
VerifyMarking(heap_->code_space());
VerifyMarking(heap_->map_space());
-
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
- if (marking_state_->IsBlackOrGrey(obj)) {
- obj->Iterate(this);
- }
- }
+ VerifyMarking(heap_->lo_space());
+ VerifyMarking(heap_->code_lo_space());
}
protected:
@@ -181,8 +190,8 @@ class FullMarkingVerifier : public MarkingVerifier {
return marking_state_->IsBlackOrGrey(object);
}
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ for (ObjectSlot current = start; current < end; ++current) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
CHECK(marking_state_->IsBlackOrGrey(object));
@@ -190,8 +199,8 @@ class FullMarkingVerifier : public MarkingVerifier {
}
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ for (MaybeObjectSlot current = start; current < end; ++current) {
HeapObject* object;
if ((*current)->GetHeapObjectIfStrong(&object)) {
CHECK(marking_state_->IsBlackOrGrey(object));
@@ -199,11 +208,11 @@ class FullMarkingVerifier : public MarkingVerifier {
}
}
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
if (!host->IsWeakObject(rinfo->target_object())) {
Object* p = rinfo->target_object();
- VisitPointer(host, &p);
+ VisitPointer(host, ObjectSlot(&p));
}
}
@@ -215,17 +224,18 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
public:
virtual void Run() = 0;
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
VerifyPointers(start, end);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
VerifyPointers(start, end);
}
@@ -234,8 +244,8 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
inline Heap* heap() { return heap_; }
- virtual void VerifyPointers(Object** start, Object** end) = 0;
- virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
+ virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
+ virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
void VerifyRoots(VisitMode mode);
void VerifyEvacuationOnPage(Address start, Address end);
@@ -295,8 +305,8 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
protected:
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ for (ObjectSlot current = start; current < end; ++current) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
if (Heap::InNewSpace(object)) {
@@ -306,8 +316,8 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
}
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ for (MaybeObjectSlot current = start; current < end; ++current) {
HeapObject* object;
if ((*current)->GetHeapObjectIfStrong(&object)) {
if (Heap::InNewSpace(object)) {
@@ -494,6 +504,14 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
}
}
+void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
+ LargeObjectIterator it(space);
+ for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
+ CHECK(non_atomic_marking_state()->IsWhite(obj));
+ CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
+ MemoryChunk::FromAddress(obj->address())));
+ }
+}
void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_space());
@@ -503,13 +521,9 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
// Read-only space should always be black since we never collect any objects
// in it or linked from it.
VerifyMarkbitsAreDirty(heap_->read_only_space());
-
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
- CHECK(non_atomic_marking_state()->IsWhite(obj));
- CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
- MemoryChunk::FromAddress(obj->address())));
- }
+ VerifyMarkbitsAreClean(heap_->lo_space());
+ VerifyMarkbitsAreClean(heap_->code_lo_space());
+ VerifyMarkbitsAreClean(heap_->new_lo_space());
}
#endif // VERIFY_HEAP
@@ -753,7 +767,7 @@ void MarkCompactCollector::Prepare() {
heap()->memory_allocator()->unmapper()->PrepareForMarkCompact();
if (!was_marked_incrementally_) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
heap_->local_embedder_heap_tracer()->TracePrologue();
}
@@ -779,7 +793,9 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::FinishConcurrentMarking(
ConcurrentMarking::StopRequest stop_request) {
- if (FLAG_concurrent_marking) {
+ // FinishConcurrentMarking is called for both, concurrent and parallel,
+ // marking. It is safe to call this function when tasks are already finished.
+ if (FLAG_parallel_marking || FLAG_concurrent_marking) {
heap()->concurrent_marking()->Stop(stop_request);
heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
}
@@ -819,6 +835,7 @@ void MarkCompactCollector::Finish() {
// Clear the marking state of live large objects.
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
+ heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
#ifdef DEBUG
DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
@@ -845,17 +862,18 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
explicit RootMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitRootPointer(Root root, const char* description, Object** p) final {
+ void VisitRootPointer(Root root, const char* description,
+ ObjectSlot p) final {
MarkObjectByPointer(root, p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(root, p);
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) final {
+ for (ObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
}
private:
- V8_INLINE void MarkObjectByPointer(Root root, Object** p) {
+ V8_INLINE void MarkObjectByPointer(Root root, ObjectSlot p) {
if (!(*p)->IsHeapObject()) return;
collector_->MarkRootObject(root, HeapObject::cast(*p));
@@ -879,19 +897,19 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitPointer(HeapObject* host, Object** p) final {
+ void VisitPointer(HeapObject* host, ObjectSlot p) final {
MarkObject(host, *p);
}
- void VisitPointers(HeapObject* host, Object** start, Object** end) final {
- for (Object** p = start; p < end; p++) {
+ void VisitPointers(HeapObject* host, ObjectSlot start, ObjectSlot end) final {
+ for (ObjectSlot p = start; p < end; ++p) {
DCHECK(!HasWeakHeapObjectTag(*p));
MarkObject(host, *p);
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
// At the moment, custom roots cannot contain weak pointers.
UNREACHABLE();
}
@@ -912,19 +930,20 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
InternalizedStringTableCleaner(Heap* heap, HeapObject* table)
: heap_(heap), pointers_removed_(0), table_(table) {}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
- for (Object** p = start; p < end; p++) {
+ for (ObjectSlot p = start; p < end; ++p) {
Object* o = *p;
if (o->IsHeapObject()) {
HeapObject* heap_object = HeapObject::cast(o);
if (marking_state->IsWhite(heap_object)) {
pointers_removed_++;
// Set the entry to the_hole_value (as deleted).
- *p = the_hole;
+ p.store(the_hole);
} else {
// StringTable contains only old space strings.
DCHECK(!Heap::InNewSpace(o));
@@ -934,8 +953,8 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
UNREACHABLE();
}
@@ -953,25 +972,25 @@ class ExternalStringTableCleaner : public RootVisitor {
public:
explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector::NonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
- for (Object** p = start; p < end; p++) {
+ for (ObjectSlot p = start; p < end; ++p) {
Object* o = *p;
if (o->IsHeapObject()) {
HeapObject* heap_object = HeapObject::cast(o);
if (marking_state->IsWhite(heap_object)) {
if (o->IsExternalString()) {
- heap_->FinalizeExternalString(String::cast(*p));
+ heap_->FinalizeExternalString(String::cast(o));
} else {
// The original external string may have been internalized.
DCHECK(o->IsThinString());
}
// Set the entry to the_hole_value (as deleted).
- *p = the_hole;
+ p.store(the_hole);
}
}
}
@@ -1024,43 +1043,42 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- inline void VisitPointer(HeapObject* host, Object** p) final {
+ inline void VisitPointer(HeapObject* host, ObjectSlot p) final {
DCHECK(!HasWeakHeapObjectTag(*p));
- RecordMigratedSlot(host, reinterpret_cast<MaybeObject*>(*p),
- reinterpret_cast<Address>(p));
+ RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
}
- inline void VisitPointer(HeapObject* host, MaybeObject** p) final {
- RecordMigratedSlot(host, *p, reinterpret_cast<Address>(p));
+ inline void VisitPointer(HeapObject* host, MaybeObjectSlot p) final {
+ RecordMigratedSlot(host, *p, p.address());
}
- inline void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
+ inline void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
- inline void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ inline void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
while (start < end) {
VisitPointer(host, start);
++start;
}
}
- inline void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
+ inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
// The target is always in old space, we don't have to record the slot in
// the old-to-new remembered set.
DCHECK(!Heap::InNewSpace(target));
collector_->RecordRelocSlot(host, rinfo, target);
}
- inline void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
+ inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* object = HeapObject::cast(rinfo->target_object());
@@ -1069,16 +1087,16 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
}
// Entries that are skipped for recording.
- inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {}
+ inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
inline void VisitExternalReference(Foreign* host, Address* p) final {}
- inline void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) final {}
- inline void VisitInternalReference(Code* host, RelocInfo* rinfo) final {}
+ inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
+ inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
protected:
- inline virtual void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
+ inline virtual void RecordMigratedSlot(HeapObject* host, MaybeObject value,
Address slot) {
if (value->IsStrongOrWeak()) {
- Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+ Page* p = Page::FromAddress(value.ptr());
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
@@ -1147,7 +1165,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
- DCHECK(dest != LO_SPACE);
+ DCHECK_NE(dest, LO_SPACE);
+ DCHECK_NE(dest, CODE_LO_SPACE);
if (dest == OLD_SPACE) {
DCHECK_OBJECT_SIZE(size);
DCHECK(IsAligned(size, kPointerSize));
@@ -1214,7 +1233,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
bool AbortCompactionForTesting(HeapObject* object) {
if (FLAG_stress_compaction) {
const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
- kPageAlignmentMask & ~kPointerAlignmentMask;
+ kPageAlignmentMask & ~kObjectAlignmentMask;
if ((object->address() & kPageAlignmentMask) == mask) {
Page* page = Page::FromAddress(object->address());
if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
@@ -1273,16 +1292,14 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
inline bool TryEvacuateWithoutCopy(HeapObject* object) {
if (is_incremental_marking_) return false;
- Map* map = object->map();
+ Map map = object->map();
// Some objects can be evacuated without creating a copy.
if (map->visitor_id() == kVisitThinString) {
HeapObject* actual = ThinString::cast(object)->unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
- base::Relaxed_Store(
- reinterpret_cast<base::AtomicWord*>(object->address()),
- reinterpret_cast<base::AtomicWord>(
- MapWord::FromForwardingAddress(actual).ToMap()));
+ object->map_slot().Relaxed_Store(
+ MapWord::FromForwardingAddress(actual).ToMap());
return true;
}
// TODO(mlippautz): Handle ConsString.
@@ -1404,7 +1421,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
Heap* heap_;
};
-bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, Object** p) {
+bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, ObjectSlot p) {
Object* o = *p;
if (!o->IsHeapObject()) return false;
HeapObject* heap_object = HeapObject::cast(o);
@@ -1414,7 +1431,7 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, Object** p) {
void MarkCompactCollector::MarkStringTable(
ObjectVisitor* custom_root_body_visitor) {
- StringTable* string_table = heap()->string_table();
+ StringTable string_table = heap()->string_table();
// Mark the string table itself.
if (marking_state()->WhiteToBlack(string_table)) {
// Explicitly mark the prefix.
@@ -1457,7 +1474,6 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
if (FLAG_parallel_marking) {
- DCHECK(FLAG_concurrent_marking);
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
@@ -1471,6 +1487,7 @@ void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
heap()->concurrent_marking()->ephemeron_marked() ||
+ !marking_worklist()->IsEmbedderEmpty() ||
!heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
++iterations;
}
@@ -1597,12 +1614,15 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
void MarkCompactCollector::PerformWrapperTracing() {
if (heap_->local_embedder_heap_tracer()->InUse()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
- HeapObject* object;
- while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
- heap_->TracePossibleWrapper(JSObject::cast(object));
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
+ {
+ LocalEmbedderHeapTracer::ProcessingScope scope(
+ heap_->local_embedder_heap_tracer());
+ HeapObject* object;
+ while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
+ scope.TracePossibleWrapper(JSObject::cast(object));
+ }
}
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
heap_->local_embedder_heap_tracer()->Trace(
std::numeric_limits<double>::infinity());
}
@@ -1627,7 +1647,7 @@ void MarkCompactCollector::ProcessMarkingWorklistInternal() {
kTrackNewlyDiscoveredObjects) {
AddNewlyDiscovered(object);
}
- Map* map = object->map();
+ Map map = object->map();
MarkObject(object, map);
visitor.Visit(map, object);
}
@@ -1668,7 +1688,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
return;
}
if (it.frame()->type() == StackFrame::OPTIMIZED) {
- Code* code = it.frame()->LookupCode();
+ Code code = it.frame()->LookupCode();
if (!code->CanDeoptAt(it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
}
@@ -1737,7 +1757,6 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
if (FLAG_parallel_marking) {
- DCHECK(FLAG_concurrent_marking);
heap_->concurrent_marking()->RescheduleTasksIfNeeded();
}
ProcessMarkingWorklist();
@@ -1756,14 +1775,16 @@ void MarkCompactCollector::MarkLiveObjects() {
// opportunistic as it may not discover graphs that are only reachable
// through ephemerons.
{
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPERS);
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
do {
// PerformWrapperTracing() also empties the work items collected by
// concurrent markers. As a result this call needs to happen at least
// once.
PerformWrapperTracing();
ProcessMarkingWorklist();
- } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone());
+ } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
+ !marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
}
@@ -1807,10 +1828,6 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeronMarking();
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
- heap()->local_embedder_heap_tracer()->TraceEpilogue();
- }
DCHECK(marking_worklist()->IsEmbedderEmpty());
DCHECK(marking_worklist()->IsEmpty());
}
@@ -1835,7 +1852,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// Prune the string table removing all strings only pointed to by the
// string table. Cannot use string_table() here because the string
// table is marked.
- StringTable* string_table = heap()->string_table();
+ StringTable string_table = heap()->string_table();
InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
string_table->IterateElements(&internalized_visitor);
string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
@@ -1859,36 +1876,44 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// cleared.
ClearFullMapTransitions();
}
- ClearWeakReferences();
- MarkDependentCodeForDeoptimization();
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
+ ClearWeakReferences();
+ ClearWeakCollections();
+ ClearJSWeakCells();
+ }
- ClearWeakCollections();
+ MarkDependentCodeForDeoptimization();
DCHECK(weak_objects_.transition_arrays.IsEmpty());
DCHECK(weak_objects_.weak_references.IsEmpty());
DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
+ DCHECK(weak_objects_.js_weak_cells.IsEmpty());
}
void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
- std::pair<HeapObject*, Code*> weak_object_in_code;
+ std::pair<HeapObject*, Code> weak_object_in_code;
while (weak_objects_.weak_objects_in_code.Pop(kMainThread,
&weak_object_in_code)) {
HeapObject* object = weak_object_in_code.first;
- Code* code = weak_object_in_code.second;
+ Code code = weak_object_in_code.second;
if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
- !code->marked_for_deoptimization()) {
- code->SetMarkedForDeoptimization("weak objects");
- code->InvalidateEmbeddedObjects(heap_);
- have_code_to_deoptimize_ = true;
+ !code->embedded_objects_cleared()) {
+ if (!code->marked_for_deoptimization()) {
+ code->SetMarkedForDeoptimization("weak objects");
+ have_code_to_deoptimize_ = true;
+ }
+ code->ClearEmbeddedObjects(heap_);
+ DCHECK(code->embedded_objects_cleared());
}
}
}
-void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* dead_target) {
+void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
Object* potential_parent = dead_target->constructor_or_backpointer();
if (potential_parent->IsMap()) {
- Map* parent = Map::cast(potential_parent);
+ Map parent = Map::cast(potential_parent);
DisallowHeapAllocation no_gc_obviously;
if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
TransitionsAccessor(isolate(), parent, &no_gc_obviously)
@@ -1898,8 +1923,8 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* dead_target) {
}
}
-void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* map,
- Map* dead_target) {
+void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
+ Map dead_target) {
DCHECK(!map->is_prototype_map());
DCHECK(!dead_target->is_prototype_map());
DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
@@ -1918,12 +1943,12 @@ void MarkCompactCollector::ClearFullMapTransitions() {
while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
int num_transitions = array->number_of_entries();
if (num_transitions > 0) {
- Map* map;
+ Map map;
// The array might contain "undefined" elements because it's not yet
// filled. Allow it.
if (array->GetTargetIfExists(0, isolate(), &map)) {
- DCHECK_NOT_NULL(map); // Weak pointers aren't cleared yet.
- Map* parent = Map::cast(map->constructor_or_backpointer());
+ DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
+ Map parent = Map::cast(map->constructor_or_backpointer());
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
DescriptorArray* descriptors =
@@ -1939,14 +1964,14 @@ void MarkCompactCollector::ClearFullMapTransitions() {
}
bool MarkCompactCollector::CompactTransitionArray(
- Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
+ Map map, TransitionArray* transitions, DescriptorArray* descriptors) {
DCHECK(!map->is_prototype_map());
int num_transitions = transitions->number_of_entries();
bool descriptors_owner_died = false;
int transition_index = 0;
// Compact all live transitions to the left.
for (int i = 0; i < num_transitions; ++i) {
- Map* target = transitions->GetTarget(i);
+ Map target = transitions->GetTarget(i);
DCHECK_EQ(target->constructor_or_backpointer(), map);
if (non_atomic_marking_state()->IsWhite(target)) {
if (descriptors != nullptr &&
@@ -1956,14 +1981,13 @@ bool MarkCompactCollector::CompactTransitionArray(
}
} else {
if (i != transition_index) {
- Name* key = transitions->GetKey(i);
+ Name key = transitions->GetKey(i);
transitions->SetKey(transition_index, key);
- HeapObjectReference** key_slot =
- transitions->GetKeySlot(transition_index);
+ HeapObjectSlot key_slot = transitions->GetKeySlot(transition_index);
RecordSlot(transitions, key_slot, key);
- MaybeObject* raw_target = transitions->GetRawTarget(i);
+ MaybeObject raw_target = transitions->GetRawTarget(i);
transitions->SetRawTarget(transition_index, raw_target);
- HeapObjectReference** target_slot =
+ HeapObjectSlot target_slot =
transitions->GetTargetSlot(transition_index);
RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
}
@@ -1988,26 +2012,44 @@ bool MarkCompactCollector::CompactTransitionArray(
return descriptors_owner_died;
}
-void MarkCompactCollector::TrimDescriptorArray(Map* map,
+void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray* array,
+ int descriptors_to_trim) {
+ int old_nof_all_descriptors = array->number_of_all_descriptors();
+ int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
+ DCHECK_LT(0, descriptors_to_trim);
+ DCHECK_LE(0, new_nof_all_descriptors);
+ Address start = array->GetDescriptorSlot(new_nof_all_descriptors).address();
+ Address end = array->GetDescriptorSlot(old_nof_all_descriptors).address();
+ RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array),
+ start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(MemoryChunk::FromHeapObject(array),
+ start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
+ ClearRecordedSlots::kNo);
+ array->set_number_of_all_descriptors(new_nof_all_descriptors);
+}
+
+void MarkCompactCollector::TrimDescriptorArray(Map map,
DescriptorArray* descriptors) {
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) {
DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
return;
}
-
- int number_of_descriptors = descriptors->number_of_descriptors_storage();
- int to_trim = number_of_descriptors - number_of_own_descriptors;
+ // TODO(ulan): Trim only if slack is greater than some percentage threshold.
+ int to_trim =
+ descriptors->number_of_all_descriptors() - number_of_own_descriptors;
if (to_trim > 0) {
- heap_->RightTrimWeakFixedArray(descriptors,
- to_trim * DescriptorArray::kEntrySize);
- descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
+ descriptors->set_number_of_descriptors(number_of_own_descriptors);
+ RightTrimDescriptorArray(descriptors, to_trim);
TrimEnumCache(map, descriptors);
descriptors->Sort();
if (FLAG_unbox_double_fields) {
- LayoutDescriptor* layout_descriptor = map->layout_descriptor();
+ LayoutDescriptor layout_descriptor = map->layout_descriptor();
layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
number_of_own_descriptors);
SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
@@ -2017,21 +2059,21 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
map->set_owns_descriptors(true);
}
-void MarkCompactCollector::TrimEnumCache(Map* map,
+void MarkCompactCollector::TrimEnumCache(Map map,
DescriptorArray* descriptors) {
int live_enum = map->EnumLength();
if (live_enum == kInvalidEnumCacheSentinel) {
live_enum = map->NumberOfEnumerableProperties();
}
if (live_enum == 0) return descriptors->ClearEnumCache();
- EnumCache* enum_cache = descriptors->GetEnumCache();
+ EnumCache* enum_cache = descriptors->enum_cache();
- FixedArray* keys = enum_cache->keys();
+ FixedArray keys = enum_cache->keys();
int to_trim = keys->length() - live_enum;
if (to_trim <= 0) return;
heap_->RightTrimFixedArray(keys, to_trim);
- FixedArray* indices = enum_cache->indices();
+ FixedArray indices = enum_cache->indices();
to_trim = indices->length() - live_enum;
if (to_trim <= 0) return;
heap_->RightTrimFixedArray(indices, to_trim);
@@ -2039,7 +2081,7 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
void MarkCompactCollector::ClearWeakCollections() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
- EphemeronHashTable* table;
+ EphemeronHashTable table;
while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) {
for (int i = 0; i < table->Capacity(); i++) {
@@ -2062,10 +2104,12 @@ void MarkCompactCollector::ClearWeakCollections() {
void MarkCompactCollector::ClearWeakReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
- std::pair<HeapObject*, HeapObjectReference**> slot;
+ std::pair<HeapObject*, HeapObjectSlot> slot;
+ HeapObjectReference cleared_weak_ref =
+ HeapObjectReference::ClearedValue(isolate());
while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
HeapObject* value;
- HeapObjectReference** location = slot.second;
+ HeapObjectSlot location = slot.second;
if ((*location)->GetHeapObjectIfWeak(&value)) {
DCHECK(!value->IsCell());
if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
@@ -2076,12 +2120,53 @@ void MarkCompactCollector::ClearWeakReferences() {
// The map is non-live.
ClearPotentialSimpleMapTransition(Map::cast(value));
}
- *location = HeapObjectReference::ClearedValue();
+ location.store(cleared_weak_ref);
}
}
}
}
+void MarkCompactCollector::ClearJSWeakCells() {
+ if (!FLAG_harmony_weak_refs) {
+ return;
+ }
+ JSWeakCell* weak_cell;
+ while (weak_objects_.js_weak_cells.Pop(kMainThread, &weak_cell)) {
+ // We do not insert cleared weak cells into the list, so the value
+ // cannot be a Smi here.
+ HeapObject* target = HeapObject::cast(weak_cell->target());
+ if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
+ // The value of the JSWeakCell is dead.
+ JSWeakFactory* weak_factory = JSWeakFactory::cast(weak_cell->factory());
+ if (!weak_factory->scheduled_for_cleanup()) {
+ heap()->AddDirtyJSWeakFactory(
+ weak_factory,
+ [](HeapObject* object, ObjectSlot slot, Object* target) {
+ if (target->IsHeapObject()) {
+ RecordSlot(object, slot, HeapObject::cast(target));
+ }
+ });
+ }
+ // We're modifying the pointers in JSWeakCell and JSWeakFactory during GC;
+ // thus we need to record the slots it writes. The normal write barrier is
+ // not enough, since it's disabled before GC.
+ weak_cell->Nullify(
+ isolate(), [](HeapObject* object, ObjectSlot slot, Object* target) {
+ if (target->IsHeapObject()) {
+ RecordSlot(object, slot, HeapObject::cast(target));
+ }
+ });
+ DCHECK(weak_factory->NeedsCleanup());
+ DCHECK(weak_factory->scheduled_for_cleanup());
+ } else {
+ // The value of the JSWeakCell is alive.
+ ObjectSlot slot =
+ HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
+ RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
+ }
+ }
+}
+
void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.transition_arrays.Clear();
weak_objects_.ephemeron_hash_tables.Clear();
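ClearWeakReferences and the new ClearJSWeakCells above run after marking has settled: every recorded weak slot whose referent stayed unmarked is overwritten with a cleared sentinel (and, for JSWeakCell, the cell is nullified and its factory queued for cleanup), while slots pointing at live objects are left alone. A minimal sketch of the plain weak-reference half of that pass, with hypothetical stand-ins instead of tagged HeapObjectSlots:

#include <unordered_set>
#include <vector>

// Hypothetical stand-ins; the real code works on tagged HeapObjectSlots.
struct HeapObj {};
static HeapObj kClearedSentinel;  // stands in for the cleared-reference value

struct WeakSlot { HeapObj** location; };

void ClearDeadWeakReferences(std::vector<WeakSlot>& weak_slots,
                             const std::unordered_set<HeapObj*>& marked) {
  for (WeakSlot& slot : weak_slots) {
    HeapObj* value = *slot.location;
    if (value != nullptr && marked.count(value) == 0) {
      // The weakly held value did not survive marking: clear the reference.
      *slot.location = &kClearedSentinel;
    }
    // Live values keep their (possibly already recorded) slot untouched here.
  }
}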
@@ -2090,14 +2175,19 @@ void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.discovered_ephemerons.Clear();
weak_objects_.weak_references.Clear();
weak_objects_.weak_objects_in_code.Clear();
+ weak_objects_.js_weak_cells.Clear();
+}
+
+bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
+ return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
}
-void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
+void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
- Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
+ Page* source_page = Page::FromAddress(host.ptr());
if (target_page->IsEvacuationCandidate() &&
- (rinfo->host() == nullptr ||
+ (rinfo->host().is_null() ||
!source_page->ShouldSkipEvacuationSlotRecording())) {
RelocInfo::Mode rmode = rinfo->rmode();
Address addr = rinfo->pc();
@@ -2111,14 +2201,14 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
slot_type = OBJECT_SLOT;
}
}
- RememberedSet<OLD_TO_OLD>::InsertTyped(
- source_page, reinterpret_cast<Address>(host), slot_type, addr);
+ RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, host.ptr(), slot_type,
+ addr);
}
}
template <AccessMode access_mode>
static inline SlotCallbackResult UpdateSlot(
- MaybeObject** slot, MaybeObject* old, HeapObject* heap_obj,
+ MaybeObjectSlot slot, MaybeObject old, HeapObject* heap_obj,
HeapObjectReferenceType reference_type) {
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
@@ -2126,14 +2216,14 @@ static inline SlotCallbackResult UpdateSlot(
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
Page::FromAddress(heap_obj->address())
->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
- MaybeObject* target =
+ MaybeObject target =
reference_type == HeapObjectReferenceType::WEAK
? HeapObjectReference::Weak(map_word.ToForwardingAddress())
: HeapObjectReference::Strong(map_word.ToForwardingAddress());
if (access_mode == AccessMode::NON_ATOMIC) {
- *slot = target;
+ slot.store(target);
} else {
- base::AsAtomicPointer::Release_CompareAndSwap(slot, old, target);
+ slot.Release_CompareAndSwap(old, target);
}
DCHECK(!Heap::InFromSpace(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
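UpdateSlot above rewrites a slot to its referent's forwarding address, with a plain store in the non-atomic case and a release compare-and-swap otherwise; the weak/strong tag handling is omitted in the sketch below. This is a hypothetical illustration built on std::atomic and a toy object with a forwarding field, not V8's MapWord machinery.

#include <atomic>

// Hypothetical stand-in: a "forwarded" object just remembers where it moved.
struct HeapObj {
  HeapObj* forwarding = nullptr;  // non-null once the object has been evacuated
};

enum class AccessMode { ATOMIC, NON_ATOMIC };

// Rewrite *slot to the forwarding address if its referent moved. The atomic
// path uses CAS so concurrent updaters racing on the same slot stay correct.
template <AccessMode mode>
void UpdateSlotSketch(std::atomic<HeapObj*>* slot) {
  HeapObj* old_value = slot->load(std::memory_order_relaxed);
  if (old_value == nullptr || old_value->forwarding == nullptr) return;
  HeapObj* new_value = old_value->forwarding;
  if (mode == AccessMode::NON_ATOMIC) {
    slot->store(new_value, std::memory_order_relaxed);
  } else {
    slot->compare_exchange_strong(old_value, new_value,
                                  std::memory_order_release,
                                  std::memory_order_relaxed);
  }
}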
@@ -2145,8 +2235,8 @@ static inline SlotCallbackResult UpdateSlot(
}
template <AccessMode access_mode>
-static inline SlotCallbackResult UpdateSlot(MaybeObject** slot) {
- MaybeObject* obj = base::AsAtomicPointer::Relaxed_Load(slot);
+static inline SlotCallbackResult UpdateSlot(MaybeObjectSlot slot) {
+ MaybeObject obj = slot.Relaxed_Load();
HeapObject* heap_obj;
if (obj->GetHeapObjectIfWeak(&heap_obj)) {
UpdateSlot<access_mode>(slot, obj, heap_obj, HeapObjectReferenceType::WEAK);
@@ -2158,10 +2248,10 @@ static inline SlotCallbackResult UpdateSlot(MaybeObject** slot) {
}
template <AccessMode access_mode>
-static inline SlotCallbackResult UpdateStrongSlot(MaybeObject** maybe_slot) {
+static inline SlotCallbackResult UpdateStrongSlot(MaybeObjectSlot maybe_slot) {
DCHECK((*maybe_slot)->IsSmi() || (*maybe_slot)->IsStrong());
- Object** slot = reinterpret_cast<Object**>(maybe_slot);
- Object* obj = base::AsAtomicPointer::Relaxed_Load(slot);
+ ObjectSlot slot(maybe_slot);
+ Object* obj = slot.Relaxed_Load();
if (obj->IsHeapObject()) {
HeapObject* heap_obj = HeapObject::cast(obj);
return UpdateSlot<access_mode>(maybe_slot, MaybeObject::FromObject(obj),
@@ -2178,73 +2268,73 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointer(HeapObject* host, Object** p) override {
+ void VisitPointer(HeapObject* host, ObjectSlot p) override {
UpdateStrongSlotInternal(p);
}
- void VisitPointer(HeapObject* host, MaybeObject** p) override {
+ void VisitPointer(HeapObject* host, MaybeObjectSlot p) override {
UpdateSlotInternal(p);
}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) {
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) {
UpdateStrongSlotInternal(p);
}
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
- for (MaybeObject** p = start; p < end; p++) {
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
UpdateSlotInternal(p);
}
}
void VisitRootPointer(Root root, const char* description,
- Object** p) override {
+ ObjectSlot p) override {
UpdateStrongSlotInternal(p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) UpdateStrongSlotInternal(p);
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
+ for (ObjectSlot p = start; p < end; ++p) UpdateStrongSlotInternal(p);
}
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
UpdateTypedSlotHelper::UpdateEmbeddedPointer(
heap_, rinfo, UpdateStrongMaybeObjectSlotInternal);
}
- void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
UpdateTypedSlotHelper::UpdateCodeTarget(
rinfo, UpdateStrongMaybeObjectSlotInternal);
}
private:
static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
- MaybeObject** slot) {
+ MaybeObjectSlot slot) {
DCHECK(!(*slot)->IsWeakOrCleared());
- return UpdateStrongSlotInternal(reinterpret_cast<Object**>(slot));
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
}
- static inline SlotCallbackResult UpdateStrongSlotInternal(Object** slot) {
+ static inline SlotCallbackResult UpdateStrongSlotInternal(ObjectSlot slot) {
DCHECK(!HasWeakHeapObjectTag(*slot));
- return UpdateStrongSlot<AccessMode::NON_ATOMIC>(
- reinterpret_cast<MaybeObject**>(slot));
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(MaybeObjectSlot(slot));
}
- static inline SlotCallbackResult UpdateSlotInternal(MaybeObject** slot) {
+ static inline SlotCallbackResult UpdateSlotInternal(MaybeObjectSlot slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
}
Heap* heap_;
};
-static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
+static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
+ ObjectSlot p) {
MapWord map_word = HeapObject::cast(*p)->map_word();
if (map_word.IsForwardingAddress()) {
- String* new_string = String::cast(map_word.ToForwardingAddress());
+ String new_string = String::cast(map_word.ToForwardingAddress());
if (new_string->IsExternalString()) {
MemoryChunk::MoveExternalBackingStoreBytes(
@@ -2270,6 +2360,8 @@ void MarkCompactCollector::EvacuatePrologue() {
new_space->Flip();
new_space->ResetLinearAllocationArea();
+ heap()->new_lo_space()->Flip();
+
// Old space.
DCHECK(old_space_evacuation_pages_.empty());
old_space_evacuation_pages_ = std::move(evacuation_candidates_);
@@ -2283,6 +2375,8 @@ void MarkCompactCollector::EvacuateEpilogue() {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
// Deallocate unmarked large objects.
heap()->lo_space()->FreeUnmarkedObjects();
+ heap()->code_lo_space()->FreeUnmarkedObjects();
+ heap()->new_lo_space()->FreeUnmarkedObjects();
// Old space. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
// Give pages that are queued to be freed back to the OS.
@@ -2318,10 +2412,11 @@ class Evacuator : public Malloced {
// NewSpacePages with more live bytes than this threshold qualify for fast
// evacuation.
- static int PageEvacuationThreshold() {
+ static intptr_t NewSpacePageEvacuationThreshold() {
if (FLAG_page_promotion)
- return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
- return Page::kAllocatableMemory + kPointerSize;
+ return FLAG_page_promotion_threshold *
+ MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
+ return MemoryChunkLayout::AllocatableMemoryInDataPage() + kPointerSize;
}
Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
@@ -2341,7 +2436,7 @@ class Evacuator : public Malloced {
virtual ~Evacuator() = default;
- void EvacuatePage(Page* page);
+ void EvacuatePage(MemoryChunk* chunk);
void AddObserver(MigrationObserver* observer) {
new_space_visitor_.AddObserver(observer);
@@ -2358,7 +2453,8 @@ class Evacuator : public Malloced {
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
// |saved_live_bytes| returns the live bytes of the page that was processed.
- virtual void RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
+ virtual void RawEvacuatePage(MemoryChunk* chunk,
+ intptr_t* saved_live_bytes) = 0;
inline Heap* heap() { return heap_; }
@@ -2386,29 +2482,30 @@ class Evacuator : public Malloced {
intptr_t bytes_compacted_;
};
-void Evacuator::EvacuatePage(Page* page) {
+void Evacuator::EvacuatePage(MemoryChunk* chunk) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
- DCHECK(page->SweepingDone());
+ DCHECK(chunk->SweepingDone());
intptr_t saved_live_bytes = 0;
double evacuation_time = 0.0;
{
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
- RawEvacuatePage(page, &saved_live_bytes);
+ RawEvacuatePage(chunk, &saved_live_bytes);
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) {
- PrintIsolate(
- heap()->isolate(),
- "evacuation[%p]: page=%p new_space=%d "
- "page_evacuation=%d executable=%d contains_age_mark=%d "
- "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
- static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
- page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
- page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
- page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
- page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
- evacuation_time, page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ PrintIsolate(heap()->isolate(),
+ "evacuation[%p]: page=%p new_space=%d "
+ "page_evacuation=%d executable=%d contains_age_mark=%d "
+ "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
+ static_cast<void*>(this), static_cast<void*>(chunk),
+ chunk->InNewSpace(),
+ chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+ chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+ chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+ chunk->Contains(heap()->new_space()->age_mark()),
+ saved_live_bytes, evacuation_time,
+ chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}
}
@@ -2439,51 +2536,51 @@ class FullEvacuator : public Evacuator {
}
protected:
- void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+ void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
MarkCompactCollector* collector_;
};
-void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
- const EvacuationMode evacuation_mode = ComputeEvacuationMode(page);
+void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
+ const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"FullEvacuator::RawEvacuatePage", "evacuation_mode",
evacuation_mode);
MarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
- *live_bytes = marking_state->live_bytes(page);
+ *live_bytes = marking_state->live_bytes(chunk);
HeapObject* failed_object = nullptr;
switch (evacuation_mode) {
case kObjectsNewToOld:
LiveObjectVisitor::VisitBlackObjectsNoFail(
- page, marking_state, &new_space_visitor_,
+ chunk, marking_state, &new_space_visitor_,
LiveObjectVisitor::kClearMarkbits);
// ArrayBufferTracker will be updated during pointers updating.
break;
case kPageNewToOld:
LiveObjectVisitor::VisitBlackObjectsNoFail(
- page, marking_state, &new_to_old_page_visitor_,
+ chunk, marking_state, &new_to_old_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_old_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
+ marking_state->live_bytes(chunk));
// ArrayBufferTracker will be updated during sweeping.
break;
case kPageNewToNew:
LiveObjectVisitor::VisitBlackObjectsNoFail(
- page, marking_state, &new_to_new_page_visitor_,
+ chunk, marking_state, &new_to_new_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_new_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
+ marking_state->live_bytes(chunk));
// ArrayBufferTracker will be updated during sweeping.
break;
case kObjectsOldToOld: {
const bool success = LiveObjectVisitor::VisitBlackObjects(
- page, marking_state, &old_space_visitor_,
+ chunk, marking_state, &old_space_visitor_,
LiveObjectVisitor::kClearMarkbits, &failed_object);
if (!success) {
// Aborted compaction page. Actual processing happens on the main
// thread for simplicity reasons.
- collector_->ReportAbortedEvacuationCandidate(failed_object, page);
+ collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
} else {
// ArrayBufferTracker will be updated during pointers updating.
}
@@ -2492,14 +2589,14 @@ void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
}
}
-class PageEvacuationItem : public ItemParallelJob::Item {
+class EvacuationItem : public ItemParallelJob::Item {
public:
- explicit PageEvacuationItem(Page* page) : page_(page) {}
- ~PageEvacuationItem() override = default;
- Page* page() const { return page_; }
+ explicit EvacuationItem(MemoryChunk* chunk) : chunk_(chunk) {}
+ ~EvacuationItem() override = default;
+ MemoryChunk* chunk() const { return chunk_; }
private:
- Page* page_;
+ MemoryChunk* chunk_;
};
class PageEvacuationTask : public ItemParallelJob::Task {
@@ -2511,9 +2608,9 @@ class PageEvacuationTask : public ItemParallelJob::Task {
void RunInParallel() override {
TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
- PageEvacuationItem* item = nullptr;
- while ((item = GetItem<PageEvacuationItem>()) != nullptr) {
- evacuator_->EvacuatePage(item->page());
+ EvacuationItem* item = nullptr;
+ while ((item = GetItem<EvacuationItem>()) != nullptr) {
+ evacuator_->EvacuatePage(item->chunk());
item->MarkFinished();
}
};
@@ -2571,7 +2668,7 @@ bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
const bool reduce_memory = heap()->ShouldReduceMemory();
const Address age_mark = heap()->new_space()->age_mark();
return !reduce_memory && !p->NeverEvacuate() &&
- (live_bytes > Evacuator::PageEvacuationThreshold()) &&
+ (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
!p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
}
@@ -2582,7 +2679,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
for (Page* page : old_space_evacuation_pages_) {
live_bytes += non_atomic_marking_state()->live_bytes(page);
- evacuation_job.AddItem(new PageEvacuationItem(page));
+ evacuation_job.AddItem(new EvacuationItem(page));
}
for (Page* page : new_space_evacuation_pages_) {
@@ -2601,8 +2698,25 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- evacuation_job.AddItem(new PageEvacuationItem(page));
+ evacuation_job.AddItem(new EvacuationItem(page));
+ }
+
+ // Promote young generation large objects.
+ LargePage* current = heap()->new_lo_space()->first_page();
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+ while (current) {
+ LargePage* next_current = current->next_page();
+ HeapObject* object = current->GetObject();
+ DCHECK(!marking_state->IsGrey(object));
+ if (marking_state->IsBlack(object)) {
+ heap_->lo_space()->PromoteNewLargeObject(current);
+ current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ evacuation_job.AddItem(new EvacuationItem(current));
+ }
+ current = next_current;
}
+
if (evacuation_job.NumberOfItems() == 0) return;
RecordMigratedSlotVisitor record_visitor(this);
@@ -2625,11 +2739,6 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
-// Return true if the given code is deoptimized or will be deoptimized.
-bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
- return code->is_optimized_code() && code->marked_for_deoptimization();
-}
-
void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
EvacuateRecordOnlyVisitor visitor(heap());
LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
@@ -2717,7 +2826,7 @@ void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
- base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
+ base::MutexGuard guard(heap()->relocation_mutex());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
@@ -2842,7 +2951,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
PointersUpdatingVisitor visitor(chunk_->heap());
for (Address cur = start_; cur < end_;) {
HeapObject* object = HeapObject::FromAddress(cur);
- Map* map = object->map();
+ Map map = object->map();
int size = object->SizeFromMap(map);
object->IterateBodyFast(map, size, &visitor);
cur += size;
@@ -2882,15 +2991,14 @@ class RememberedSetUpdatingItem : public UpdatingItem {
void Process() override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"RememberedSetUpdatingItem::Process");
- base::LockGuard<base::Mutex> guard(chunk_->mutex());
+ base::MutexGuard guard(chunk_->mutex());
CodePageMemoryModificationScope memory_modification_scope(chunk_);
UpdateUntypedPointers();
UpdateTypedPointers();
}
private:
- inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
- MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
+ inline SlotCallbackResult CheckAndUpdateOldToNewSlot(MaybeObjectSlot slot) {
HeapObject* heap_object;
if (!(*slot)->GetHeapObject(&heap_object)) {
return REMOVE_SLOT;
@@ -2898,9 +3006,8 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (Heap::InFromSpace(heap_object)) {
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
- HeapObjectReference::Update(
- reinterpret_cast<HeapObjectReference**>(slot),
- map_word.ToForwardingAddress());
+ HeapObjectReference::Update(HeapObjectSlot(slot),
+ map_word.ToForwardingAddress());
}
bool success = (*slot)->GetHeapObject(&heap_object);
USE(success);
@@ -2940,7 +3047,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
- [this](Address slot) { return CheckAndUpdateOldToNewSlot(slot); },
+ [this](MaybeObjectSlot slot) {
+ return CheckAndUpdateOldToNewSlot(slot);
+ },
SlotSet::PREFREE_EMPTY_BUCKETS);
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
@@ -2948,10 +3057,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
InvalidatedSlotsFilter filter(chunk_);
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
- [&filter](Address slot) {
- if (!filter.IsValid(slot)) return REMOVE_SLOT;
- return UpdateSlot<AccessMode::NON_ATOMIC>(
- reinterpret_cast<MaybeObject**>(slot));
+ [&filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
@@ -2975,8 +3083,8 @@ class RememberedSetUpdatingItem : public UpdatingItem {
nullptr) {
CHECK_NE(chunk_->owner(), heap_->map_space());
const auto check_and_update_old_to_new_slot_fn =
- [this](MaybeObject** slot) {
- return CheckAndUpdateOldToNewSlot(reinterpret_cast<Address>(slot));
+ [this](MaybeObjectSlot slot) {
+ return CheckAndUpdateOldToNewSlot(slot);
};
RememberedSet<OLD_TO_NEW>::IterateTyped(
chunk_, [=](SlotType slot_type, Address host_addr, Address slot) {
@@ -3181,6 +3289,8 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
&updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
remembered_set_pages += CollectRememberedSetUpdatingItems(
&updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->code_lo_space(), RememberedSetUpdatingMode::ALL);
const int remembered_set_tasks =
remembered_set_pages == 0
? 0
@@ -3243,10 +3353,11 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
void MarkCompactCollector::ReportAbortedEvacuationCandidate(
- HeapObject* failed_object, Page* page) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ HeapObject* failed_object, MemoryChunk* chunk) {
+ base::MutexGuard guard(&mutex_);
- aborted_evacuation_candidates_.push_back(std::make_pair(failed_object, page));
+ aborted_evacuation_candidates_.push_back(
+ std::make_pair(failed_object, static_cast<Page*>(chunk)));
}
void MarkCompactCollector::PostProcessEvacuationCandidates() {
@@ -3443,8 +3554,8 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->new_space());
}
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ for (ObjectSlot current = start; current < end; ++current) {
DCHECK(!HasWeakHeapObjectTag(*current));
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
@@ -3454,8 +3565,8 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
}
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ for (MaybeObjectSlot current = start; current < end; ++current) {
HeapObject* object;
// Minor MC treats weak references as strong.
if ((*current)->GetHeapObject(&object)) {
@@ -3485,16 +3596,16 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
}
protected:
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
+ void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
+ for (ObjectSlot current = start; current < end; ++current) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
}
}
}
- void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
- for (MaybeObject** current = start; current < end; current++) {
+ void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
+ for (MaybeObjectSlot current = start; current < end; ++current) {
HeapObject* object;
if ((*current)->GetHeapObject(&object)) {
CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
@@ -3519,7 +3630,7 @@ void SeedGlobalHandles(Heap* heap, GlobalHandles* global_handles,
}
}
-bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
+bool IsUnmarkedObjectForYoungGeneration(Heap* heap, ObjectSlot p) {
DCHECK_IMPLIES(Heap::InNewSpace(*p), Heap::InToSpace(*p));
return Heap::InNewSpace(*p) && !heap->minor_mark_compact_collector()
->non_atomic_marking_state()
@@ -3536,21 +3647,21 @@ class YoungGenerationMarkingVisitor final
MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
: worklist_(global_worklist, task_id), marking_state_(marking_state) {}
- V8_INLINE void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) {
+ V8_INLINE void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) final {
+ for (ObjectSlot p = start; p < end; ++p) {
VisitPointer(host, p);
}
}
- V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
- for (MaybeObject** p = start; p < end; p++) {
+ V8_INLINE void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
VisitPointer(host, p);
}
}
- V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
+ V8_INLINE void VisitPointer(HeapObject* host, ObjectSlot slot) final {
Object* target = *slot;
DCHECK(!HasWeakHeapObjectTag(target));
if (Heap::InNewSpace(target)) {
@@ -3559,8 +3670,8 @@ class YoungGenerationMarkingVisitor final
}
}
- V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** slot) final {
- MaybeObject* target = *slot;
+ V8_INLINE void VisitPointer(HeapObject* host, MaybeObjectSlot slot) final {
+ MaybeObject target = *slot;
if (Heap::InNewSpace(target)) {
HeapObject* target_object;
// Treat weak references as strong. TODO(marja): Proper weakness handling
@@ -3655,8 +3766,8 @@ class YoungGenerationRecordMigratedSlotVisitor final
MarkCompactCollector* collector)
: RecordMigratedSlotVisitor(collector) {}
- void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
+ void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
+ void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
UNREACHABLE();
}
@@ -3667,10 +3778,10 @@ class YoungGenerationRecordMigratedSlotVisitor final
return collector_->non_atomic_marking_state()->IsBlack(object);
}
- inline void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
+ inline void RecordMigratedSlot(HeapObject* host, MaybeObject value,
Address slot) final {
if (value->IsStrongOrWeak()) {
- Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+ Page* p = Page::FromAddress(value.ptr());
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
@@ -3710,6 +3821,9 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_pages += CollectRememberedSetUpdatingItems(
&updating_job, heap()->lo_space(),
RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->code_lo_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
const int remembered_set_tasks =
remembered_set_pages == 0 ? 0
: NumberOfParallelPointerUpdateTasks(
@@ -3751,19 +3865,20 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
: collector_(collector) {}
- void VisitRootPointer(Root root, const char* description, Object** p) final {
+ void VisitRootPointer(Root root, const char* description,
+ ObjectSlot p) final {
MarkObjectByPointer(p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) {
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) final {
+ for (ObjectSlot p = start; p < end; ++p) {
MarkObjectByPointer(p);
}
}
private:
- V8_INLINE void MarkObjectByPointer(Object** p) {
+ V8_INLINE void MarkObjectByPointer(ObjectSlot p) {
if (!(*p)->IsHeapObject()) return;
collector_->MarkRootObject(HeapObject::cast(*p));
}
@@ -3832,7 +3947,6 @@ void MinorMarkCompactCollector::MakeIterable(
// remove here.
MarkCompactCollector* full_collector = heap()->mark_compact_collector();
Address free_start = p->area_start();
- DCHECK_EQ(0, free_start % (32 * kPointerSize));
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
@@ -3851,7 +3965,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
- Map* map = object->synchronized_map();
+ Map map = object->synchronized_map();
int size = object->SizeFromMap(map);
free_start = free_end + size;
}
@@ -3885,12 +3999,12 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
: heap_(collector->heap()),
marking_state_(collector->non_atomic_marking_state()) {}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
DCHECK_EQ(static_cast<int>(root),
static_cast<int>(Root::kExternalStringsTable));
// Visit all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) {
+ for (ObjectSlot p = start; p < end; ++p) {
Object* o = *p;
if (o->IsHeapObject()) {
HeapObject* heap_object = HeapObject::cast(o);
@@ -3902,7 +4016,7 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
DCHECK(o->IsThinString());
}
// Set the entry to the_hole_value (as deleted).
- *p = ReadOnlyRoots(heap_).the_hole_value();
+ p.store(ReadOnlyRoots(heap_).the_hole_value());
}
}
}
@@ -4089,7 +4203,7 @@ class PageMarkingItem : public MarkingItem {
void Process(YoungGenerationMarkingTask* task) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"PageMarkingItem::Process");
- base::LockGuard<base::Mutex> guard(chunk_->mutex());
+ base::MutexGuard guard(chunk_->mutex());
MarkUntypedPointers(task);
MarkTypedPointers(task);
}
@@ -4098,10 +4212,11 @@ class PageMarkingItem : public MarkingItem {
inline Heap* heap() { return chunk_->heap(); }
void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk_,
- [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk_,
+ [this, task](MaybeObjectSlot slot) {
+ return CheckAndMarkObject(task, slot);
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
@@ -4109,16 +4224,15 @@ class PageMarkingItem : public MarkingItem {
chunk_,
[this, task](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap(), slot_type, slot, [this, task](MaybeObject** slot) {
- return CheckAndMarkObject(task,
- reinterpret_cast<Address>(slot));
+ heap(), slot_type, slot, [this, task](MaybeObjectSlot slot) {
+ return CheckAndMarkObject(task, slot);
});
});
}
SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
- Address slot_address) {
- MaybeObject* object = *reinterpret_cast<MaybeObject**>(slot_address);
+ MaybeObjectSlot slot) {
+ MaybeObject object = *slot;
if (Heap::InNewSpace(object)) {
// Marking happens before flipping the young generation, so the object
// has to be in ToSpace.
@@ -4162,15 +4276,15 @@ class GlobalHandlesMarkingItem : public MarkingItem {
: task_(task) {}
void VisitRootPointer(Root root, const char* description,
- Object** p) override {
+ ObjectSlot p) override {
DCHECK_EQ(Root::kGlobalHandles, root);
task_->MarkObject(*p);
}
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) override {
DCHECK_EQ(Root::kGlobalHandles, root);
- for (Object** p = start; p < end; p++) {
+ for (ObjectSlot p = start; p < end; ++p) {
task_->MarkObject(*p);
}
}
@@ -4267,7 +4381,7 @@ void MinorMarkCompactCollector::ProcessMarkingWorklist() {
void MinorMarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
- base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
+ base::MutexGuard guard(heap()->relocation_mutex());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
@@ -4321,63 +4435,67 @@ class YoungGenerationEvacuator : public Evacuator {
}
protected:
- void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+ void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
MinorMarkCompactCollector* collector_;
};
-void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
+void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
intptr_t* live_bytes) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"YoungGenerationEvacuator::RawEvacuatePage");
MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
collector_->non_atomic_marking_state();
- *live_bytes = marking_state->live_bytes(page);
- switch (ComputeEvacuationMode(page)) {
+ *live_bytes = marking_state->live_bytes(chunk);
+ switch (ComputeEvacuationMode(chunk)) {
case kObjectsNewToOld:
LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_space_visitor_,
+ chunk, marking_state, &new_space_visitor_,
LiveObjectVisitor::kClearMarkbits);
// ArrayBufferTracker will be updated during pointers updating.
break;
case kPageNewToOld:
LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_to_old_page_visitor_,
+ chunk, marking_state, &new_to_old_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_old_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
- // TODO(mlippautz): If cleaning array buffers is too slow here we can
- // delay it until the next GC.
- ArrayBufferTracker::FreeDead(page, marking_state);
- if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- ZAP_FREE_SPACE);
- } else if (heap()->incremental_marking()->IsMarking()) {
- // When incremental marking is on, we need to clear the mark bits of
- // the full collector. We cannot yet discard the young generation mark
- // bits as they are still relevant for pointers updating.
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- IGNORE_FREE_SPACE);
+ marking_state->live_bytes(chunk));
+ if (chunk->owner()->identity() != NEW_LO_SPACE) {
+ // TODO(mlippautz): If cleaning array buffers is too slow here we can
+ // delay it until the next GC.
+ ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
+ if (heap()->ShouldZapGarbage()) {
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
+ } else if (heap()->incremental_marking()->IsMarking()) {
+ // When incremental marking is on, we need to clear the mark bits of
+ // the full collector. We cannot yet discard the young generation mark
+ // bits as they are still relevant for pointers updating.
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP,
+ IGNORE_FREE_SPACE);
+ }
}
break;
case kPageNewToNew:
LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_to_new_page_visitor_,
+ chunk, marking_state, &new_to_new_page_visitor_,
LiveObjectVisitor::kKeepMarking);
new_to_new_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
+ marking_state->live_bytes(chunk));
+ DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
// TODO(mlippautz): If cleaning array buffers is too slow here we can
// delay it until the next GC.
- ArrayBufferTracker::FreeDead(page, marking_state);
+ ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- ZAP_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits of
// the full collector. We cannot yet discard the young generation mark
// bits as they are still relevant for pointers updating.
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- IGNORE_FREE_SPACE);
+ collector_->MakeIterable(static_cast<Page*>(chunk),
+ MarkingTreatmentMode::KEEP, IGNORE_FREE_SPACE);
}
break;
case kObjectsOldToOld:
@@ -4404,7 +4522,7 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
- evacuation_job.AddItem(new PageEvacuationItem(page));
+ evacuation_job.AddItem(new EvacuationItem(page));
}
if (evacuation_job.NumberOfItems() == 0) return;
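
The hunks above fold regular-page and promoted large-object evacuation into a single parallel job: each work item now wraps a MemoryChunk*, and PageEvacuationTask workers simply pull items until the job is drained. Below is a minimal standalone sketch of that work-item pattern; the class layout and the atomic index used to claim items are illustrative assumptions, not V8's actual ItemParallelJob implementation.

```cpp
// Minimal sketch of the EvacuationItem / parallel-job pattern (illustrative).
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <memory>
#include <thread>
#include <vector>

struct MemoryChunk {  // stand-in for v8::internal::MemoryChunk
  int id;
};

class EvacuationItem {
 public:
  explicit EvacuationItem(MemoryChunk* chunk) : chunk_(chunk) {}
  MemoryChunk* chunk() const { return chunk_; }

 private:
  MemoryChunk* chunk_;
};

class EvacuationJob {
 public:
  void AddItem(EvacuationItem* item) { items_.emplace_back(item); }

  // Each worker claims the next unprocessed item, mirroring
  // PageEvacuationTask::RunInParallel() pulling items via GetItem<>().
  void Run(int num_tasks) {
    std::vector<std::thread> workers;
    for (int t = 0; t < num_tasks; ++t) {
      workers.emplace_back([this] {
        for (std::size_t i = next_.fetch_add(1); i < items_.size();
             i = next_.fetch_add(1)) {
          // Stand-in for Evacuator::EvacuatePage(item->chunk()).
          std::printf("evacuating chunk %d\n", items_[i]->chunk()->id);
        }
      });
    }
    for (auto& worker : workers) worker.join();
  }

 private:
  std::vector<std::unique_ptr<EvacuationItem>> items_;
  std::atomic<std::size_t> next_{0};
};

int main() {
  MemoryChunk chunks[] = {{0}, {1}, {2}, {3}};
  EvacuationJob job;
  for (MemoryChunk& chunk : chunks) job.AddItem(new EvacuationItem(&chunk));
  job.Run(2);  // two worker tasks share the item list
}
```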
diff --git a/chromium/v8/src/heap/mark-compact.h b/chromium/v8/src/heap/mark-compact.h
index c4ab5b2b9cc..ced16f616c9 100644
--- a/chromium/v8/src/heap/mark-compact.h
+++ b/chromium/v8/src/heap/mark-compact.h
@@ -21,6 +21,7 @@ namespace internal {
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
+class JSWeakCell;
class MigrationObserver;
class RecordMigratedSlotVisitor;
class UpdatingItem;
@@ -76,13 +77,9 @@ class MarkingStateBase {
class MarkBitCellIterator {
public:
MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
- DCHECK(Bitmap::IsCellAligned(
- chunk_->AddressToMarkbitIndex(chunk_->area_start())));
- DCHECK(Bitmap::IsCellAligned(
- chunk_->AddressToMarkbitIndex(chunk_->area_end())));
last_cell_index_ =
Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
- cell_base_ = chunk_->area_start();
+ cell_base_ = chunk_->address();
cell_index_ =
Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
cells_ = bitmap->cells();
@@ -172,9 +169,9 @@ class LiveObjectRange {
inline void AdvanceToNextValidObject();
MemoryChunk* const chunk_;
- Map* const one_word_filler_map_;
- Map* const two_word_filler_map_;
- Map* const free_space_map_;
+ Map const one_word_filler_map_;
+ Map const two_word_filler_map_;
+ Map const free_space_map_;
MarkBitCellIterator it_;
Address cell_base_;
MarkBit::CellType current_cell_;
@@ -238,6 +235,8 @@ enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
public:
+ static const int kMainThread = 0;
+
virtual ~MarkCompactCollectorBase() = default;
virtual void SetUp() = 0;
@@ -248,7 +247,6 @@ class MarkCompactCollectorBase {
inline Isolate* isolate();
protected:
- static const int kMainThread = 0;
explicit MarkCompactCollectorBase(Heap* heap)
: heap_(heap), old_to_new_slots_(0) {}
@@ -341,7 +339,10 @@ class IncrementalMarkingState final
: public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) const {
- return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
+ reinterpret_cast<intptr_t>(chunk),
+ MemoryChunk::kMarkBitmapOffset);
+ return chunk->marking_bitmap_;
}
// Concurrent marking uses local live bytes.
@@ -362,7 +363,10 @@ class MajorAtomicMarkingState final
: public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) const {
- return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
+ reinterpret_cast<intptr_t>(chunk),
+ MemoryChunk::kMarkBitmapOffset);
+ return chunk->marking_bitmap_;
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -383,7 +387,10 @@ class MajorNonAtomicMarkingState final
AccessMode::NON_ATOMIC> {
public:
Bitmap* bitmap(const MemoryChunk* chunk) const {
- return Bitmap::FromAddress(chunk->address() + MemoryChunk::kHeaderSize);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
+ reinterpret_cast<intptr_t>(chunk),
+ MemoryChunk::kMarkBitmapOffset);
+ return chunk->marking_bitmap_;
}
void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
@@ -412,7 +419,7 @@ struct WeakObjects {
// Keep track of all EphemeronHashTables in the heap to process
// them in the atomic pause.
- Worklist<EphemeronHashTable*, 64> ephemeron_hash_tables;
+ Worklist<EphemeronHashTable, 64> ephemeron_hash_tables;
// Keep track of all ephemerons for concurrent marking tasks. Only store
// ephemerons in these Worklists if both key and value are unreachable at the
@@ -434,8 +441,10 @@ struct WeakObjects {
// TODO(marja): For old space, we only need the slot, not the host
// object. Optimize this by adding a different storage for old space.
- Worklist<std::pair<HeapObject*, HeapObjectReference**>, 64> weak_references;
- Worklist<std::pair<HeapObject*, Code*>, 64> weak_objects_in_code;
+ Worklist<std::pair<HeapObject*, HeapObjectSlot>, 64> weak_references;
+ Worklist<std::pair<HeapObject*, Code>, 64> weak_objects_in_code;
+
+ Worklist<JSWeakCell*, 64> js_weak_cells;
};
struct EphemeronMarking {
@@ -614,16 +623,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
->IsEvacuationCandidate();
}
- static inline bool IsOnEvacuationCandidate(MaybeObject* obj) {
- return Page::FromAddress(reinterpret_cast<Address>(obj))
- ->IsEvacuationCandidate();
- }
+ static bool IsOnEvacuationCandidate(MaybeObject obj);
- void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
- V8_INLINE static void RecordSlot(HeapObject* object, Object** slot,
+ void RecordRelocSlot(Code host, RelocInfo* rinfo, Object* target);
+ V8_INLINE static void RecordSlot(HeapObject* object, ObjectSlot slot,
HeapObject* target);
- V8_INLINE static void RecordSlot(HeapObject* object,
- HeapObjectReference** slot,
+ V8_INLINE static void RecordSlot(HeapObject* object, HeapObjectSlot slot,
HeapObject* target);
void RecordLiveSlotsOnPage(Page* page);
@@ -652,7 +657,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
- void AddEphemeronHashTable(EphemeronHashTable* table) {
+ void AddEphemeronHashTable(EphemeronHashTable table) {
weak_objects_.ephemeron_hash_tables.Push(kMainThread, table);
}
@@ -661,15 +666,19 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
Ephemeron{key, value});
}
- void AddWeakReference(HeapObject* host, HeapObjectReference** slot) {
+ void AddWeakReference(HeapObject* host, HeapObjectSlot slot) {
weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
}
- void AddWeakObjectInCode(HeapObject* object, Code* code) {
+ void AddWeakObjectInCode(HeapObject* object, Code code) {
weak_objects_.weak_objects_in_code.Push(kMainThread,
std::make_pair(object, code));
}
+ void AddWeakCell(JSWeakCell* weak_cell) {
+ weak_objects_.js_weak_cells.Push(kMainThread, weak_cell);
+ }
+
void AddNewlyDiscovered(HeapObject* object) {
if (ephemeron_marking_.newly_discovered_overflowed) return;
@@ -701,14 +710,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void VerifyMarkbitsAreDirty(PagedSpace* space);
void VerifyMarkbitsAreClean(PagedSpace* space);
void VerifyMarkbitsAreClean(NewSpace* space);
+ void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif
private:
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override;
- bool WillBeDeoptimized(Code* code);
-
void ComputeEvacuationHeuristics(size_t area_size,
int* target_fragmentation_percent,
size_t* max_evacuated_bytes);
@@ -783,7 +791,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Callback function for telling whether the object *p is an unmarked
// heap object.
- static bool IsUnmarkedHeapObject(Heap* heap, Object** p);
+ static bool IsUnmarkedHeapObject(Heap* heap, ObjectSlot p);
// Clear non-live references in weak cells, transition and descriptor arrays,
// and deoptimize dependent code of non-live maps.
@@ -792,15 +800,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Checks if the given weak cell is a simple transition from the parent map
// of the given dead target. If so it clears the transition and trims
// the descriptor array of the parent if needed.
- void ClearPotentialSimpleMapTransition(Map* dead_target);
- void ClearPotentialSimpleMapTransition(Map* map, Map* dead_target);
+ void ClearPotentialSimpleMapTransition(Map dead_target);
+ void ClearPotentialSimpleMapTransition(Map map, Map dead_target);
// Compact every array in the global list of transition arrays and
// trim the corresponding descriptor array if a transition target is non-live.
void ClearFullMapTransitions();
- bool CompactTransitionArray(Map* map, TransitionArray* transitions,
+ bool CompactTransitionArray(Map map, TransitionArray* transitions,
DescriptorArray* descriptors);
- void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
- void TrimEnumCache(Map* map, DescriptorArray* descriptors);
+ void TrimDescriptorArray(Map map, DescriptorArray* descriptors);
+ void TrimEnumCache(Map map, DescriptorArray* descriptors);
// After all reachable objects have been marked those weak map entries
// with an unreachable key are removed from all encountered weak maps.
@@ -812,6 +820,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// the dead map via weak cell, then this function also clears the map
// transition.
void ClearWeakReferences();
+
+ // Goes through the list of encountered JSWeakCells and clears those with dead
+ // values.
+ void ClearJSWeakCells();
+
void AbortWeakObjects();
// Starts sweeping of spaces by contributing on the main thread and setting
@@ -835,12 +848,16 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void ReleaseEvacuationCandidates();
void PostProcessEvacuationCandidates();
- void ReportAbortedEvacuationCandidate(HeapObject* failed_object, Page* page);
+ void ReportAbortedEvacuationCandidate(HeapObject* failed_object,
+ MemoryChunk* chunk);
static const int kEphemeronChunkSize = 8 * KB;
int NumberOfParallelEphemeronVisitingTasks(size_t elements);
+ void RightTrimDescriptorArray(DescriptorArray* array,
+ int descriptors_to_trim);
+
base::Mutex mutex_;
base::Semaphore page_parallel_job_semaphore_;
@@ -909,42 +926,43 @@ class MarkingVisitor final
V8_INLINE bool ShouldVisitMapPointer() { return false; }
- V8_INLINE int VisitBytecodeArray(Map* map, BytecodeArray* object);
- V8_INLINE int VisitEphemeronHashTable(Map* map, EphemeronHashTable* object);
- V8_INLINE int VisitFixedArray(Map* map, FixedArray* object);
- V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
- V8_INLINE int VisitJSArrayBuffer(Map* map, JSArrayBuffer* object);
- V8_INLINE int VisitJSDataView(Map* map, JSDataView* object);
- V8_INLINE int VisitJSTypedArray(Map* map, JSTypedArray* object);
- V8_INLINE int VisitMap(Map* map, Map* object);
- V8_INLINE int VisitTransitionArray(Map* map, TransitionArray* object);
+ V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
+ V8_INLINE int VisitEphemeronHashTable(Map map, EphemeronHashTable object);
+ V8_INLINE int VisitFixedArray(Map map, FixedArray object);
+ V8_INLINE int VisitJSApiObject(Map map, JSObject* object);
+ V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer* object);
+ V8_INLINE int VisitJSDataView(Map map, JSDataView* object);
+ V8_INLINE int VisitJSTypedArray(Map map, JSTypedArray* object);
+ V8_INLINE int VisitMap(Map map, Map object);
+ V8_INLINE int VisitTransitionArray(Map map, TransitionArray* object);
+ V8_INLINE int VisitJSWeakCell(Map map, JSWeakCell* object);
// ObjectVisitor implementation.
- V8_INLINE void VisitPointer(HeapObject* host, Object** p) final;
- V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** p) final;
- V8_INLINE void VisitPointers(HeapObject* host, Object** start,
- Object** end) final;
- V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final;
- V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
- V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitPointer(HeapObject* host, ObjectSlot p) final;
+ V8_INLINE void VisitPointer(HeapObject* host, MaybeObjectSlot p) final;
+ V8_INLINE void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) final;
+ V8_INLINE void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final;
+ V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
+ V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;
// Weak list pointers should be ignored during marking. The lists are
// reconstructed after GC.
- void VisitCustomWeakPointers(HeapObject* host, Object** start,
- Object** end) final {}
+ void VisitCustomWeakPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) final {}
private:
// Granularity in which FixedArrays are scanned if |fixed_array_mode|
// is true.
static const int kProgressBarScanningChunk = 32 * 1024;
- V8_INLINE int VisitFixedArrayIncremental(Map* map, FixedArray* object);
+ V8_INLINE int VisitFixedArrayIncremental(Map map, FixedArray object);
template <typename T>
- V8_INLINE int VisitEmbedderTracingSubclass(Map* map, T* object);
+ V8_INLINE int VisitEmbedderTracingSubclass(Map map, T* object);
- V8_INLINE void MarkMapContents(Map* map);
+ V8_INLINE void MarkMapContents(Map map);
// Marks the object black without pushing it on the marking work list. Returns
// true if the object needed marking and false otherwise.
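
These declarations continue the migration from raw Object**/MaybeObject** parameters to address-typed ObjectSlot/MaybeObjectSlot values, which the surrounding hunks use via *slot, slot.store(...), slot.address(), ++slot, and slot differences. The following is a minimal sketch of such a slot wrapper, assuming a plain address-holding layout rather than the real src/objects/slots.h definitions.

```cpp
// Simplified address-typed slot wrapper (illustrative, not the real ObjectSlot).
#include <cstddef>
#include <cstdint>
#include <cstdio>

using Address = std::uintptr_t;
struct Object { int value; };  // stand-in for a tagged heap value

class ObjectSlot {
 public:
  explicit ObjectSlot(Object** location)
      : address_(reinterpret_cast<Address>(location)) {}

  Address address() const { return address_; }
  Object* operator*() const { return *reinterpret_cast<Object**>(address_); }
  void store(Object* value) const {
    *reinterpret_cast<Object**>(address_) = value;
  }

  ObjectSlot& operator++() {  // advance by one pointer-sized field
    address_ += sizeof(Object*);
    return *this;
  }
  bool operator<(const ObjectSlot& other) const {
    return address_ < other.address_;
  }
  std::ptrdiff_t operator-(const ObjectSlot& other) const {
    return static_cast<std::ptrdiff_t>(address_ - other.address_) /
           static_cast<std::ptrdiff_t>(sizeof(Object*));
  }

 private:
  Address address_;
};

int main() {
  Object a{1}, b{2}, c{3};
  Object* fields[] = {&a, &b, &c};
  ObjectSlot start(fields), end(fields + 3);
  std::printf("%td tagged fields\n", end - start);  // like (end - start) above
  for (ObjectSlot p = start; p < end; ++p) {        // like VisitPointers loops
    std::printf("%d\n", (*p)->value);
  }
  start.store(&c);  // overwrite the first field, as p.store(...) does above
}
```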
diff --git a/chromium/v8/src/heap/marking.h b/chromium/v8/src/heap/marking.h
index ccf25d65496..a2f267185f2 100644
--- a/chromium/v8/src/heap/marking.h
+++ b/chromium/v8/src/heap/marking.h
@@ -122,10 +122,6 @@ class V8_EXPORT_PRIVATE Bitmap {
return index & ~kBitIndexMask;
}
- V8_INLINE static bool IsCellAligned(uint32_t index) {
- return (index & kBitIndexMask) == 0;
- }
-
V8_INLINE MarkBit::CellType* cells() {
return reinterpret_cast<MarkBit::CellType*>(this);
}
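
With cell_base_ now starting at the chunk's base address, the mark-bit index of area_start() no longer has to be cell-aligned, so IsCellAligned() and the corresponding DCHECKs in MarkBitCellIterator can go. As a rough illustration of the index/cell arithmetic involved, with assumed constants (32-bit cells, 64-bit words) rather than values copied from marking.h:

```cpp
// Sketch of mark-bit index <-> cell math for a 32-bit-cell marking bitmap.
#include <cstdint>
#include <cstdio>

using Address = std::uintptr_t;

constexpr unsigned kPointerSizeLog2 = 3;  // assume 64-bit words
constexpr std::uint32_t kBitsPerCell = 32;
constexpr std::uint32_t kBitsPerCellLog2 = 5;
constexpr std::uint32_t kBitIndexMask = kBitsPerCell - 1;

// One mark bit per pointer-sized word, counted from the chunk base address.
std::uint32_t AddressToMarkbitIndex(Address chunk_base, Address addr) {
  return static_cast<std::uint32_t>((addr - chunk_base) >> kPointerSizeLog2);
}
std::uint32_t IndexToCell(std::uint32_t index) {
  return index >> kBitsPerCellLog2;
}
std::uint32_t IndexInCell(std::uint32_t index) { return index & kBitIndexMask; }
std::uint32_t CellAlignIndex(std::uint32_t index) {
  return index & ~kBitIndexMask;
}

int main() {
  Address chunk_base = 0x40000;
  Address object = chunk_base + 0x1238;  // a word-aligned address on the chunk
  std::uint32_t index = AddressToMarkbitIndex(chunk_base, object);
  std::printf("index=%u cell=%u bit-in-cell=%u cell-aligned-index=%u\n", index,
              IndexToCell(index), IndexInCell(index), CellAlignIndex(index));
}
```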
diff --git a/chromium/v8/src/heap/memory-reducer.cc b/chromium/v8/src/heap/memory-reducer.cc
index 4af7df87fd7..475728b769c 100644
--- a/chromium/v8/src/heap/memory-reducer.cc
+++ b/chromium/v8/src/heap/memory-reducer.cc
@@ -7,6 +7,7 @@
#include "src/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking.h"
#include "src/utils.h"
#include "src/v8.h"
diff --git a/chromium/v8/src/heap/object-stats.cc b/chromium/v8/src/heap/object-stats.cc
index bb069d19f4a..d56c0222c45 100644
--- a/chromium/v8/src/heap/object-stats.cc
+++ b/chromium/v8/src/heap/object-stats.cc
@@ -16,8 +16,10 @@
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/heap-object.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/slots.h"
#include "src/objects/templates.h"
#include "src/utils.h"
@@ -66,11 +68,12 @@ class FieldStatsCollector : public ObjectVisitor {
*raw_fields_count_ += raw_fields_count_in_object;
}
- void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) override {
*tagged_fields_count_ += (end - start);
}
- void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) override {
+ void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) override {
*tagged_fields_count_ += (end - start);
}
@@ -82,9 +85,10 @@ class FieldStatsCollector : public ObjectVisitor {
unsigned embedded_fields_count_ : kDescriptorIndexBitCount;
unsigned unboxed_double_fields_count_ : kDescriptorIndexBitCount;
};
- std::unordered_map<Map*, JSObjectFieldStats> field_stats_cache_;
+ std::unordered_map<Map, JSObjectFieldStats, ObjectPtr::Hasher>
+ field_stats_cache_;
- JSObjectFieldStats GetInobjectFieldStats(Map* map);
+ JSObjectFieldStats GetInobjectFieldStats(Map map);
size_t* const tagged_fields_count_;
size_t* const embedder_fields_count_;
@@ -93,7 +97,7 @@ class FieldStatsCollector : public ObjectVisitor {
};
FieldStatsCollector::JSObjectFieldStats
-FieldStatsCollector::GetInobjectFieldStats(Map* map) {
+FieldStatsCollector::GetInobjectFieldStats(Map map) {
auto iter = field_stats_cache_.find(map);
if (iter != field_stats_cache_.end()) {
return iter->second;
@@ -272,7 +276,7 @@ void ObjectStats::Dump(std::stringstream& stream) {
}
void ObjectStats::CheckpointObjectStats() {
- base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
+ base::MutexGuard lock_guard(object_stats_mutex.Pointer());
MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
ClearObjectStats();
@@ -349,12 +353,12 @@ class ObjectStatsCollectorImpl {
ObjectStats::VirtualInstanceType type);
// For HashTable it is possible to compute the over-allocated memory.
void RecordHashTableVirtualObjectStats(HeapObject* parent,
- FixedArray* hash_table,
+ FixedArray hash_table,
ObjectStats::VirtualInstanceType type);
bool SameLiveness(HeapObject* obj1, HeapObject* obj2);
- bool CanRecordFixedArray(FixedArrayBase* array);
- bool IsCowArray(FixedArrayBase* array);
+ bool CanRecordFixedArray(FixedArrayBase array);
+ bool IsCowArray(FixedArrayBase array);
// Blacklist for objects that should not be recorded using
// VirtualObjectStats and RecordSimpleVirtualObjectStats. For recording those
@@ -371,18 +375,18 @@ class ObjectStatsCollectorImpl {
// Details.
void RecordVirtualAllocationSiteDetails(AllocationSite* site);
- void RecordVirtualBytecodeArrayDetails(BytecodeArray* bytecode);
- void RecordVirtualCodeDetails(Code* code);
- void RecordVirtualContext(Context* context);
+ void RecordVirtualBytecodeArrayDetails(BytecodeArray bytecode);
+ void RecordVirtualCodeDetails(Code code);
+ void RecordVirtualContext(Context context);
void RecordVirtualFeedbackVectorDetails(FeedbackVector* vector);
- void RecordVirtualFixedArrayDetails(FixedArray* array);
+ void RecordVirtualFixedArrayDetails(FixedArray array);
void RecordVirtualFunctionTemplateInfoDetails(FunctionTemplateInfo* fti);
void RecordVirtualJSGlobalObjectDetails(JSGlobalObject* object);
void RecordVirtualJSCollectionDetails(JSObject* object);
void RecordVirtualJSObjectDetails(JSObject* object);
- void RecordVirtualMapDetails(Map* map);
+ void RecordVirtualMapDetails(Map map);
void RecordVirtualScriptDetails(Script* script);
- void RecordVirtualExternalStringDetails(ExternalString* script);
+ void RecordVirtualExternalStringDetails(ExternalString script);
void RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo* info);
void RecordVirtualJSFunctionDetails(JSFunction* function);
@@ -409,7 +413,7 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject* obj,
CowMode check_cow_array) {
if (obj->IsFixedArrayExact()) {
- FixedArray* fixed_array = FixedArray::cast(obj);
+ FixedArray fixed_array = FixedArray::cast(obj);
bool cow_check = check_cow_array == kIgnoreCow || !IsCowArray(fixed_array);
return CanRecordFixedArray(fixed_array) && cow_check;
}
@@ -418,7 +422,7 @@ bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject* obj,
}
void ObjectStatsCollectorImpl::RecordHashTableVirtualObjectStats(
- HeapObject* parent, FixedArray* hash_table,
+ HeapObject* parent, FixedArray hash_table,
ObjectStats::VirtualInstanceType type) {
CHECK(hash_table->IsHashTable());
// TODO(mlippautz): Implement over allocation for hash tables.
@@ -471,16 +475,16 @@ void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
if (boilerplate->HasFastProperties()) {
// We'll mis-classify the empty_property_array here. Given that there is a
// single instance, this is negligible.
- PropertyArray* properties = boilerplate->property_array();
+ PropertyArray properties = boilerplate->property_array();
RecordSimpleVirtualObjectStats(
site, properties, ObjectStats::BOILERPLATE_PROPERTY_ARRAY_TYPE);
} else {
- NameDictionary* properties = boilerplate->property_dictionary();
+ NameDictionary properties = boilerplate->property_dictionary();
RecordSimpleVirtualObjectStats(
site, properties, ObjectStats::BOILERPLATE_PROPERTY_DICTIONARY_TYPE);
}
}
- FixedArrayBase* elements = boilerplate->elements();
+ FixedArrayBase elements = boilerplate->elements();
RecordSimpleVirtualObjectStats(site, elements,
ObjectStats::BOILERPLATE_ELEMENTS_TYPE);
}
@@ -494,9 +498,9 @@ void ObjectStatsCollectorImpl::RecordVirtualFunctionTemplateInfoDetails(
fti, CallHandlerInfo::cast(fti->call_code()),
ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
- if (!fti->instance_call_handler()->IsUndefined(isolate())) {
+ if (!fti->GetInstanceCallHandler()->IsUndefined(isolate())) {
RecordSimpleVirtualObjectStats(
- fti, CallHandlerInfo::cast(fti->instance_call_handler()),
+ fti, CallHandlerInfo::cast(fti->GetInstanceCallHandler()),
ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
}
@@ -504,11 +508,11 @@ void ObjectStatsCollectorImpl::RecordVirtualFunctionTemplateInfoDetails(
void ObjectStatsCollectorImpl::RecordVirtualJSGlobalObjectDetails(
JSGlobalObject* object) {
// Properties.
- GlobalDictionary* properties = object->global_dictionary();
+ GlobalDictionary properties = object->global_dictionary();
RecordHashTableVirtualObjectStats(object, properties,
ObjectStats::GLOBAL_PROPERTIES_TYPE);
// Elements.
- FixedArrayBase* elements = object->elements();
+ FixedArrayBase elements = object->elements();
RecordSimpleVirtualObjectStats(object, elements,
ObjectStats::GLOBAL_ELEMENTS_TYPE);
}
@@ -518,12 +522,12 @@ void ObjectStatsCollectorImpl::RecordVirtualJSCollectionDetails(
if (object->IsJSMap()) {
RecordSimpleVirtualObjectStats(
object, FixedArray::cast(JSMap::cast(object)->table()),
- ObjectStats::JS_COLLETION_TABLE_TYPE);
+ ObjectStats::JS_COLLECTION_TABLE_TYPE);
}
if (object->IsJSSet()) {
RecordSimpleVirtualObjectStats(
object, FixedArray::cast(JSSet::cast(object)->table()),
- ObjectStats::JS_COLLETION_TABLE_TYPE);
+ ObjectStats::JS_COLLECTION_TABLE_TYPE);
}
}
@@ -533,20 +537,20 @@ void ObjectStatsCollectorImpl::RecordVirtualJSObjectDetails(JSObject* object) {
// Properties.
if (object->HasFastProperties()) {
- PropertyArray* properties = object->property_array();
+ PropertyArray properties = object->property_array();
CHECK_EQ(PROPERTY_ARRAY_TYPE, properties->map()->instance_type());
} else {
- NameDictionary* properties = object->property_dictionary();
+ NameDictionary properties = object->property_dictionary();
RecordHashTableVirtualObjectStats(
object, properties, ObjectStats::OBJECT_PROPERTY_DICTIONARY_TYPE);
}
// Elements.
- FixedArrayBase* elements = object->elements();
+ FixedArrayBase elements = object->elements();
RecordSimpleVirtualObjectStats(object, elements, ObjectStats::ELEMENTS_TYPE);
}
static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
- MaybeObject* maybe_obj, FeedbackSlotKind kind, Isolate* isolate) {
+ MaybeObject maybe_obj, FeedbackSlotKind kind, Isolate* isolate) {
if (maybe_obj->IsCleared())
return ObjectStats::FEEDBACK_VECTOR_SLOT_OTHER_TYPE;
Object* obj = maybe_obj->GetHeapObjectOrSmi();
@@ -600,8 +604,7 @@ void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
size_t calculated_size = 0;
// Log the feedback vector's header (fixed fields).
- size_t header_size =
- reinterpret_cast<Address>(vector->slots_start()) - vector->address();
+ size_t header_size = vector->slots_start().address() - vector->address();
stats_->RecordVirtualObjectStats(ObjectStats::FEEDBACK_VECTOR_HEADER_TYPE,
header_size,
ObjectStats::kNoOverAllocation);
@@ -622,7 +625,7 @@ void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
// Log the monomorphic/polymorphic helper objects that this slot owns.
for (int i = 0; i < it.entry_size(); i++) {
- MaybeObject* raw_object = vector->get(slot.ToInt() + i);
+ MaybeObject raw_object = vector->get(slot.ToInt() + i);
HeapObject* object;
if (raw_object->GetHeapObject(&object)) {
if (object->IsCell() || object->IsWeakFixedArray()) {
@@ -638,7 +641,7 @@ void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
}
void ObjectStatsCollectorImpl::RecordVirtualFixedArrayDetails(
- FixedArray* array) {
+ FixedArray array) {
if (IsCowArray(array)) {
RecordVirtualObjectStats(nullptr, array, ObjectStats::COW_ARRAY_TYPE,
array->Size(), ObjectStats::kNoOverAllocation,
@@ -648,7 +651,7 @@ void ObjectStatsCollectorImpl::RecordVirtualFixedArrayDetails(
void ObjectStatsCollectorImpl::CollectStatistics(
HeapObject* obj, Phase phase, CollectFieldStats collect_field_stats) {
- Map* map = obj->map();
+ Map map = obj->map();
switch (phase) {
case kPhase1:
if (obj->IsFeedbackVector()) {
@@ -745,15 +748,15 @@ void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject* obj,
}
}
-bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase* array) {
+bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase array) {
ReadOnlyRoots roots(heap_);
return array != roots.empty_fixed_array() &&
array != roots.empty_sloppy_arguments_elements() &&
array != roots.empty_slow_element_dictionary() &&
- array != heap_->empty_property_dictionary();
+ array != roots.empty_property_dictionary();
}
-bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase* array) {
+bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase array) {
return array->map() == ReadOnlyRoots(heap_).fixed_cow_array_map();
}
@@ -763,14 +766,14 @@ bool ObjectStatsCollectorImpl::SameLiveness(HeapObject* obj1,
marking_state_->Color(obj1) == marking_state_->Color(obj2);
}
-void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
+void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
// TODO(mlippautz): map->dependent_code(): DEPENDENT_CODE_TYPE.
DescriptorArray* array = map->instance_descriptors();
if (map->owns_descriptors() &&
array != ReadOnlyRoots(heap_).empty_descriptor_array()) {
// DescriptorArray has its own instance type.
- EnumCache* enum_cache = array->GetEnumCache();
+ EnumCache* enum_cache = array->enum_cache();
RecordSimpleVirtualObjectStats(array, enum_cache->keys(),
ObjectStats::ENUM_CACHE_TYPE);
RecordSimpleVirtualObjectStats(array, enum_cache->indices(),
@@ -800,7 +803,7 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
// The contents of external strings aren't on the heap, so we have to record
// them manually. The on-heap String object is recorded independently in
// the normal pass.
- ExternalString* string = ExternalString::cast(raw_source);
+ ExternalString string = ExternalString::cast(raw_source);
Address resource = string->resource_as_address();
size_t off_heap_size = string->ExternalPayloadSize();
RecordExternalResourceStats(
@@ -810,7 +813,7 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
: ObjectStats::SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE,
off_heap_size);
} else if (raw_source->IsString()) {
- String* source = String::cast(raw_source);
+ String source = String::cast(raw_source);
RecordSimpleVirtualObjectStats(
script, source,
source->IsOneByteRepresentation()
@@ -820,7 +823,7 @@ void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
}
void ObjectStatsCollectorImpl::RecordVirtualExternalStringDetails(
- ExternalString* string) {
+ ExternalString string) {
// Track the external string resource size in a separate category.
Address resource = string->resource_as_address();
@@ -863,7 +866,7 @@ void ObjectStatsCollectorImpl::
ObjectStats::VirtualInstanceType type) {
if (!RecordSimpleVirtualObjectStats(parent, object, type)) return;
if (object->IsFixedArrayExact()) {
- FixedArray* array = FixedArray::cast(object);
+ FixedArray array = FixedArray::cast(object);
for (int i = 0; i < array->length(); i++) {
Object* entry = array->get(i);
if (!entry->IsHeapObject()) continue;
@@ -874,13 +877,13 @@ void ObjectStatsCollectorImpl::
}
void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
- BytecodeArray* bytecode) {
+ BytecodeArray bytecode) {
RecordSimpleVirtualObjectStats(
bytecode, bytecode->constant_pool(),
ObjectStats::BYTECODE_ARRAY_CONSTANT_POOL_TYPE);
// FixedArrays in the constant pool are used for holding descriptor information.
// They are shared with optimized code.
- FixedArray* constant_pool = FixedArray::cast(bytecode->constant_pool());
+ FixedArray constant_pool = FixedArray::cast(bytecode->constant_pool());
for (int i = 0; i < constant_pool->length(); i++) {
Object* entry = constant_pool->get(i);
if (entry->IsFixedArrayExact()) {
@@ -892,6 +895,8 @@ void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
RecordSimpleVirtualObjectStats(
bytecode, bytecode->handler_table(),
ObjectStats::BYTECODE_ARRAY_HANDLER_TABLE_TYPE);
+ RecordSimpleVirtualObjectStats(bytecode, bytecode->SourcePositionTable(),
+ ObjectStats::SOURCE_POSITION_TABLE_TYPE);
}
namespace {
@@ -912,7 +917,7 @@ ObjectStats::VirtualInstanceType CodeKindToVirtualInstanceType(
} // namespace
-void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
+void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
RecordSimpleVirtualObjectStats(nullptr, code,
CodeKindToVirtualInstanceType(code->kind()));
RecordSimpleVirtualObjectStats(code, code->deoptimization_data(),
@@ -932,7 +937,7 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
ObjectStats::SOURCE_POSITION_TABLE_TYPE);
}
if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {
- DeoptimizationData* input_data =
+ DeoptimizationData input_data =
DeoptimizationData::cast(code->deoptimization_data());
if (input_data->length() > 0) {
RecordSimpleVirtualObjectStats(code->deoptimization_data(),
@@ -953,7 +958,7 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
}
}
-void ObjectStatsCollectorImpl::RecordVirtualContext(Context* context) {
+void ObjectStatsCollectorImpl::RecordVirtualContext(Context context) {
if (context->IsNativeContext()) {
RecordObjectStats(context, NATIVE_CONTEXT_TYPE, context->Size());
} else if (context->IsFunctionContext()) {
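
Because Map is becoming a value-type handle rather than a Map* pointer, the field-stats cache keyed on it (earlier in this file's hunks) now names an explicit hasher, ObjectPtr::Hasher, alongside the key and mapped types. A generic sketch of that pattern with illustrative stand-in names:

```cpp
// Sketch: a pointer-sized value handle as an unordered_map key (illustrative).
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <unordered_map>

using Address = std::uintptr_t;

class Map {  // value-type handle wrapping a tagged address
 public:
  explicit Map(Address ptr) : ptr_(ptr) {}
  Address ptr() const { return ptr_; }
  bool operator==(const Map& other) const { return ptr_ == other.ptr_; }

 private:
  Address ptr_;
};

struct MapHasher {  // stand-in for ObjectPtr::Hasher
  std::size_t operator()(const Map& map) const {
    return static_cast<std::size_t>(map.ptr()) >> 3;  // drop alignment bits
  }
};

struct JSObjectFieldStats {
  unsigned embedded_fields_count;
  unsigned unboxed_double_fields_count;
};

int main() {
  std::unordered_map<Map, JSObjectFieldStats, MapHasher> field_stats_cache;
  Map map(0x1000);
  field_stats_cache.emplace(map, JSObjectFieldStats{2, 1});
  auto it = field_stats_cache.find(map);
  if (it != field_stats_cache.end()) {
    std::printf("embedded=%u unboxed=%u\n", it->second.embedded_fields_count,
                it->second.unboxed_double_fields_count);
  }
}
```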
diff --git a/chromium/v8/src/heap/object-stats.h b/chromium/v8/src/heap/object-stats.h
index 7914f098813..a1e4cd3b935 100644
--- a/chromium/v8/src/heap/object-stats.h
+++ b/chromium/v8/src/heap/object-stats.h
@@ -44,7 +44,7 @@
V(GLOBAL_ELEMENTS_TYPE) \
V(GLOBAL_PROPERTIES_TYPE) \
V(JS_ARRAY_BOILERPLATE_TYPE) \
- V(JS_COLLETION_TABLE_TYPE) \
+ V(JS_COLLECTION_TABLE_TYPE) \
V(JS_OBJECT_BOILERPLATE_TYPE) \
V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
V(NUMBER_STRING_CACHE_TYPE) \
diff --git a/chromium/v8/src/heap/objects-visiting-inl.h b/chromium/v8/src/heap/objects-visiting-inl.h
index c7a4f70f011..978b7f2014a 100644
--- a/chromium/v8/src/heap/objects-visiting-inl.h
+++ b/chromium/v8/src/heap/objects-visiting-inl.h
@@ -13,38 +13,43 @@
#include "src/macro-assembler.h"
#include "src/objects-body-descriptors-inl.h"
#include "src/objects-inl.h"
+#include "src/objects/js-weak-refs-inl.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
template <typename ResultType, typename ConcreteVisitor>
-template <typename T>
+template <typename T, typename>
T* HeapVisitor<ResultType, ConcreteVisitor>::Cast(HeapObject* object) {
return T::cast(object);
}
template <typename ResultType, typename ConcreteVisitor>
+template <typename T, typename>
+T HeapVisitor<ResultType, ConcreteVisitor>::Cast(HeapObject* object) {
+ return T::cast(object);
+}
+
+template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject* object) {
return Visit(object->map(), object);
}
template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map* map,
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
HeapObject* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
switch (map->visitor_id()) {
-#define CASE(type) \
- case kVisit##type: \
- return visitor->Visit##type(map, \
- ConcreteVisitor::template Cast<type>(object));
+#define CASE(TypeName, Type) \
+ case kVisit##TypeName: \
+ return visitor->Visit##TypeName( \
+ map, ConcreteVisitor::template Cast<TypeName>(object));
TYPED_VISITOR_ID_LIST(CASE)
#undef CASE
case kVisitShortcutCandidate:
return visitor->VisitShortcutCandidate(
map, ConcreteVisitor::template Cast<ConsString>(object));
- case kVisitNativeContext:
- return visitor->VisitNativeContext(
- map, ConcreteVisitor::template Cast<Context>(object));
case kVisitDataObject:
return visitor->VisitDataObject(map, object);
case kVisitJSObjectFast:
@@ -59,6 +64,8 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map* map,
return visitor->VisitFreeSpace(map, FreeSpace::cast(object));
case kVisitWeakArray:
return visitor->VisitWeakArray(map, object);
+ case kVisitJSWeakCell:
+ return visitor->VisitJSWeakCell(map, JSWeakCell::cast(object));
case kVisitorIdCount:
UNREACHABLE();
}
@@ -68,16 +75,15 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map* map,
}
template <typename ResultType, typename ConcreteVisitor>
-void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
- HeapObject* host, HeapObject** map) {
- static_cast<ConcreteVisitor*>(this)->VisitPointer(
- host, reinterpret_cast<Object**>(map));
+void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(HeapObject* host,
+ ObjectSlot map) {
+ static_cast<ConcreteVisitor*>(this)->VisitPointer(host, map);
}
-#define VISIT(type) \
+#define VISIT(TypeName, Type) \
template <typename ResultType, typename ConcreteVisitor> \
- ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##type( \
- Map* map, type* object) { \
+ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##TypeName( \
+ Map map, Type object) { \
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
if (!visitor->ShouldVisit(object)) return ResultType(); \
if (!visitor->AllowDefaultJSObjectVisit()) { \
@@ -85,10 +91,10 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
"Implement custom visitor for new JSObject subclass in " \
"concurrent marker"); \
} \
- int size = type::BodyDescriptor::SizeOf(map, object); \
+ int size = TypeName::BodyDescriptor::SizeOf(map, object); \
if (visitor->ShouldVisitMapPointer()) \
visitor->VisitMapPointer(object, object->map_slot()); \
- type::BodyDescriptor::IterateBody(map, object, size, visitor); \
+ TypeName::BodyDescriptor::IterateBody(map, object, size, visitor); \
return static_cast<ResultType>(size); \
}
TYPED_VISITOR_ID_LIST(VISIT)
@@ -96,25 +102,13 @@ TYPED_VISITOR_ID_LIST(VISIT)
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitShortcutCandidate(
- Map* map, ConsString* object) {
+ Map map, ConsString object) {
return static_cast<ConcreteVisitor*>(this)->VisitConsString(map, object);
}
template <typename ResultType, typename ConcreteVisitor>
-ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitNativeContext(
- Map* map, Context* object) {
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- if (!visitor->ShouldVisit(object)) return ResultType();
- int size = Context::BodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer())
- visitor->VisitMapPointer(object, object->map_slot());
- Context::BodyDescriptor::IterateBody(map, object, size, visitor);
- return static_cast<ResultType>(size);
-}
-
-template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
- Map* map, HeapObject* object) {
+ Map map, HeapObject* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map->instance_size();
@@ -125,7 +119,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
- Map* map, JSObject* object) {
+ Map map, JSObject* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
@@ -137,7 +131,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
- Map* map, JSObject* object) {
+ Map map, JSObject* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::BodyDescriptor::SizeOf(map, object);
@@ -149,7 +143,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
- Map* map, HeapObject* object) {
+ Map map, HeapObject* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map->instance_size();
@@ -161,7 +155,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
- Map* map, FreeSpace* object) {
+ Map map, FreeSpace* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
if (visitor->ShouldVisitMapPointer())
@@ -170,16 +164,16 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
}
template <typename ConcreteVisitor>
-int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map* map,
- Context* object) {
+int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map map,
+ NativeContext object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- int size = Context::BodyDescriptor::SizeOf(map, object);
- Context::BodyDescriptor::IterateBody(map, object, size, visitor);
+ int size = NativeContext::BodyDescriptor::SizeOf(map, object);
+ NativeContext::BodyDescriptor::IterateBody(map, object, size, visitor);
return size;
}
template <typename ConcreteVisitor>
-int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
+int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map map,
JSObject* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
return visitor->VisitJSObject(map, object);
@@ -187,7 +181,7 @@ int NewSpaceVisitor<ConcreteVisitor>::VisitJSApiObject(Map* map,
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitWeakArray(
- Map* map, HeapObject* object) {
+ Map map, HeapObject* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = WeakArrayBodyDescriptor::SizeOf(map, object);
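A minimal CRTP sketch of the dispatch shape all of the Visit* methods above share, using invented Thing and PrintVisitor types instead of V8's Map/HeapObject/BodyDescriptor machinery: the base template casts `this` to the concrete visitor, so the guard and body hooks are resolved statically, without virtual dispatch.

#include <iostream>

struct Thing { int size; };  // hypothetical stand-in for a heap object

template <typename ConcreteVisitor>
class VisitorBase {
 public:
  int Visit(const Thing& t) {
    ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
    if (!visitor->ShouldVisit(t)) return 0;  // guard hook, like ShouldVisit
    visitor->VisitBody(t);                   // per-type body hook
    return t.size;                           // result is the visited size
  }
  // Defaults that a concrete visitor may shadow.
  bool ShouldVisit(const Thing&) { return true; }
  void VisitBody(const Thing&) {}
};

class PrintVisitor : public VisitorBase<PrintVisitor> {
 public:
  bool ShouldVisit(const Thing& t) { return t.size > 0; }
  void VisitBody(const Thing& t) {
    std::cout << "visiting " << t.size << " bytes\n";
  }
};

int main() {
  PrintVisitor v;
  std::cout << v.Visit(Thing{16}) << '\n';  // body hook runs, then prints 16
  std::cout << v.Visit(Thing{0}) << '\n';   // guard rejects it, prints 0
}

The same shape is what lets a subclass such as NewSpaceVisitor override only a handful of cases while inheriting the rest unchanged.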
diff --git a/chromium/v8/src/heap/objects-visiting.cc b/chromium/v8/src/heap/objects-visiting.cc
index 594b837f698..322f23cc0f5 100644
--- a/chromium/v8/src/heap/objects-visiting.cc
+++ b/chromium/v8/src/heap/objects-visiting.cc
@@ -24,7 +24,6 @@ static bool MustRecordSlots(Heap* heap) {
template <class T>
struct WeakListVisitor;
-
template <class T>
Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
Object* undefined = ReadOnlyRoots(heap).undefined_value();
@@ -52,7 +51,7 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
if (record_slots) {
HeapObject* slot_holder = WeakListVisitor<T>::WeakNextHolder(tail);
int slot_offset = WeakListVisitor<T>::WeakNextOffset();
- Object** slot = HeapObject::RawField(slot_holder, slot_offset);
+ ObjectSlot slot = HeapObject::RawField(slot_holder, slot_offset);
MarkCompactCollector::RecordSlot(slot_holder, slot,
HeapObject::cast(retained));
}
@@ -75,12 +74,63 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
return head;
}
+// TODO(3770): Replacement for the above, temporarily separate to allow
+// incremental transition. Assumes that T derives from ObjectPtr.
+template <class T>
+Object* VisitWeakList2(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
+ Object* undefined = ReadOnlyRoots(heap).undefined_value();
+ Object* head = undefined;
+ T tail;
+ bool record_slots = MustRecordSlots(heap);
+
+ while (list != undefined) {
+ // Check whether to keep the candidate in the list.
+ T candidate = T::cast(list);
+
+ Object* retained = retainer->RetainAs(list);
+
+ // Move to the next element before the WeakNext is cleared.
+ list = WeakListVisitor<T>::WeakNext(candidate);
+
+ if (retained != nullptr) {
+ if (head == undefined) {
+ // First element in the list.
+ head = retained;
+ } else {
+ // Subsequent elements in the list.
+ DCHECK(!tail.is_null());
+ WeakListVisitor<T>::SetWeakNext(tail, retained);
+ if (record_slots) {
+ HeapObject* slot_holder = WeakListVisitor<T>::WeakNextHolder(tail);
+ int slot_offset = WeakListVisitor<T>::WeakNextOffset();
+ ObjectSlot slot = HeapObject::RawField(slot_holder, slot_offset);
+ MarkCompactCollector::RecordSlot(slot_holder, slot,
+ HeapObject::cast(retained));
+ }
+ }
+ // Retained object is new tail.
+ DCHECK(!retained->IsUndefined(heap->isolate()));
+ candidate = T::cast(retained);
+ tail = candidate;
+
+ // tail is a live object, visit it.
+ WeakListVisitor<T>::VisitLiveObject(heap, tail, retainer);
+
+ } else {
+ WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
+ }
+ }
+
+  // Terminate the list if there are one or more elements.
+  if (!tail.is_null()) WeakListVisitor<T>::SetWeakNext(tail, undefined);

+ return head;
+}
template <class T>
static void ClearWeakList(Heap* heap, Object* list) {
Object* undefined = ReadOnlyRoots(heap).undefined_value();
while (list != undefined) {
- T* candidate = reinterpret_cast<T*>(list);
+ T candidate = T::cast(list);
list = WeakListVisitor<T>::WeakNext(candidate);
WeakListVisitor<T>::SetWeakNext(candidate, undefined);
}
@@ -88,24 +138,24 @@ static void ClearWeakList(Heap* heap, Object* list) {
template <>
struct WeakListVisitor<Code> {
- static void SetWeakNext(Code* code, Object* next) {
+ static void SetWeakNext(Code code, Object* next) {
code->code_data_container()->set_next_code_link(next,
UPDATE_WEAK_WRITE_BARRIER);
}
- static Object* WeakNext(Code* code) {
+ static Object* WeakNext(Code code) {
return code->code_data_container()->next_code_link();
}
- static HeapObject* WeakNextHolder(Code* code) {
+ static HeapObject* WeakNextHolder(Code code) {
return code->code_data_container();
}
static int WeakNextOffset() { return CodeDataContainer::kNextCodeLinkOffset; }
- static void VisitLiveObject(Heap*, Code*, WeakObjectRetainer*) {}
+ static void VisitLiveObject(Heap*, Code, WeakObjectRetainer*) {}
- static void VisitPhantomObject(Heap* heap, Code* code) {
+ static void VisitPhantomObject(Heap* heap, Code code) {
// Even though the code is dying, its code_data_container can still be
// alive. Clear the next_code_link slot to avoid a dangling pointer.
SetWeakNext(code, ReadOnlyRoots(heap).undefined_value());
@@ -115,27 +165,27 @@ struct WeakListVisitor<Code> {
template <>
struct WeakListVisitor<Context> {
- static void SetWeakNext(Context* context, Object* next) {
+ static void SetWeakNext(Context context, Object* next) {
context->set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WEAK_WRITE_BARRIER);
}
- static Object* WeakNext(Context* context) {
+ static Object* WeakNext(Context context) {
return context->next_context_link();
}
- static HeapObject* WeakNextHolder(Context* context) { return context; }
+ static HeapObject* WeakNextHolder(Context context) { return context; }
static int WeakNextOffset() {
return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
}
- static void VisitLiveObject(Heap* heap, Context* context,
+ static void VisitLiveObject(Heap* heap, Context context,
WeakObjectRetainer* retainer) {
if (heap->gc_state() == Heap::MARK_COMPACT) {
// Record the slots of the weak entries in the native context.
for (int idx = Context::FIRST_WEAK_SLOT;
idx < Context::NATIVE_CONTEXT_SLOTS; ++idx) {
- Object** slot = Context::cast(context)->RawFieldOfElementAt(idx);
+ ObjectSlot slot = context->RawField(Context::OffsetOfElementAt(idx));
MarkCompactCollector::RecordSlot(context, slot,
HeapObject::cast(*slot));
}
@@ -147,24 +197,23 @@ struct WeakListVisitor<Context> {
}
template <class T>
- static void DoWeakList(Heap* heap, Context* context,
+ static void DoWeakList(Heap* heap, Context context,
WeakObjectRetainer* retainer, int index) {
// Visit the weak list, removing dead intermediate elements.
- Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer);
+ Object* list_head = VisitWeakList2<T>(heap, context->get(index), retainer);
// Update the list head.
context->set(index, list_head, UPDATE_WRITE_BARRIER);
if (MustRecordSlots(heap)) {
// Record the updated slot if necessary.
- Object** head_slot =
- HeapObject::RawField(context, FixedArray::SizeFor(index));
+ ObjectSlot head_slot = context->RawField(FixedArray::SizeFor(index));
heap->mark_compact_collector()->RecordSlot(context, head_slot,
HeapObject::cast(list_head));
}
}
- static void VisitPhantomObject(Heap* heap, Context* context) {
+ static void VisitPhantomObject(Heap* heap, Context context) {
ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
}
@@ -188,9 +237,8 @@ struct WeakListVisitor<AllocationSite> {
static void VisitPhantomObject(Heap*, AllocationSite*) {}
};
-
-template Object* VisitWeakList<Context>(Heap* heap, Object* list,
- WeakObjectRetainer* retainer);
+template Object* VisitWeakList2<Context>(Heap* heap, Object* list,
+ WeakObjectRetainer* retainer);
template Object* VisitWeakList<AllocationSite>(Heap* heap, Object* list,
WeakObjectRetainer* retainer);
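A standalone sketch of the traversal shape that VisitWeakList2 above follows, with hypothetical Node and Retainer types in place of V8's Object/WeakListVisitor machinery: walk an intrusive singly linked list, ask a retainer whether each element survives, splice out dead elements, and re-terminate the tail.

#include <functional>
#include <iostream>

struct Node {
  int value;
  Node* weak_next = nullptr;  // the "weak next" link held by each element
};

using Retainer = std::function<Node*(Node*)>;  // returns nullptr if dead

Node* VisitWeakListSketch(Node* list, const Retainer& retain) {
  Node* head = nullptr;
  Node* tail = nullptr;
  while (list != nullptr) {
    Node* candidate = list;
    list = candidate->weak_next;        // advance before the link is rewritten
    Node* retained = retain(candidate);
    if (retained == nullptr) continue;  // phantom: drop it from the list
    if (head == nullptr) {
      head = retained;                  // first live element becomes the head
    } else {
      tail->weak_next = retained;       // splice over any dead predecessors
    }
    tail = retained;
  }
  if (tail != nullptr) tail->weak_next = nullptr;  // terminate the list
  return head;
}

int main() {
  Node c{3}, b{2, &c}, a{1, &b};
  // Keep only odd values.
  Node* head = VisitWeakListSketch(&a, [](Node* n) {
    return (n->value % 2 != 0) ? n : nullptr;
  });
  for (Node* n = head; n != nullptr; n = n->weak_next)
    std::cout << n->value << ' ';  // prints: 1 3
  std::cout << '\n';
}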
diff --git a/chromium/v8/src/heap/objects-visiting.h b/chromium/v8/src/heap/objects-visiting.h
index 147af52c7eb..99a9989a31e 100644
--- a/chromium/v8/src/heap/objects-visiting.h
+++ b/chromium/v8/src/heap/objects-visiting.h
@@ -20,54 +20,63 @@ namespace internal {
class BigInt;
class BytecodeArray;
class DataHandler;
+class EmbedderDataArray;
class JSArrayBuffer;
class JSDataView;
class JSRegExp;
class JSTypedArray;
+class JSWeakCell;
class JSWeakCollection;
+class NativeContext;
class UncompiledDataWithoutPreParsedScope;
class UncompiledDataWithPreParsedScope;
-
-#define TYPED_VISITOR_ID_LIST(V) \
- V(AllocationSite) \
- V(BigInt) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(Cell) \
- V(Code) \
- V(CodeDataContainer) \
- V(ConsString) \
- V(DataHandler) \
- V(EphemeronHashTable) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- V(FixedFloat64Array) \
- V(FixedTypedArrayBase) \
- V(JSArrayBuffer) \
- V(JSDataView) \
- V(JSObject) \
- V(JSTypedArray) \
- V(JSWeakCollection) \
- V(Map) \
- V(Oddball) \
- V(PreParsedScopeData) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(PrototypeInfo) \
- V(SeqOneByteString) \
- V(SeqTwoByteString) \
- V(SharedFunctionInfo) \
- V(SlicedString) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(Symbol) \
- V(ThinString) \
- V(TransitionArray) \
- V(UncompiledDataWithoutPreParsedScope) \
- V(UncompiledDataWithPreParsedScope) \
- V(WasmInstanceObject)
+class WasmInstanceObject;
+
+#define TYPED_VISITOR_ID_LIST(V) \
+ V(AllocationSite, AllocationSite*) \
+ V(BigInt, BigInt*) \
+ V(ByteArray, ByteArray) \
+ V(BytecodeArray, BytecodeArray) \
+ V(Cell, Cell*) \
+ V(Code, Code) \
+ V(CodeDataContainer, CodeDataContainer*) \
+ V(ConsString, ConsString) \
+ V(Context, Context) \
+ V(DataHandler, DataHandler*) \
+ V(DescriptorArray, DescriptorArray*) \
+ V(EmbedderDataArray, EmbedderDataArray) \
+ V(EphemeronHashTable, EphemeronHashTable) \
+ V(FeedbackCell, FeedbackCell*) \
+ V(FeedbackVector, FeedbackVector*) \
+ V(FixedArray, FixedArray) \
+ V(FixedDoubleArray, FixedDoubleArray) \
+ V(FixedFloat64Array, FixedFloat64Array) \
+ V(FixedTypedArrayBase, FixedTypedArrayBase) \
+ V(JSArrayBuffer, JSArrayBuffer*) \
+ V(JSDataView, JSDataView*) \
+ V(JSObject, JSObject*) \
+ V(JSTypedArray, JSTypedArray*) \
+ V(JSWeakCollection, JSWeakCollection*) \
+ V(Map, Map) \
+ V(NativeContext, NativeContext) \
+ V(Oddball, Oddball*) \
+ V(PreParsedScopeData, PreParsedScopeData*) \
+ V(PropertyArray, PropertyArray) \
+ V(PropertyCell, PropertyCell*) \
+ V(PrototypeInfo, PrototypeInfo*) \
+ V(SeqOneByteString, SeqOneByteString) \
+ V(SeqTwoByteString, SeqTwoByteString) \
+ V(SharedFunctionInfo, SharedFunctionInfo*) \
+ V(SlicedString, SlicedString) \
+ V(SmallOrderedHashMap, SmallOrderedHashMap) \
+ V(SmallOrderedHashSet, SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary, SmallOrderedNameDictionary) \
+ V(Symbol, Symbol) \
+ V(ThinString, ThinString) \
+ V(TransitionArray, TransitionArray*) \
+ V(UncompiledDataWithoutPreParsedScope, UncompiledDataWithoutPreParsedScope*) \
+ V(UncompiledDataWithPreParsedScope, UncompiledDataWithPreParsedScope*) \
+ V(WasmInstanceObject, WasmInstanceObject*)
// The base class for visitors that need to dispatch on object type. The default
// behavior of all visit functions is to iterate body of the given object using
@@ -84,7 +93,7 @@ template <typename ResultType, typename ConcreteVisitor>
class HeapVisitor : public ObjectVisitor {
public:
V8_INLINE ResultType Visit(HeapObject* object);
- V8_INLINE ResultType Visit(Map* map, HeapObject* object);
+ V8_INLINE ResultType Visit(Map map, HeapObject* object);
protected:
// A guard predicate for visiting the object.
@@ -94,25 +103,30 @@ class HeapVisitor : public ObjectVisitor {
// Guard predicate for visiting the objects map pointer separately.
V8_INLINE bool ShouldVisitMapPointer() { return true; }
// A callback for visiting the map pointer in the object header.
- V8_INLINE void VisitMapPointer(HeapObject* host, HeapObject** map);
+ V8_INLINE void VisitMapPointer(HeapObject* host, ObjectSlot map);
// If this predicate returns false, then the heap visitor will fail
// in default Visit implementation for subclasses of JSObject.
V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
-#define VISIT(type) V8_INLINE ResultType Visit##type(Map* map, type* object);
+#define VISIT(TypeName, Type) \
+ V8_INLINE ResultType Visit##TypeName(Map map, Type object);
TYPED_VISITOR_ID_LIST(VISIT)
#undef VISIT
- V8_INLINE ResultType VisitShortcutCandidate(Map* map, ConsString* object);
- V8_INLINE ResultType VisitNativeContext(Map* map, Context* object);
- V8_INLINE ResultType VisitDataObject(Map* map, HeapObject* object);
- V8_INLINE ResultType VisitJSObjectFast(Map* map, JSObject* object);
- V8_INLINE ResultType VisitJSApiObject(Map* map, JSObject* object);
- V8_INLINE ResultType VisitStruct(Map* map, HeapObject* object);
- V8_INLINE ResultType VisitFreeSpace(Map* map, FreeSpace* object);
- V8_INLINE ResultType VisitWeakArray(Map* map, HeapObject* object);
-
- template <typename T>
+ V8_INLINE ResultType VisitShortcutCandidate(Map map, ConsString object);
+ V8_INLINE ResultType VisitDataObject(Map map, HeapObject* object);
+ V8_INLINE ResultType VisitJSObjectFast(Map map, JSObject* object);
+ V8_INLINE ResultType VisitJSApiObject(Map map, JSObject* object);
+ V8_INLINE ResultType VisitStruct(Map map, HeapObject* object);
+ V8_INLINE ResultType VisitFreeSpace(Map map, FreeSpace* object);
+ V8_INLINE ResultType VisitWeakArray(Map map, HeapObject* object);
+
+ template <typename T, typename = typename std::enable_if<
+ std::is_base_of<Object, T>::value>::type>
static V8_INLINE T* Cast(HeapObject* object);
+
+ template <typename T, typename = typename std::enable_if<
+ std::is_base_of<ObjectPtr, T>::value>::type>
+ static V8_INLINE T Cast(HeapObject* object);
};
template <typename ConcreteVisitor>
@@ -122,15 +136,20 @@ class NewSpaceVisitor : public HeapVisitor<int, ConcreteVisitor> {
// Special cases for young generation.
- V8_INLINE int VisitNativeContext(Map* map, Context* object);
- V8_INLINE int VisitJSApiObject(Map* map, JSObject* object);
+ V8_INLINE int VisitNativeContext(Map map, NativeContext object);
+ V8_INLINE int VisitJSApiObject(Map map, JSObject* object);
- int VisitBytecodeArray(Map* map, BytecodeArray* object) {
+ int VisitBytecodeArray(Map map, BytecodeArray object) {
UNREACHABLE();
return 0;
}
- int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) {
+ int VisitSharedFunctionInfo(Map map, SharedFunctionInfo* object) {
+ UNREACHABLE();
+ return 0;
+ }
+
+ int VisitJSWeakCell(Map map, JSWeakCell* js_weak_cell) {
UNREACHABLE();
return 0;
}
@@ -145,6 +164,8 @@ class WeakObjectRetainer;
// access the next-element pointers.
template <class T>
Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
+template <class T>
+Object* VisitWeakList2(Heap* heap, Object* list, WeakObjectRetainer* retainer);
} // namespace internal
} // namespace v8
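A small sketch of the X-macro idiom behind TYPED_VISITOR_ID_LIST, using made-up Cat/Dog types: the patch widens the list from V(Type) to V(TypeName, Type) because, during the pointer-to-value migration, some visit methods still take T* while others take the new value-like T, so the list has to carry the parameter type as a second column.

#include <iostream>
#include <string>

struct Cat { std::string name; };
struct Dog { std::string name; };

// Two-column X-macro: method suffix, parameter type.
#define TYPED_LIST(V) \
  V(Cat, const Cat*)  \
  V(Dog, Dog)

class Visitor {
 public:
#define DECLARE_VISIT(TypeName, ParamType) \
  void Visit##TypeName(ParamType o) {      \
    (void)o;                               \
    std::cout << #TypeName << '\n';        \
  }
  TYPED_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
};

int main() {
  Visitor v;
  Cat cat{"whiskers"};
  v.VisitCat(&cat);        // still a pointer parameter
  v.VisitDog(Dog{"rex"});  // already a value parameter
}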
diff --git a/chromium/v8/src/heap/remembered-set.h b/chromium/v8/src/heap/remembered-set.h
index e59457b10d5..7b626508ffd 100644
--- a/chromium/v8/src/heap/remembered-set.h
+++ b/chromium/v8/src/heap/remembered-set.h
@@ -116,7 +116,7 @@ class RememberedSet : public AllStatic {
// The callback should take (MemoryChunk* chunk) and return void.
template <typename Callback>
static void IterateMemoryChunks(Heap* heap, Callback callback) {
- MemoryChunkIterator it(heap);
+ OldGenerationMemoryChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = chunk->slot_set<type>();
@@ -252,7 +252,7 @@ class RememberedSet : public AllStatic {
// Clear all old to old slots from the remembered set.
static void ClearAll(Heap* heap) {
STATIC_ASSERT(type == OLD_TO_OLD);
- MemoryChunkIterator it(heap);
+ OldGenerationMemoryChunkIterator it(heap);
MemoryChunk* chunk;
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseSlotSet<OLD_TO_OLD>();
@@ -261,44 +261,36 @@ class RememberedSet : public AllStatic {
}
}
- // Eliminates all stale slots from the remembered set, i.e.
- // slots that are not part of live objects anymore. This method must be
- // called after marking, when the whole transitive closure is known and
- // must be called before sweeping when mark bits are still intact.
- static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
-
private:
- static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, Object** slot);
+ static bool IsValidSlot(Heap* heap, MemoryChunk* chunk, ObjectSlot slot);
};
class UpdateTypedSlotHelper {
public:
// Updates a code entry slot using an untyped slot callback.
- // The callback accepts Object** and returns SlotCallbackResult.
+ // The callback accepts MaybeObjectSlot and returns SlotCallbackResult.
template <typename Callback>
static SlotCallbackResult UpdateCodeEntry(Address entry_address,
Callback callback) {
Object* code = Code::GetObjectFromEntryAddress(entry_address);
Object* old_code = code;
- SlotCallbackResult result =
- callback(reinterpret_cast<MaybeObject**>(&code));
+ SlotCallbackResult result = callback(MaybeObjectSlot(&code));
DCHECK(!HasWeakHeapObjectTag(code));
if (code != old_code) {
- Memory<Address>(entry_address) = reinterpret_cast<Code*>(code)->entry();
+ Memory<Address>(entry_address) = Code::cast(code)->entry();
}
return result;
}
// Updates a code target slot using an untyped slot callback.
- // The callback accepts Object** and returns SlotCallbackResult.
+ // The callback accepts MaybeObjectSlot and returns SlotCallbackResult.
template <typename Callback>
static SlotCallbackResult UpdateCodeTarget(RelocInfo* rinfo,
Callback callback) {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
- Code* old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Object* new_target = old_target;
- SlotCallbackResult result =
- callback(reinterpret_cast<MaybeObject**>(&new_target));
+ SlotCallbackResult result = callback(MaybeObjectSlot(&new_target));
DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
rinfo->set_target_address(
@@ -308,15 +300,14 @@ class UpdateTypedSlotHelper {
}
// Updates an embedded pointer slot using an untyped slot callback.
- // The callback accepts Object** and returns SlotCallbackResult.
+ // The callback accepts MaybeObjectSlot and returns SlotCallbackResult.
template <typename Callback>
static SlotCallbackResult UpdateEmbeddedPointer(Heap* heap, RelocInfo* rinfo,
Callback callback) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* old_target = rinfo->target_object();
Object* new_target = old_target;
- SlotCallbackResult result =
- callback(reinterpret_cast<MaybeObject**>(&new_target));
+ SlotCallbackResult result = callback(MaybeObjectSlot(&new_target));
DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
rinfo->set_target_object(heap, HeapObject::cast(new_target));
@@ -325,24 +316,24 @@ class UpdateTypedSlotHelper {
}
// Updates a typed slot using an untyped slot callback.
- // The callback accepts MaybeObject** and returns SlotCallbackResult.
+ // The callback accepts MaybeObjectSlot and returns SlotCallbackResult.
template <typename Callback>
static SlotCallbackResult UpdateTypedSlot(Heap* heap, SlotType slot_type,
Address addr, Callback callback) {
switch (slot_type) {
case CODE_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, nullptr);
+ RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, Code());
return UpdateCodeTarget(&rinfo, callback);
}
case CODE_ENTRY_SLOT: {
return UpdateCodeEntry(addr, callback);
}
case EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, nullptr);
+ RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
case OBJECT_SLOT: {
- return callback(reinterpret_cast<MaybeObject**>(addr));
+ return callback(MaybeObjectSlot(addr));
}
case CLEARED_SLOT:
break;
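A minimal sketch of the kind of slot wrapper the hunks above migrate to (ObjectSlot / MaybeObjectSlot), with invented Slot, SlotCallback, and UpdateObjectSlot names: instead of passing raw Object** around, callbacks receive a small value type that knows how to load and store through the underlying address, mirroring the OBJECT_SLOT case of UpdateTypedSlot.

#include <cstdint>
#include <functional>
#include <iostream>

using Address = uintptr_t;

class Slot {
 public:
  explicit Slot(Address addr) : addr_(addr) {}
  void* load() const { return *reinterpret_cast<void**>(addr_); }
  void store(void* value) const { *reinterpret_cast<void**>(addr_) = value; }
  Address address() const { return addr_; }

 private:
  Address addr_;  // the location that holds the tagged pointer
};

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
using SlotCallback = std::function<SlotCallbackResult(Slot)>;

// Analogue of the OBJECT_SLOT case above: hand the raw address to the
// callback wrapped in a slot value.
SlotCallbackResult UpdateObjectSlot(Address addr, const SlotCallback& cb) {
  return cb(Slot(addr));
}

int main() {
  int heap_object = 42;
  void* field = &heap_object;  // pretend this is a pointer field in an object
  SlotCallbackResult r = UpdateObjectSlot(
      reinterpret_cast<Address>(&field), [](Slot slot) {
        std::cout << "slot points at "
                  << *static_cast<int*>(slot.load()) << '\n';
        return KEEP_SLOT;
      });
  std::cout << (r == KEEP_SLOT ? "kept\n" : "removed\n");
}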
diff --git a/chromium/v8/src/heap/scavenge-job.cc b/chromium/v8/src/heap/scavenge-job.cc
index 5848d5342ea..14e7d000caa 100644
--- a/chromium/v8/src/heap/scavenge-job.cc
+++ b/chromium/v8/src/heap/scavenge-job.cc
@@ -8,6 +8,7 @@
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
+#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/v8.h"
#include "src/vm-state-inl.h"
diff --git a/chromium/v8/src/heap/scavenger-inl.h b/chromium/v8/src/heap/scavenger-inl.h
index 376b5e75aa5..6e882dcdb7c 100644
--- a/chromium/v8/src/heap/scavenger-inl.h
+++ b/chromium/v8/src/heap/scavenger-inl.h
@@ -11,6 +11,7 @@
#include "src/heap/local-allocator-inl.h"
#include "src/objects-inl.h"
#include "src/objects/map.h"
+#include "src/objects/slots-inl.h"
namespace v8 {
namespace internal {
@@ -21,7 +22,7 @@ void Scavenger::PromotionList::View::PushRegularObject(HeapObject* object,
}
void Scavenger::PromotionList::View::PushLargeObject(HeapObject* object,
- Map* map, int size) {
+ Map map, int size) {
promotion_list_->PushLargeObject(task_id_, object, map, size);
}
@@ -51,7 +52,7 @@ void Scavenger::PromotionList::PushRegularObject(int task_id,
}
void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject* object,
- Map* map, int size) {
+ Map map, int size) {
large_object_promotion_list_.Push(task_id, {object, map, size});
}
@@ -109,7 +110,7 @@ bool Scavenger::ContainsOnlyData(VisitorId visitor_id) {
return false;
}
-void Scavenger::PageMemoryFence(MaybeObject* object) {
+void Scavenger::PageMemoryFence(MaybeObject object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
// with page initialization.
@@ -121,16 +122,15 @@ void Scavenger::PageMemoryFence(MaybeObject* object) {
#endif
}
-bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
+bool Scavenger::MigrateObject(Map map, HeapObject* source, HeapObject* target,
int size) {
// Copy the content of source to target.
target->set_map_word(MapWord::FromMap(map));
heap()->CopyBlock(target->address() + kPointerSize,
source->address() + kPointerSize, size - kPointerSize);
- HeapObject* old = base::AsAtomicPointer::Release_CompareAndSwap(
- reinterpret_cast<HeapObject**>(source->address()), map,
- MapWord::FromForwardingAddress(target).ToMap());
+ ObjectPtr old = source->map_slot().Release_CompareAndSwap(
+ map, MapWord::FromForwardingAddress(target).ToMap());
if (old != map) {
// Other task migrated the object.
return false;
@@ -147,8 +147,8 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
return true;
}
-CopyAndForwardResult Scavenger::SemiSpaceCopyObject(Map* map,
- HeapObjectReference** slot,
+CopyAndForwardResult Scavenger::SemiSpaceCopyObject(Map map,
+ HeapObjectSlot slot,
HeapObject* object,
int object_size) {
DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
@@ -179,8 +179,7 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(Map* map,
return CopyAndForwardResult::FAILURE;
}
-CopyAndForwardResult Scavenger::PromoteObject(Map* map,
- HeapObjectReference** slot,
+CopyAndForwardResult Scavenger::PromoteObject(Map map, HeapObjectSlot slot,
HeapObject* object,
int object_size) {
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
@@ -218,15 +217,17 @@ SlotCallbackResult Scavenger::RememberedSetEntryNeeded(
: REMOVE_SLOT;
}
-bool Scavenger::HandleLargeObject(Map* map, HeapObject* object,
+bool Scavenger::HandleLargeObject(Map map, HeapObject* object,
int object_size) {
- if (V8_UNLIKELY(FLAG_young_generation_large_objects &&
- object_size > kMaxNewSpaceHeapObjectSize)) {
+ // TODO(hpayer): Make this check size based, i.e.
+ // object_size > kMaxRegularHeapObjectSize
+ if (V8_UNLIKELY(
+ FLAG_young_generation_large_objects &&
+ MemoryChunk::FromHeapObject(object)->IsInNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner()->identity());
- if (base::AsAtomicPointer::Release_CompareAndSwap(
- reinterpret_cast<HeapObject**>(object->address()), map,
- MapWord::FromForwardingAddress(object).ToMap()) == map) {
+ if (object->map_slot().Release_CompareAndSwap(
+ map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
surviving_new_large_objects_.insert({object, map});
if (!ContainsOnlyData(map->visitor_id())) {
@@ -238,11 +239,10 @@ bool Scavenger::HandleLargeObject(Map* map, HeapObject* object,
return false;
}
-SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
- HeapObjectReference** slot,
+SlotCallbackResult Scavenger::EvacuateObjectDefault(Map map,
+ HeapObjectSlot slot,
HeapObject* object,
int object_size) {
- SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
SLOW_DCHECK(object->SizeFromMap(map) == object_size);
CopyAndForwardResult result;
@@ -250,6 +250,9 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
return REMOVE_SLOT;
}
+ SLOW_DCHECK(static_cast<size_t>(object_size) <=
+ MemoryChunkLayout::AllocatableMemoryInDataPage());
+
if (!heap()->ShouldBePromoted(object->address())) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
@@ -277,39 +280,37 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(Map* map,
UNREACHABLE();
}
-SlotCallbackResult Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
- ThinString* object,
+SlotCallbackResult Scavenger::EvacuateThinString(Map map, HeapObjectSlot slot,
+ ThinString object,
int object_size) {
if (!is_incremental_marking_) {
// The ThinString should die after Scavenge, so avoid writing the proper
// forwarding pointer and instead just signal the actual object as forwarded
// reference.
- String* actual = object->actual();
+ String actual = object->actual();
// ThinStrings always refer to internalized strings, which are always in old
// space.
DCHECK(!Heap::InNewSpace(actual));
- *slot = actual;
+ slot.StoreHeapObject(actual);
return REMOVE_SLOT;
}
- return EvacuateObjectDefault(
- map, reinterpret_cast<HeapObjectReference**>(slot), object, object_size);
+ return EvacuateObjectDefault(map, slot, object, object_size);
}
-SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map* map,
- HeapObject** slot,
- ConsString* object,
+SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
+ HeapObjectSlot slot,
+ ConsString object,
int object_size) {
DCHECK(IsShortcutCandidate(map->instance_type()));
if (!is_incremental_marking_ &&
object->unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
HeapObject* first = HeapObject::cast(object->unchecked_first());
- *slot = first;
+ slot.StoreHeapObject(first);
if (!Heap::InNewSpace(first)) {
- base::AsAtomicPointer::Release_Store(
- reinterpret_cast<Map**>(object->address()),
+ object->map_slot().Release_Store(
MapWord::FromForwardingAddress(first).ToMap());
return REMOVE_SLOT;
}
@@ -318,28 +319,24 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map* map,
if (first_word.IsForwardingAddress()) {
HeapObject* target = first_word.ToForwardingAddress();
- *slot = target;
- base::AsAtomicPointer::Release_Store(
- reinterpret_cast<Map**>(object->address()),
+ slot.StoreHeapObject(target);
+ object->map_slot().Release_Store(
MapWord::FromForwardingAddress(target).ToMap());
return Heap::InToSpace(target) ? KEEP_SLOT : REMOVE_SLOT;
}
- Map* map = first_word.ToMap();
- SlotCallbackResult result = EvacuateObjectDefault(
- map, reinterpret_cast<HeapObjectReference**>(slot), first,
- first->SizeFromMap(map));
- base::AsAtomicPointer::Release_Store(
- reinterpret_cast<Map**>(object->address()),
- MapWord::FromForwardingAddress(*slot).ToMap());
+ Map map = first_word.ToMap();
+ SlotCallbackResult result =
+ EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map));
+ object->map_slot().Release_Store(
+ MapWord::FromForwardingAddress(slot.ToHeapObject()).ToMap());
return result;
}
- return EvacuateObjectDefault(
- map, reinterpret_cast<HeapObjectReference**>(slot), object, object_size);
+ return EvacuateObjectDefault(map, slot, object, object_size);
}
-SlotCallbackResult Scavenger::EvacuateObject(HeapObjectReference** slot,
- Map* map, HeapObject* source) {
+SlotCallbackResult Scavenger::EvacuateObject(HeapObjectSlot slot, Map map,
+ HeapObject* source) {
SLOW_DCHECK(Heap::InFromSpace(source));
SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
int size = source->SizeFromMap(map);
@@ -349,20 +346,19 @@ SlotCallbackResult Scavenger::EvacuateObject(HeapObjectReference** slot,
case kVisitThinString:
// At the moment we don't allow weak pointers to thin strings.
DCHECK(!(*slot)->IsWeak());
- return EvacuateThinString(map, reinterpret_cast<HeapObject**>(slot),
- reinterpret_cast<ThinString*>(source), size);
+ return EvacuateThinString(map, slot, ThinString::unchecked_cast(source),
+ size);
case kVisitShortcutCandidate:
DCHECK(!(*slot)->IsWeak());
// At the moment we don't allow weak pointers to cons strings.
return EvacuateShortcutCandidate(
- map, reinterpret_cast<HeapObject**>(slot),
- reinterpret_cast<ConsString*>(source), size);
+ map, slot, ConsString::unchecked_cast(source), size);
default:
return EvacuateObjectDefault(map, slot, source, size);
}
}
-SlotCallbackResult Scavenger::ScavengeObject(HeapObjectReference** p,
+SlotCallbackResult Scavenger::ScavengeObject(HeapObjectSlot p,
HeapObject* object) {
DCHECK(Heap::InFromSpace(object));
@@ -375,16 +371,20 @@ SlotCallbackResult Scavenger::ScavengeObject(HeapObjectReference** p,
HeapObject* dest = first_word.ToForwardingAddress();
DCHECK(Heap::InFromSpace(*p));
if ((*p)->IsWeak()) {
- *p = HeapObjectReference::Weak(dest);
+ p.store(HeapObjectReference::Weak(dest));
} else {
DCHECK((*p)->IsStrong());
- *p = HeapObjectReference::Strong(dest);
+ p.store(HeapObjectReference::Strong(dest));
}
- DCHECK(Heap::InToSpace(dest) || !Heap::InNewSpace((dest)));
+ DCHECK_IMPLIES(Heap::InNewSpace(dest),
+ (Heap::InToSpace(dest) ||
+ MemoryChunk::FromHeapObject(dest)->owner()->identity() ==
+ NEW_LO_SPACE));
+
return Heap::InToSpace(dest) ? KEEP_SLOT : REMOVE_SLOT;
}
- Map* map = first_word.ToMap();
+ Map map = first_word.ToMap();
// AllocationMementos are unrooted and shouldn't survive a scavenge
DCHECK_NE(ReadOnlyRoots(heap()).allocation_memento_map(), map);
// Call the slow part of scavenge object.
@@ -392,16 +392,16 @@ SlotCallbackResult Scavenger::ScavengeObject(HeapObjectReference** p,
}
SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
- Address slot_address) {
- MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
- MaybeObject* object = *slot;
+ MaybeObjectSlot slot) {
+ MaybeObject object = *slot;
if (Heap::InFromSpace(object)) {
HeapObject* heap_object = object->GetHeapObject();
DCHECK(heap_object->IsHeapObject());
- SlotCallbackResult result = ScavengeObject(
- reinterpret_cast<HeapObjectReference**>(slot), heap_object);
- DCHECK_IMPLIES(result == REMOVE_SLOT, !Heap::InNewSpace(*slot));
+ SlotCallbackResult result =
+ ScavengeObject(HeapObjectSlot(slot), heap_object);
+ DCHECK_IMPLIES(result == REMOVE_SLOT,
+ !heap->IsInYoungGeneration((*slot)->GetHeapObject()));
return result;
} else if (Heap::InToSpace(object)) {
// Already updated slot. This can happen when processing of the work list
@@ -413,26 +413,25 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
return REMOVE_SLOT;
}
-void ScavengeVisitor::VisitPointers(HeapObject* host, Object** start,
- Object** end) {
- for (Object** p = start; p < end; p++) {
+void ScavengeVisitor::VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) {
+ for (ObjectSlot p = start; p < end; ++p) {
Object* object = *p;
if (!Heap::InNewSpace(object)) continue;
- scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
+ scavenger_->ScavengeObject(HeapObjectSlot(p),
reinterpret_cast<HeapObject*>(object));
}
}
-void ScavengeVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) {
- for (MaybeObject** p = start; p < end; p++) {
- MaybeObject* object = *p;
+void ScavengeVisitor::VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) {
+ for (MaybeObjectSlot p = start; p < end; ++p) {
+ MaybeObject object = *p;
if (!Heap::InNewSpace(object)) continue;
// Treat the weak reference as strong.
HeapObject* heap_object;
if (object->GetHeapObject(&heap_object)) {
- scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
- heap_object);
+ scavenger_->ScavengeObject(HeapObjectSlot(p), heap_object);
} else {
UNREACHABLE();
}
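A sketch, under invented FakeMap/FakeObject types, of the idea behind the map_slot().Release_CompareAndSwap calls above: concurrent scavenger tasks race to install a forwarding pointer in the object's map word, and only the task whose compare-and-swap succeeds performs the copy. This standalone version uses std::atomic directly rather than V8's slot API.

#include <atomic>
#include <iostream>

struct FakeMap {};
FakeMap the_map;

struct FakeObject {
  // In V8 this word is the map pointer; a forwarding address is stored in
  // its place once the object has been evacuated.
  std::atomic<void*> map_word{&the_map};
};

// Returns true if this caller won the race and owns the migration.
bool TryInstallForwarding(FakeObject* source, void* forwarding_address) {
  void* expected = &the_map;
  return source->map_word.compare_exchange_strong(
      expected, forwarding_address, std::memory_order_release,
      std::memory_order_relaxed);
}

int main() {
  FakeObject obj;
  char target_a, target_b;  // stand-ins for two copies made by two tasks
  bool first = TryInstallForwarding(&obj, &target_a);
  bool second = TryInstallForwarding(&obj, &target_b);
  std::cout << "first task won: " << first
            << ", second task won: " << second << '\n';  // prints 1, 0
}

The losing task observes that the map word no longer holds the map and treats the stored value as the forwarding address installed by the winner.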
diff --git a/chromium/v8/src/heap/scavenger.cc b/chromium/v8/src/heap/scavenger.cc
index 4c63ed099aa..43176fb807c 100644
--- a/chromium/v8/src/heap/scavenger.cc
+++ b/chromium/v8/src/heap/scavenger.cc
@@ -76,36 +76,33 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
bool record_slots)
: heap_(heap), scavenger_(scavenger), record_slots_(record_slots) {}
- inline void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
- for (Object** slot = start; slot < end; ++slot) {
+ inline void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) final {
+ for (ObjectSlot slot = start; slot < end; ++slot) {
Object* target = *slot;
DCHECK(!HasWeakHeapObjectTag(target));
if (target->IsHeapObject()) {
- HandleSlot(host, reinterpret_cast<Address>(slot),
- HeapObject::cast(target));
+ HandleSlot(host, HeapObjectSlot(slot), HeapObject::cast(target));
}
}
}
- inline void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final {
+ inline void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final {
// Treat weak references as strong. TODO(marja): Proper weakness handling in
// the young generation.
- for (MaybeObject** slot = start; slot < end; ++slot) {
- MaybeObject* target = *slot;
+ for (MaybeObjectSlot slot = start; slot < end; ++slot) {
+ MaybeObject target = *slot;
HeapObject* heap_object;
if (target->GetHeapObject(&heap_object)) {
- HandleSlot(host, reinterpret_cast<Address>(slot), heap_object);
+ HandleSlot(host, HeapObjectSlot(slot), heap_object);
}
}
}
- inline void HandleSlot(HeapObject* host, Address slot_address,
+ inline void HandleSlot(HeapObject* host, HeapObjectSlot slot,
HeapObject* target) {
- HeapObjectReference** slot =
- reinterpret_cast<HeapObjectReference**>(slot_address);
- scavenger_->PageMemoryFence(reinterpret_cast<MaybeObject*>(target));
+ scavenger_->PageMemoryFence(MaybeObject::FromObject(target));
if (Heap::InFromSpace(target)) {
SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
@@ -115,14 +112,15 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
if (result == KEEP_SLOT) {
SLOW_DCHECK(target->IsHeapObject());
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot_address),
- slot_address);
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot.address()),
+ slot.address());
}
SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
HeapObject::cast(target)));
} else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
HeapObject::cast(target))) {
- heap_->mark_compact_collector()->RecordSlot(host, slot, target);
+ heap_->mark_compact_collector()->RecordSlot(host, ObjectSlot(slot),
+ target);
}
}
@@ -132,7 +130,7 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
const bool record_slots_;
};
-static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
+static bool IsUnscavengedHeapObject(Heap* heap, ObjectSlot p) {
return Heap::InFromSpace(*p) &&
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
@@ -156,13 +154,14 @@ ScavengerCollector::ScavengerCollector(Heap* heap)
: isolate_(heap->isolate()), heap_(heap), parallel_scavenge_semaphore_(0) {}
void ScavengerCollector::CollectGarbage() {
+ DCHECK(surviving_new_large_objects_.empty());
ItemParallelJob job(isolate_->cancelable_task_manager(),
&parallel_scavenge_semaphore_);
const int kMainThreadId = 0;
Scavenger* scavengers[kMaxScavengerTasks];
const bool is_logging = isolate_->LogObjectRelocation();
const int num_scavenge_tasks = NumberOfScavengeTasks();
- OneshotBarrier barrier;
+ OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
Scavenger::CopiedList copied_list(num_scavenge_tasks);
Scavenger::PromotionList promotion_list(num_scavenge_tasks);
for (int i = 0; i < num_scavenge_tasks; i++) {
@@ -271,6 +270,11 @@ void ScavengerCollector::CollectGarbage() {
}
heap_->array_buffer_collector()->FreeAllocations();
+  // Since we promote all surviving large objects immediately, all remaining
+  // large objects must be dead.

+ // TODO(hpayer): Don't free all as soon as we have an intermediate generation.
+ heap_->new_lo_space()->FreeAllObjects();
+
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(heap_, [](MemoryChunk* chunk) {
if (chunk->SweepingDone()) {
RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
@@ -281,24 +285,20 @@ void ScavengerCollector::CollectGarbage() {
// Update how much has survived scavenge.
heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedNewSpaceObjectSize());
-
- // Scavenger may find new wrappers by iterating objects promoted onto a black
- // page.
- heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
}
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
for (SurvivingNewLargeObjectMapEntry update_info :
surviving_new_large_objects_) {
HeapObject* object = update_info.first;
- Map* map = update_info.second;
+ Map map = update_info.second;
// Order is important here. We have to re-install the map to have access
// to meta-data like size during page promotion.
object->set_map_word(MapWord::FromMap(map));
LargePage* page = LargePage::FromHeapObject(object);
heap_->lo_space()->PromoteNewLargeObject(page);
}
- DCHECK(heap_->new_lo_space()->IsEmpty());
+ surviving_new_large_objects_.clear();
}
void ScavengerCollector::MergeSurvivingNewLargeObjects(
@@ -340,7 +340,7 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
is_incremental_marking_(heap->incremental_marking()->IsMarking()),
is_compacting_(heap->incremental_marking()->IsCompacting()) {}
-void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, Map* map,
+void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, Map map,
int size) {
// We are not collecting slots on new space objects during mutation thus we
// have to scan for pointers to evacuation candidates when we promote
@@ -365,18 +365,18 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
}
void Scavenger::ScavengePage(MemoryChunk* page) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::ScavengePage");
CodePageMemoryModificationScope memory_modification_scope(page);
- RememberedSet<OLD_TO_NEW>::Iterate(
- page,
- [this](Address addr) { return CheckAndScavengeObject(heap_, addr); },
- SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(page,
+ [this](MaybeObjectSlot addr) {
+ return CheckAndScavengeObject(heap_,
+ addr);
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::IterateTyped(
page, [this](SlotType type, Address host_addr, Address addr) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap_, type, addr, [this](MaybeObject** addr) {
- return CheckAndScavengeObject(heap(),
- reinterpret_cast<Address>(addr));
+ heap_, type, addr, [this](MaybeObjectSlot slot) {
+ return CheckAndScavengeObject(heap(), slot);
});
});
@@ -384,7 +384,6 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
}
void Scavenger::Process(OneshotBarrier* barrier) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Scavenger::Process");
ScavengeVisitor scavenge_visitor(this);
const bool have_barrier = barrier != nullptr;
@@ -428,23 +427,23 @@ void Scavenger::Finalize() {
}
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
- Object** p) {
+ ObjectSlot p) {
DCHECK(!HasWeakHeapObjectTag(*p));
ScavengePointer(p);
}
void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
- Object** start, Object** end) {
+ ObjectSlot start, ObjectSlot end) {
// Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) ScavengePointer(p);
+ for (ObjectSlot p = start; p < end; ++p) ScavengePointer(p);
}
-void RootScavengeVisitor::ScavengePointer(Object** p) {
+void RootScavengeVisitor::ScavengePointer(ObjectSlot p) {
Object* object = *p;
DCHECK(!HasWeakHeapObjectTag(object));
if (!Heap::InNewSpace(object)) return;
- scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
+ scavenger_->ScavengeObject(HeapObjectSlot(p),
reinterpret_cast<HeapObject*>(object));
}
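A sketch of why loops like "for (ObjectSlot p = start; p < end; ++p)" above work: the slot type behaves like a random-access iterator over tagged fields, so it only needs ordering, pre-increment, and dereference. IntSlot and VisitRange here are hypothetical, reduced to plain int fields.

#include <iostream>

class IntSlot {
 public:
  explicit IntSlot(int* p) : p_(p) {}
  int& operator*() const { return *p_; }
  IntSlot& operator++() { ++p_; return *this; }  // advance one field
  bool operator<(const IntSlot& other) const { return p_ < other.p_; }

 private:
  int* p_;
};

void VisitRange(IntSlot start, IntSlot end) {
  for (IntSlot p = start; p < end; ++p) std::cout << *p << ' ';
  std::cout << '\n';
}

int main() {
  int fields[4] = {10, 20, 30, 40};  // stand-in for an object's pointer fields
  VisitRange(IntSlot(fields), IntSlot(fields + 4));  // prints: 10 20 30 40
}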
diff --git a/chromium/v8/src/heap/scavenger.h b/chromium/v8/src/heap/scavenger.h
index b984102c6b6..d9cce8c701b 100644
--- a/chromium/v8/src/heap/scavenger.h
+++ b/chromium/v8/src/heap/scavenger.h
@@ -23,12 +23,13 @@ enum class CopyAndForwardResult {
};
using ObjectAndSize = std::pair<HeapObject*, int>;
-using SurvivingNewLargeObjectsMap = std::unordered_map<HeapObject*, Map*>;
-using SurvivingNewLargeObjectMapEntry = std::pair<HeapObject*, Map*>;
+using SurvivingNewLargeObjectsMap = std::unordered_map<HeapObject*, Map>;
+using SurvivingNewLargeObjectMapEntry = std::pair<HeapObject*, Map>;
class ScavengerCollector {
public:
static const int kMaxScavengerTasks = 8;
+ static const int kMaxWaitTimeMs = 2;
explicit ScavengerCollector(Heap* heap);
@@ -54,7 +55,7 @@ class Scavenger {
public:
struct PromotionListEntry {
HeapObject* heap_object;
- Map* map;
+ Map map;
int size;
};
@@ -66,7 +67,7 @@ class Scavenger {
: promotion_list_(promotion_list), task_id_(task_id) {}
inline void PushRegularObject(HeapObject* object, int size);
- inline void PushLargeObject(HeapObject* object, Map* map, int size);
+ inline void PushLargeObject(HeapObject* object, Map map, int size);
inline bool IsEmpty();
inline size_t LocalPushSegmentSize();
inline bool Pop(struct PromotionListEntry* entry);
@@ -83,7 +84,7 @@ class Scavenger {
large_object_promotion_list_(num_tasks) {}
inline void PushRegularObject(int task_id, HeapObject* object, int size);
- inline void PushLargeObject(int task_id, HeapObject* object, Map* map,
+ inline void PushLargeObject(int task_id, HeapObject* object, Map map,
int size);
inline bool IsEmpty();
inline size_t LocalPushSegmentSize(int task_id);
@@ -134,59 +135,58 @@ class Scavenger {
inline Heap* heap() { return heap_; }
- inline void PageMemoryFence(MaybeObject* object);
+ inline void PageMemoryFence(MaybeObject object);
void AddPageToSweeperIfNecessary(MemoryChunk* page);
- // Potentially scavenges an object referenced from |slot_address| if it is
+ // Potentially scavenges an object referenced from |slot| if it is
// indeed a HeapObject and resides in from space.
inline SlotCallbackResult CheckAndScavengeObject(Heap* heap,
- Address slot_address);
+ MaybeObjectSlot slot);
// Scavenges an object |object| referenced from slot |p|. |object| is required
// to be in from space.
- inline SlotCallbackResult ScavengeObject(HeapObjectReference** p,
+ inline SlotCallbackResult ScavengeObject(HeapObjectSlot p,
HeapObject* object);
// Copies |source| to |target| and sets the forwarding pointer in |source|.
- V8_INLINE bool MigrateObject(Map* map, HeapObject* source, HeapObject* target,
+ V8_INLINE bool MigrateObject(Map map, HeapObject* source, HeapObject* target,
int size);
V8_INLINE SlotCallbackResult
RememberedSetEntryNeeded(CopyAndForwardResult result);
- V8_INLINE CopyAndForwardResult SemiSpaceCopyObject(Map* map,
- HeapObjectReference** slot,
+ V8_INLINE CopyAndForwardResult SemiSpaceCopyObject(Map map,
+ HeapObjectSlot slot,
HeapObject* object,
int object_size);
- V8_INLINE CopyAndForwardResult PromoteObject(Map* map,
- HeapObjectReference** slot,
+ V8_INLINE CopyAndForwardResult PromoteObject(Map map, HeapObjectSlot slot,
HeapObject* object,
int object_size);
- V8_INLINE SlotCallbackResult EvacuateObject(HeapObjectReference** slot,
- Map* map, HeapObject* source);
+ V8_INLINE SlotCallbackResult EvacuateObject(HeapObjectSlot slot, Map map,
+ HeapObject* source);
- V8_INLINE bool HandleLargeObject(Map* map, HeapObject* object,
+ V8_INLINE bool HandleLargeObject(Map map, HeapObject* object,
int object_size);
// Different cases for object evacuation.
- V8_INLINE SlotCallbackResult EvacuateObjectDefault(Map* map,
- HeapObjectReference** slot,
+ V8_INLINE SlotCallbackResult EvacuateObjectDefault(Map map,
+ HeapObjectSlot slot,
HeapObject* object,
int object_size);
- inline SlotCallbackResult EvacuateThinString(Map* map, HeapObject** slot,
- ThinString* object,
+ inline SlotCallbackResult EvacuateThinString(Map map, HeapObjectSlot slot,
+ ThinString object,
int object_size);
- inline SlotCallbackResult EvacuateShortcutCandidate(Map* map,
- HeapObject** slot,
- ConsString* object,
+ inline SlotCallbackResult EvacuateShortcutCandidate(Map map,
+ HeapObjectSlot slot,
+ ConsString object,
int object_size);
- void IterateAndScavengePromotedObject(HeapObject* target, Map* map, int size);
+ void IterateAndScavengePromotedObject(HeapObject* target, Map map, int size);
static inline bool ContainsOnlyData(VisitorId visitor_id);
@@ -214,12 +214,12 @@ class RootScavengeVisitor final : public RootVisitor {
public:
explicit RootScavengeVisitor(Scavenger* scavenger);
- void VisitRootPointer(Root root, const char* description, Object** p) final;
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) final;
+ void VisitRootPointer(Root root, const char* description, ObjectSlot p) final;
+ void VisitRootPointers(Root root, const char* description, ObjectSlot start,
+ ObjectSlot end) final;
private:
- void ScavengePointer(Object** p);
+ void ScavengePointer(ObjectSlot p);
Scavenger* const scavenger_;
};
@@ -228,10 +228,10 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
public:
explicit ScavengeVisitor(Scavenger* scavenger);
- V8_INLINE void VisitPointers(HeapObject* host, Object** start,
- Object** end) final;
- V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
- MaybeObject** end) final;
+ V8_INLINE void VisitPointers(HeapObject* host, ObjectSlot start,
+ ObjectSlot end) final;
+ V8_INLINE void VisitPointers(HeapObject* host, MaybeObjectSlot start,
+ MaybeObjectSlot end) final;
private:
Scavenger* const scavenger_;
diff --git a/chromium/v8/src/heap/setup-heap-internal.cc b/chromium/v8/src/heap/setup-heap-internal.cc
index 5790b82907a..4ac09a032b4 100644
--- a/chromium/v8/src/heap/setup-heap-internal.cc
+++ b/chromium/v8/src/heap/setup-heap-internal.cc
@@ -5,7 +5,6 @@
#include "src/setup-isolate.h"
#include "src/accessors.h"
-#include "src/ast/context-slot-cache.h"
#include "src/compilation-cache.h"
#include "src/contexts.h"
#include "src/heap-symbols.h"
@@ -21,14 +20,17 @@
#include "src/objects/debug-objects.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
+#include "src/objects/instance-type-inl.h"
+#include "src/objects/js-generator.h"
+#include "src/objects/js-weak-refs.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/map.h"
-#include "src/objects/microtask-queue.h"
#include "src/objects/microtask.h"
#include "src/objects/module.h"
#include "src/objects/promise.h"
#include "src/objects/script.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/smi.h"
#include "src/objects/stack-frame-info.h"
#include "src/objects/string.h"
#include "src/regexp/jsregexp.h"
@@ -108,7 +110,7 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
result->set_map_after_allocation(ReadOnlyRoots(this).meta_map(),
SKIP_WRITE_BARRIER);
- Map* map = isolate()->factory()->InitializeMap(
+ Map map = isolate()->factory()->InitializeMap(
Map::cast(result), instance_type, instance_size, elements_kind,
inobject_properties);
@@ -121,9 +123,10 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
AllocationResult allocation = AllocateRaw(Map::kSize, RO_SPACE);
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
- Map* map = reinterpret_cast<Map*>(result);
+ Map map = Map::unchecked_cast(result);
map->set_map_after_allocation(
- reinterpret_cast<Map*>(root(RootIndex::kMetaMap)), SKIP_WRITE_BARRIER);
+ Map::unchecked_cast(isolate()->root(RootIndex::kMetaMap)),
+ SKIP_WRITE_BARRIER);
map->set_instance_type(instance_type);
map->set_instance_size(instance_size);
// Initialize to only containing tagged fields.
@@ -147,10 +150,10 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
return map;
}
-void Heap::FinalizePartialMap(Map* map) {
+void Heap::FinalizePartialMap(Map map) {
ReadOnlyRoots roots(this);
map->set_dependent_code(DependentCode::cast(roots.empty_weak_fixed_array()));
- map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
+ map->set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
map->set_instance_descriptors(roots.empty_descriptor_array());
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
@@ -159,7 +162,7 @@ void Heap::FinalizePartialMap(Map* map) {
map->set_constructor_or_backpointer(roots.null_value());
}
-AllocationResult Heap::Allocate(Map* map, AllocationSpace space) {
+AllocationResult Heap::Allocate(Map map, AllocationSpace space) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
HeapObject* result = nullptr;
@@ -185,7 +188,7 @@ AllocationResult Heap::AllocateEmptyFixedTypedArray(
object->set_map_after_allocation(
ReadOnlyRoots(this).MapForFixedTypedArray(array_type),
SKIP_WRITE_BARRIER);
- FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
+ FixedTypedArrayBase elements = FixedTypedArrayBase::cast(object);
elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
elements->set_external_pointer(
reinterpret_cast<void*>(
@@ -202,7 +205,7 @@ bool Heap::CreateInitialMaps() {
if (!allocation.To(&obj)) return false;
}
// Map::cast cannot be used due to uninitialized map field.
- Map* new_meta_map = reinterpret_cast<Map*>(obj);
+ Map new_meta_map = Map::unchecked_cast(obj);
set_meta_map(new_meta_map);
new_meta_map->set_map_after_allocation(new_meta_map);
@@ -210,7 +213,7 @@ bool Heap::CreateInitialMaps() {
{ // Partial map allocation
#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
{ \
- Map* map; \
+ Map map; \
if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
set_##field_name##_map(map); \
}
@@ -290,9 +293,9 @@ bool Heap::CreateInitialMaps() {
// Setup the struct maps first (needed for the EnumCache).
for (unsigned i = 0; i < arraysize(struct_table); i++) {
const StructTable& entry = struct_table[i];
- Map* map;
+ Map map;
if (!AllocatePartialMap(entry.type, entry.size).To(&map)) return false;
- roots_[entry.index] = map;
+ roots_table()[entry.index] = map;
}
// Allocate the empty enum cache.
@@ -306,19 +309,14 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty descriptor array.
{
- STATIC_ASSERT(DescriptorArray::kFirstIndex != 0);
- int length = DescriptorArray::kFirstIndex;
- int size = WeakFixedArray::SizeFor(length);
+ int size = DescriptorArray::SizeFor(0);
if (!AllocateRaw(size, RO_SPACE).To(&obj)) return false;
obj->set_map_after_allocation(roots.descriptor_array_map(),
SKIP_WRITE_BARRIER);
- DescriptorArray::cast(obj)->set_length(length);
+ DescriptorArray* array = DescriptorArray::cast(obj);
+ array->Initialize(roots.empty_enum_cache(), roots.undefined_value(), 0, 0);
}
set_empty_descriptor_array(DescriptorArray::cast(obj));
- DescriptorArray::cast(obj)->SetNumberOfDescriptors(0);
- WeakFixedArray::cast(obj)->Set(
- DescriptorArray::kEnumCacheIndex,
- MaybeObject::FromObject(roots.empty_enum_cache()));
// Fix the instance_descriptors for the existing maps.
FinalizePartialMap(roots.meta_map());
@@ -334,16 +332,16 @@ bool Heap::CreateInitialMaps() {
FinalizePartialMap(roots.the_hole_map());
for (unsigned i = 0; i < arraysize(struct_table); ++i) {
const StructTable& entry = struct_table[i];
- FinalizePartialMap(Map::cast(roots_[entry.index]));
+ FinalizePartialMap(Map::cast(roots_table()[entry.index]));
}
{ // Map allocation
#define ALLOCATE_MAP(instance_type, size, field_name) \
- { \
- Map* map; \
- if (!AllocateMap((instance_type), size).To(&map)) return false; \
- set_##field_name##_map(map); \
- }
+ { \
+ Map map; \
+ if (!AllocateMap((instance_type), size).To(&map)) return false; \
+ set_##field_name##_map(map); \
+ }
#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
@@ -380,24 +378,21 @@ bool Heap::CreateInitialMaps() {
for (unsigned i = 0; i < arraysize(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
- {
- AllocationResult allocation = AllocateMap(entry.type, entry.size);
- if (!allocation.To(&obj)) return false;
- }
- Map* map = Map::cast(obj);
+ Map map;
+ if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
// Mark cons string maps as unstable, because their objects can change
// maps during GC.
if (StringShape(entry.type).IsCons()) map->mark_unstable();
- roots_[entry.index] = map;
+ roots_table()[entry.index] = map;
}
{ // Create a separate external one byte string map for native sources.
+ Map map;
AllocationResult allocation =
AllocateMap(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE,
ExternalOneByteString::kUncachedSize);
- if (!allocation.To(&obj)) return false;
- Map* map = Map::cast(obj);
+ if (!allocation.To(&map)) return false;
map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
set_native_source_string_map(map);
}
@@ -411,6 +406,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(PROPERTY_ARRAY_TYPE, property_array)
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_MAP_TYPE, small_ordered_hash_map)
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_HASH_SET_TYPE, small_ordered_hash_set)
+ ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_NAME_DICTIONARY_TYPE,
+ small_ordered_name_dictionary)
#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype) \
ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
@@ -425,7 +422,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell);
{
// The invalid_prototype_validity_cell is needed for JSObject maps.
- Smi* value = Smi::FromInt(Map::kPrototypeChainInvalid);
+ Smi value = Smi::FromInt(Map::kPrototypeChainInvalid);
AllocationResult alloc = AllocateRaw(Cell::kSize, OLD_SPACE);
if (!alloc.To(&obj)) return false;
obj->set_map_after_allocation(roots.cell_map(), SKIP_WRITE_BARRIER);
@@ -444,12 +441,15 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, one_closure_cell)
roots.one_closure_cell_map()->mark_unstable();
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, many_closures_cell)
+ ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, no_feedback_cell)
+ roots.no_feedback_cell_map()->mark_unstable();
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, hash_table)
ALLOCATE_VARSIZE_MAP(ORDERED_HASH_MAP_TYPE, ordered_hash_map)
ALLOCATE_VARSIZE_MAP(ORDERED_HASH_SET_TYPE, ordered_hash_set)
+ ALLOCATE_VARSIZE_MAP(ORDERED_NAME_DICTIONARY_TYPE, ordered_name_dictionary)
ALLOCATE_VARSIZE_MAP(NAME_DICTIONARY_TYPE, name_dictionary)
ALLOCATE_VARSIZE_MAP(GLOBAL_DICTIONARY_TYPE, global_dictionary)
ALLOCATE_VARSIZE_MAP(NUMBER_DICTIONARY_TYPE, number_dictionary)
@@ -457,6 +457,7 @@ bool Heap::CreateInitialMaps() {
simple_number_dictionary)
ALLOCATE_VARSIZE_MAP(STRING_TABLE_TYPE, string_table)
+ ALLOCATE_VARSIZE_MAP(EMBEDDER_DATA_ARRAY_TYPE, embedder_data_array)
ALLOCATE_VARSIZE_MAP(EPHEMERON_HASH_TABLE_TYPE, ephemeron_hash_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, array_list)
@@ -475,8 +476,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
object_boilerplate_description)
- ALLOCATE_VARSIZE_MAP(NATIVE_CONTEXT_TYPE, native_context)
- roots.native_context_map()->set_visitor_id(kVisitNativeContext);
+ ALLOCATE_MAP(NATIVE_CONTEXT_TYPE, NativeContext::kSize, native_context)
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
side_effect_call_handler_info)
@@ -499,7 +499,8 @@ bool Heap::CreateInitialMaps() {
code_data_container)
ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
- ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
+ ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kEmbedderDataSlotSize,
+ external)
external_map()->set_is_extensible(false);
#undef ALLOCATE_PRIMITIVE_MAP
#undef ALLOCATE_VARSIZE_MAP
@@ -574,12 +575,13 @@ bool Heap::CreateInitialMaps() {
set_empty_property_array(PropertyArray::cast(obj));
}
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
- { \
- FixedTypedArrayBase* obj; \
- if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
- return false; \
- set_empty_fixed_##type##_array(obj); \
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
+ { \
+ FixedTypedArrayBase obj; \
+ if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) { \
+ return false; \
+ } \
+ set_empty_fixed_##type##_array(obj); \
}
TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
@@ -622,9 +624,15 @@ void Heap::CreateInitialObjects() {
set_minus_infinity_value(
*factory->NewHeapNumber(-V8_INFINITY, TENURED_READ_ONLY));
- set_hash_seed(*factory->NewByteArray(kInt64Size, TENURED));
+ set_hash_seed(*factory->NewByteArray(kInt64Size, TENURED_READ_ONLY));
InitializeHashSeed();
+ // There's no "current microtask" in the beginning.
+ set_current_microtask(roots.undefined_value());
+
+ set_dirty_js_weak_factories(roots.undefined_value());
+ set_weak_refs_keep_during_job(roots.undefined_value());
+
// Allocate cache for single character one byte strings.
set_single_character_string_cache(
*factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
@@ -635,7 +643,7 @@ void Heap::CreateInitialObjects() {
for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
Handle<String> str =
factory->InternalizeUtf8String(constant_string_table[i].contents);
- roots_[constant_string_table[i].index] = *str;
+ roots_table()[constant_string_table[i].index] = *str;
}
// Allocate
@@ -695,6 +703,8 @@ void Heap::CreateInitialObjects() {
set_self_reference_marker(
*factory->NewSelfReferenceMarker(TENURED_READ_ONLY));
+ set_interpreter_entry_trampoline_for_profiling(roots.undefined_value());
+
// Create the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
set_code_stubs(*SimpleNumberDictionary::New(isolate(), 128));
@@ -705,7 +715,7 @@ void Heap::CreateInitialObjects() {
{ \
Handle<Symbol> symbol( \
isolate()->factory()->NewPrivateSymbol(TENURED_READ_ONLY)); \
- roots_[RootIndex::k##name] = *symbol; \
+ roots_table()[RootIndex::k##name] = *symbol; \
}
PRIVATE_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
#undef SYMBOL_INIT
@@ -713,22 +723,20 @@ void Heap::CreateInitialObjects() {
{
HandleScope scope(isolate());
-#define SYMBOL_INIT(_, name, description) \
- Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
- Handle<String> name##d = \
- factory->NewStringFromStaticChars(#description, TENURED_READ_ONLY); \
- name->set_name(*name##d); \
- roots_[RootIndex::k##name] = *name;
+#define SYMBOL_INIT(_, name, description) \
+ Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
+ Handle<String> name##d = factory->InternalizeUtf8String(#description); \
+ name->set_name(*name##d); \
+ roots_table()[RootIndex::k##name] = *name;
PUBLIC_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
#undef SYMBOL_INIT
-#define SYMBOL_INIT(_, name, description) \
- Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
- Handle<String> name##d = \
- factory->NewStringFromStaticChars(#description, TENURED_READ_ONLY); \
- name->set_is_well_known_symbol(true); \
- name->set_name(*name##d); \
- roots_[RootIndex::k##name] = *name;
+#define SYMBOL_INIT(_, name, description) \
+ Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
+ Handle<String> name##d = factory->InternalizeUtf8String(#description); \
+ name->set_is_well_known_symbol(true); \
+ name->set_name(*name##d); \
+ roots_table()[RootIndex::k##name] = *name;
WELL_KNOWN_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
#undef SYMBOL_INIT
@@ -736,8 +744,8 @@ void Heap::CreateInitialObjects() {
to_string_tag_symbol->set_is_interesting_symbol(true);
}
- Handle<NameDictionary> empty_property_dictionary =
- NameDictionary::New(isolate(), 1, TENURED, USE_CUSTOM_MINIMUM_CAPACITY);
+ Handle<NameDictionary> empty_property_dictionary = NameDictionary::New(
+ isolate(), 1, TENURED_READ_ONLY, USE_CUSTOM_MINIMUM_CAPACITY);
DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
set_empty_property_dictionary(*empty_property_dictionary);
@@ -759,7 +767,9 @@ void Heap::CreateInitialObjects() {
factory->NewManyClosuresCell(factory->undefined_value());
set_many_closures_cell(*many_closures_cell);
- set_default_microtask_queue(*factory->NewMicrotaskQueue());
+ // Allocate FeedbackCell for cases where we don't collect feedback.
+ Handle<FeedbackCell> no_feedback_cell = factory->NewNoFeedbackCell();
+ set_no_feedback_cell(*no_feedback_cell);
{
Handle<FixedArray> empty_sloppy_arguments_elements =
@@ -788,7 +798,7 @@ void Heap::CreateInitialObjects() {
// Handling of script id generation is in Heap::NextScriptId().
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
set_last_debugging_id(Smi::FromInt(DebugInfo::kNoDebuggingId));
- set_next_template_serial_number(Smi::kZero);
+ set_next_template_serial_number(Smi::zero());
// Allocate the empty OrderedHashMap.
Handle<FixedArray> empty_ordered_hash_map = factory->NewFixedArray(
@@ -839,6 +849,14 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_iterator_protector(*cell);
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_map_iterator_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_set_iterator_protector(*cell);
+
Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
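The protector cells added in this hunk (map_iterator_protector, set_iterator_protector, and regexp_species_protector just below) all follow the same pattern: the cell starts out holding the kProtectorValid sentinel, fast paths check that sentinel, and the cell is invalidated once the guarded invariant (for example an untouched built-in iterator) is broken. A minimal conceptual sketch of that idea, with an illustrative Protector struct rather than V8's PropertyCell machinery:

#include <cstdio>

// Illustrative stand-in for Isolate::kProtectorValid / kProtectorInvalid.
enum ProtectorState { kProtectorInvalid = 0, kProtectorValid = 1 };

struct Protector {
  ProtectorState state = kProtectorValid;
  bool IsIntact() const { return state == kProtectorValid; }
  void Invalidate() { state = kProtectorInvalid; }
};

// A fast path keeps using the cheap code as long as the protector is intact.
int IterateSet(const Protector& set_iterator_protector) {
  if (set_iterator_protector.IsIntact()) {
    return 0;  // fast path: the built-in iteration protocol is untouched
  }
  return 1;    // slow path: user code patched Set.prototype[Symbol.iterator]
}

int main() {
  Protector set_iterator_protector;
  printf("%d\n", IterateSet(set_iterator_protector));  // 0 (fast path)
  set_iterator_protector.Invalidate();                 // invariant broken
  printf("%d\n", IterateSet(set_iterator_protector));  // 1 (slow path)
}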
@@ -857,6 +875,10 @@ void Heap::CreateInitialObjects() {
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_regexp_species_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_string_iterator_protector(*cell);
Handle<Cell> string_length_overflow_cell = factory->NewCell(
@@ -884,6 +906,9 @@ void Heap::CreateInitialObjects() {
set_noscript_shared_function_infos(roots.empty_weak_array_list());
+ set_off_heap_trampoline_relocation_info(
+ *Builtins::GenerateOffHeapTrampolineRelocInfo(isolate_));
+
// Evaluate the hash values which will then be cached in the strings.
isolate()->factory()->zero_string()->Hash();
isolate()->factory()->one_string()->Hash();
@@ -891,9 +916,6 @@ void Heap::CreateInitialObjects() {
// Initialize builtins constants table.
set_builtins_constants_table(roots.empty_fixed_array());
- // Initialize context slot cache.
- isolate_->context_slot_cache()->Clear();
-
// Initialize descriptor cache.
isolate_->descriptor_lookup_cache()->Clear();
@@ -908,15 +930,15 @@ void Heap::CreateInternalAccessorInfoObjects() {
#define INIT_ACCESSOR_INFO(_, accessor_name, AccessorName, ...) \
acessor_info = Accessors::Make##AccessorName##Info(isolate); \
- roots_[RootIndex::k##AccessorName##Accessor] = *acessor_info;
+ roots_table()[RootIndex::k##AccessorName##Accessor] = *acessor_info;
ACCESSOR_INFO_LIST_GENERATOR(INIT_ACCESSOR_INFO, /* not used */)
#undef INIT_ACCESSOR_INFO
#define INIT_SIDE_EFFECT_FLAG(_, accessor_name, AccessorName, GetterType, \
SetterType) \
- AccessorInfo::cast(roots_[RootIndex::k##AccessorName##Accessor]) \
+ AccessorInfo::cast(roots_table()[RootIndex::k##AccessorName##Accessor]) \
->set_getter_side_effect_type(SideEffectType::GetterType); \
- AccessorInfo::cast(roots_[RootIndex::k##AccessorName##Accessor]) \
+ AccessorInfo::cast(roots_table()[RootIndex::k##AccessorName##Accessor]) \
->set_setter_side_effect_type(SideEffectType::SetterType);
ACCESSOR_INFO_LIST_GENERATOR(INIT_SIDE_EFFECT_FLAG, /* not used */)
#undef INIT_SIDE_EFFECT_FLAG
diff --git a/chromium/v8/src/heap/slot-set.cc b/chromium/v8/src/heap/slot-set.cc
new file mode 100644
index 00000000000..f908b64b3a4
--- /dev/null
+++ b/chromium/v8/src/heap/slot-set.cc
@@ -0,0 +1,100 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/slot-set.h"
+
+namespace v8 {
+namespace internal {
+
+TypedSlots::~TypedSlots() {
+ Chunk* chunk = head_;
+ while (chunk != nullptr) {
+ Chunk* next = chunk->next;
+ delete[] chunk->buffer;
+ delete chunk;
+ chunk = next;
+ }
+ head_ = nullptr;
+ tail_ = nullptr;
+}
+
+void TypedSlots::Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
+ TypedSlot slot = {TypeField::encode(type) | OffsetField::encode(offset),
+ host_offset};
+ Chunk* chunk = EnsureChunk();
+ DCHECK_LT(chunk->count, chunk->capacity);
+ chunk->buffer[chunk->count] = slot;
+ ++chunk->count;
+}
+
+void TypedSlots::Merge(TypedSlots* other) {
+ if (other->head_ == nullptr) {
+ return;
+ }
+ if (head_ == nullptr) {
+ head_ = other->head_;
+ tail_ = other->tail_;
+ } else {
+ tail_->next = other->head_;
+ tail_ = other->tail_;
+ }
+ other->head_ = nullptr;
+ other->tail_ = nullptr;
+}
+
+TypedSlots::Chunk* TypedSlots::EnsureChunk() {
+ if (!head_) {
+ head_ = tail_ = NewChunk(nullptr, kInitialBufferSize);
+ }
+ if (head_->count == head_->capacity) {
+ head_ = NewChunk(head_, NextCapacity(head_->capacity));
+ }
+ return head_;
+}
+
+TypedSlots::Chunk* TypedSlots::NewChunk(Chunk* next, int capacity) {
+ Chunk* chunk = new Chunk;
+ chunk->next = next;
+ chunk->buffer = new TypedSlot[capacity];
+ chunk->capacity = capacity;
+ chunk->count = 0;
+ return chunk;
+}
+
+TypedSlotSet::~TypedSlotSet() { FreeToBeFreedChunks(); }
+
+void TypedSlotSet::FreeToBeFreedChunks() {
+ base::MutexGuard guard(&to_be_freed_chunks_mutex_);
+ std::stack<std::unique_ptr<Chunk>> empty;
+ to_be_freed_chunks_.swap(empty);
+}
+
+void TypedSlotSet::ClearInvalidSlots(
+ const std::map<uint32_t, uint32_t>& invalid_ranges) {
+ Chunk* chunk = LoadHead();
+ while (chunk != nullptr) {
+ TypedSlot* buffer = chunk->buffer;
+ int count = chunk->count;
+ for (int i = 0; i < count; i++) {
+ TypedSlot slot = LoadTypedSlot(buffer + i);
+ SlotType type = TypeField::decode(slot.type_and_offset);
+ if (type == CLEARED_SLOT) continue;
+ uint32_t host_offset = slot.host_offset;
+ std::map<uint32_t, uint32_t>::const_iterator upper_bound =
+ invalid_ranges.upper_bound(host_offset);
+ if (upper_bound == invalid_ranges.begin()) continue;
+ // upper_bound points to the invalid range after the given slot. Hence,
+ // we have to go to the previous element.
+ upper_bound--;
+ DCHECK_LE(upper_bound->first, host_offset);
+ if (upper_bound->second > host_offset) {
+ ClearTypedSlot(buffer + i);
+ }
+ }
+ chunk = LoadNext(chunk);
+ }
+}
+
+} // namespace internal
+} // namespace v8
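ClearInvalidSlots above treats invalid_ranges as a map from range start to range end and uses upper_bound to locate the only range that could contain a given host offset. A minimal standalone sketch of that lookup, with made-up ranges and offsets:

#include <cstdint>
#include <cstdio>
#include <map>

// Returns true if host_offset falls inside one of the half-open
// [start, end) ranges stored as start -> end in the map.
bool InInvalidRange(const std::map<uint32_t, uint32_t>& invalid_ranges,
                    uint32_t host_offset) {
  auto upper_bound = invalid_ranges.upper_bound(host_offset);
  if (upper_bound == invalid_ranges.begin()) return false;
  // upper_bound points to the range starting after host_offset, so step
  // back to the range that starts at or before it.
  --upper_bound;
  return upper_bound->second > host_offset;
}

int main() {
  std::map<uint32_t, uint32_t> invalid_ranges = {{16, 32}, {64, 80}};
  printf("%d\n", InInvalidRange(invalid_ranges, 20));  // 1: inside [16, 32)
  printf("%d\n", InInvalidRange(invalid_ranges, 40));  // 0: between ranges
  printf("%d\n", InInvalidRange(invalid_ranges, 64));  // 1: inside [64, 80)
}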
diff --git a/chromium/v8/src/heap/slot-set.h b/chromium/v8/src/heap/slot-set.h
index 7423665bcbf..c67eca3e16f 100644
--- a/chromium/v8/src/heap/slot-set.h
+++ b/chromium/v8/src/heap/slot-set.h
@@ -182,8 +182,8 @@ class SlotSet : public Malloced {
// This method should only be called on the main thread.
//
// Sample usage:
- // Iterate([](Address slot_address) {
- // if (good(slot_address)) return KEEP_SLOT;
+ // Iterate([](MaybeObjectSlot slot) {
+ // if (good(slot)) return KEEP_SLOT;
// else return REMOVE_SLOT;
// });
template <typename Callback>
@@ -203,7 +203,7 @@ class SlotSet : public Malloced {
int bit_offset = base::bits::CountTrailingZeros(cell);
uint32_t bit_mask = 1u << bit_offset;
uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
- if (callback(page_start_ + slot) == KEEP_SLOT) {
+ if (callback(MaybeObjectSlot(page_start_ + slot)) == KEEP_SLOT) {
++in_bucket_count;
} else {
mask |= bit_mask;
@@ -226,7 +226,7 @@ class SlotSet : public Malloced {
}
int NumberOfPreFreedEmptyBuckets() {
- base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+ base::MutexGuard guard(&to_be_freed_buckets_mutex_);
return static_cast<int>(to_be_freed_buckets_.size());
}
@@ -253,7 +253,7 @@ class SlotSet : public Malloced {
}
void FreeToBeFreedBuckets() {
- base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+ base::MutexGuard guard(&to_be_freed_buckets_mutex_);
while (!to_be_freed_buckets_.empty()) {
Bucket top = to_be_freed_buckets_.top();
to_be_freed_buckets_.pop();
@@ -294,7 +294,7 @@ class SlotSet : public Malloced {
void PreFreeEmptyBucket(int bucket_index) {
Bucket bucket = LoadBucket(&buckets_[bucket_index]);
if (bucket != nullptr) {
- base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+ base::MutexGuard guard(&to_be_freed_buckets_mutex_);
to_be_freed_buckets_.push(bucket);
StoreBucket(&buckets_[bucket_index], nullptr);
}
@@ -392,101 +392,58 @@ enum SlotType {
CLEARED_SLOT
};
-// Data structure for maintaining a multiset of typed slots in a page.
+// Data structure for maintaining a list of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
 // The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to JS.
-class TypedSlotSet {
+class TypedSlots {
public:
- enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
-
- typedef std::pair<SlotType, uint32_t> TypeAndOffset;
+ static const int kMaxOffset = 1 << 29;
+ TypedSlots() = default;
+ virtual ~TypedSlots();
+ V8_EXPORT_PRIVATE void Insert(SlotType type, uint32_t host_offset,
+ uint32_t offset);
+ V8_EXPORT_PRIVATE void Merge(TypedSlots* other);
+ protected:
+ class OffsetField : public BitField<int, 0, 29> {};
+ class TypeField : public BitField<SlotType, 29, 3> {};
struct TypedSlot {
- TypedSlot() : type_and_offset_(0), host_offset_(0) {}
-
- TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset)
- : type_and_offset_(TypeField::encode(type) |
- OffsetField::encode(offset)),
- host_offset_(host_offset) {}
-
- bool operator==(const TypedSlot other) {
- return type_and_offset() == other.type_and_offset() &&
- host_offset() == other.host_offset();
- }
-
- bool operator!=(const TypedSlot other) { return !(*this == other); }
-
- SlotType type() const { return TypeField::decode(type_and_offset()); }
-
- uint32_t offset() const { return OffsetField::decode(type_and_offset()); }
-
- TypeAndOffset GetTypeAndOffset() const {
- uint32_t t_and_o = type_and_offset();
- return std::make_pair(TypeField::decode(t_and_o),
- OffsetField::decode(t_and_o));
- }
-
- uint32_t type_and_offset() const {
- return base::AsAtomic32::Acquire_Load(&type_and_offset_);
- }
-
- uint32_t host_offset() const {
- return base::AsAtomic32::Acquire_Load(&host_offset_);
- }
-
- void Set(TypedSlot slot) {
- base::AsAtomic32::Release_Store(&type_and_offset_,
- slot.type_and_offset());
- base::AsAtomic32::Release_Store(&host_offset_, slot.host_offset());
- }
-
- void Clear() {
- base::AsAtomic32::Release_Store(
- &type_and_offset_,
- TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
- base::AsAtomic32::Release_Store(&host_offset_, 0);
- }
-
- uint32_t type_and_offset_;
- uint32_t host_offset_;
+ uint32_t type_and_offset;
+ uint32_t host_offset;
};
- static const int kMaxOffset = 1 << 29;
+ struct Chunk {
+ Chunk* next;
+ TypedSlot* buffer;
+ int32_t capacity;
+ int32_t count;
+ };
+ static const int kInitialBufferSize = 100;
+ static const int kMaxBufferSize = 16 * KB;
+ static int NextCapacity(int capacity) {
+ return Min(kMaxBufferSize, capacity * 2);
+ }
+ Chunk* EnsureChunk();
+ Chunk* NewChunk(Chunk* next, int capacity);
+ Chunk* head_ = nullptr;
+ Chunk* tail_ = nullptr;
+};
- explicit TypedSlotSet(Address page_start)
- : page_start_(page_start), top_(new Chunk(nullptr, kInitialBufferSize)) {}
+// A multiset of per-page typed slots that allows concurrent iteration and
+// clearing of invalid slots.
+class V8_EXPORT_PRIVATE TypedSlotSet : public TypedSlots {
+ public:
+ // The PREFREE_EMPTY_CHUNKS mode indicates that chunks detected as empty
+ // during iteration are queued in to_be_freed_chunks_, which are then
+ // freed in FreeToBeFreedChunks.
+ enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };
- ~TypedSlotSet() {
- Chunk* chunk = load_top();
- while (chunk != nullptr) {
- Chunk* n = chunk->next();
- delete chunk;
- chunk = n;
- }
- FreeToBeFreedChunks();
- }
+ explicit TypedSlotSet(Address page_start) : page_start_(page_start) {}
- // The slot offset specifies a slot at address page_start_ + offset.
- // This method can only be called on the main thread.
- void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
- TypedSlot slot(type, host_offset, offset);
- Chunk* top_chunk = load_top();
- if (!top_chunk) {
- top_chunk = new Chunk(nullptr, kInitialBufferSize);
- set_top(top_chunk);
- }
- if (!top_chunk->AddSlot(slot)) {
- Chunk* new_top_chunk =
- new Chunk(top_chunk, NextCapacity(top_chunk->capacity()));
- bool added = new_top_chunk->AddSlot(slot);
- set_top(new_top_chunk);
- DCHECK(added);
- USE(added);
- }
- }
+ ~TypedSlotSet() override;
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
@@ -497,145 +454,90 @@ class TypedSlotSet {
// if (good(slot_type, slot_address)) return KEEP_SLOT;
// else return REMOVE_SLOT;
// });
+ // This can run concurrently with ClearInvalidSlots().
template <typename Callback>
int Iterate(Callback callback, IterationMode mode) {
STATIC_ASSERT(CLEARED_SLOT < 8);
- Chunk* chunk = load_top();
+ Chunk* chunk = head_;
Chunk* previous = nullptr;
int new_count = 0;
while (chunk != nullptr) {
- TypedSlot* buf = chunk->buffer();
+ TypedSlot* buffer = chunk->buffer;
+ int count = chunk->count;
bool empty = true;
- for (int i = 0; i < chunk->count(); i++) {
- // Order is important here. We have to read out the slot type last to
- // observe the concurrent removal case consistently.
- Address host_addr = page_start_ + buf[i].host_offset();
- TypeAndOffset type_and_offset = buf[i].GetTypeAndOffset();
- SlotType type = type_and_offset.first;
+ for (int i = 0; i < count; i++) {
+ TypedSlot slot = LoadTypedSlot(buffer + i);
+ SlotType type = TypeField::decode(slot.type_and_offset);
if (type != CLEARED_SLOT) {
- Address addr = page_start_ + type_and_offset.second;
+ uint32_t offset = OffsetField::decode(slot.type_and_offset);
+ Address addr = page_start_ + offset;
+ Address host_addr = page_start_ + slot.host_offset;
if (callback(type, host_addr, addr) == KEEP_SLOT) {
new_count++;
empty = false;
} else {
- buf[i].Clear();
+ ClearTypedSlot(buffer + i);
}
}
}
-
- Chunk* n = chunk->next();
+ Chunk* next = chunk->next;
if (mode == PREFREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point to its next
        // chunk to allow concurrent iteration.
if (previous) {
- previous->set_next(n);
+ StoreNext(previous, next);
} else {
- set_top(n);
+ StoreHead(next);
}
- base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
- to_be_freed_chunks_.push(chunk);
+ base::MutexGuard guard(&to_be_freed_chunks_mutex_);
+ to_be_freed_chunks_.push(std::unique_ptr<Chunk>(chunk));
} else {
previous = chunk;
}
- chunk = n;
+ chunk = next;
}
return new_count;
}
- void FreeToBeFreedChunks() {
- base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
- while (!to_be_freed_chunks_.empty()) {
- Chunk* top = to_be_freed_chunks_.top();
- to_be_freed_chunks_.pop();
- delete top;
- }
- }
+ // Clears all slots whose offsets fall within the specified ranges.
+ // This can run concurrently with Iterate().
+ void ClearInvalidSlots(const std::map<uint32_t, uint32_t>& invalid_ranges);
- void RemoveInvaldSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
- Chunk* chunk = load_top();
- while (chunk != nullptr) {
- TypedSlot* buf = chunk->buffer();
- for (int i = 0; i < chunk->count(); i++) {
- uint32_t host_offset = buf[i].host_offset();
- std::map<uint32_t, uint32_t>::iterator upper_bound =
- invalid_ranges.upper_bound(host_offset);
- if (upper_bound == invalid_ranges.begin()) continue;
- // upper_bounds points to the invalid range after the given slot. Hence,
- // we have to go to the previous element.
- upper_bound--;
- DCHECK_LE(upper_bound->first, host_offset);
- if (upper_bound->second > host_offset) {
- buf[i].Clear();
- }
- }
- chunk = chunk->next();
- }
- }
+ // Frees empty chunks accumulated by PREFREE_EMPTY_CHUNKS.
+ void FreeToBeFreedChunks();
private:
- static const int kInitialBufferSize = 100;
- static const int kMaxBufferSize = 16 * KB;
-
- static int NextCapacity(int capacity) {
- return Min(kMaxBufferSize, capacity * 2);
+ // Atomic operations used by Iterate and ClearInvalidSlots.
+ Chunk* LoadNext(Chunk* chunk) {
+ return base::AsAtomicPointer::Relaxed_Load(&chunk->next);
+ }
+ void StoreNext(Chunk* chunk, Chunk* next) {
+ return base::AsAtomicPointer::Relaxed_Store(&chunk->next, next);
+ }
+ Chunk* LoadHead() { return base::AsAtomicPointer::Relaxed_Load(&head_); }
+ void StoreHead(Chunk* chunk) {
+ base::AsAtomicPointer::Relaxed_Store(&head_, chunk);
+ }
+ TypedSlot LoadTypedSlot(TypedSlot* slot) {
+ // Order is important here and should match that of ClearTypedSlot. The
+ // order guarantees that type != CLEARED_SLOT implies valid host_offset.
+ TypedSlot result;
+ result.host_offset = base::AsAtomic32::Acquire_Load(&slot->host_offset);
+ result.type_and_offset =
+ base::AsAtomic32::Relaxed_Load(&slot->type_and_offset);
+ return result;
+ }
+ void ClearTypedSlot(TypedSlot* slot) {
+ // Order is important here and should match that of LoadTypedSlot.
+ base::AsAtomic32::Relaxed_Store(
+ &slot->type_and_offset,
+ TypeField::encode(CLEARED_SLOT) | OffsetField::encode(0));
+ base::AsAtomic32::Release_Store(&slot->host_offset, 0);
}
-
- class OffsetField : public BitField<int, 0, 29> {};
- class TypeField : public BitField<SlotType, 29, 3> {};
-
- struct Chunk : Malloced {
- explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
- next_ = next_chunk;
- buffer_ = NewArray<TypedSlot>(chunk_capacity);
- capacity_ = chunk_capacity;
- count_ = 0;
- }
-
- ~Chunk() { DeleteArray(buffer_); }
-
- bool AddSlot(TypedSlot slot) {
- int current_count = count();
- if (current_count == capacity()) return false;
- TypedSlot* current_buffer = buffer();
- // Order is important here. We have to write the slot first before
- // increasing the counter to guarantee that a consistent state is
- // observed by concurrent threads.
- current_buffer[current_count].Set(slot);
- set_count(current_count + 1);
- return true;
- }
-
- Chunk* next() const { return base::AsAtomicPointer::Acquire_Load(&next_); }
-
- void set_next(Chunk* n) {
- return base::AsAtomicPointer::Release_Store(&next_, n);
- }
-
- TypedSlot* buffer() const { return buffer_; }
-
- int32_t capacity() const { return capacity_; }
-
- int32_t count() const { return base::AsAtomic32::Acquire_Load(&count_); }
-
- void set_count(int32_t new_value) {
- base::AsAtomic32::Release_Store(&count_, new_value);
- }
-
- private:
- Chunk* next_;
- TypedSlot* buffer_;
- int32_t capacity_;
- int32_t count_;
- };
-
- Chunk* load_top() { return base::AsAtomicPointer::Acquire_Load(&top_); }
-
- void set_top(Chunk* c) { base::AsAtomicPointer::Release_Store(&top_, c); }
Address page_start_;
- Chunk* top_;
base::Mutex to_be_freed_chunks_mutex_;
- std::stack<Chunk*> to_be_freed_chunks_;
+ std::stack<std::unique_ptr<Chunk>> to_be_freed_chunks_;
};
} // namespace internal
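TypedSlots stores each slot as two 32-bit words: host_offset verbatim, and type_and_offset with the offset in the low 29 bits (OffsetField) and the SlotType in the top 3 bits (TypeField), which is also where kMaxOffset = 1 << 29 and the STATIC_ASSERT(CLEARED_SLOT < 8) come from. A standalone sketch of that encoding using plain shifts and masks instead of the BitField helpers:

#include <cassert>
#include <cstdint>

// 29 low bits for the offset, 3 high bits for the slot type.
constexpr int kOffsetBits = 29;
constexpr uint32_t kOffsetMask = (1u << kOffsetBits) - 1;
constexpr uint32_t kMaxOffset = 1u << kOffsetBits;

uint32_t Encode(uint32_t type, uint32_t offset) {
  assert(type < 8 && offset < kMaxOffset);
  return (type << kOffsetBits) | offset;
}

uint32_t DecodeType(uint32_t word) { return word >> kOffsetBits; }
uint32_t DecodeOffset(uint32_t word) { return word & kOffsetMask; }

int main() {
  uint32_t word = Encode(/*type=*/5, /*offset=*/123456);
  assert(DecodeType(word) == 5);
  assert(DecodeOffset(word) == 123456);
}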
diff --git a/chromium/v8/src/heap/spaces-inl.h b/chromium/v8/src/heap/spaces-inl.h
index 7162769e5e0..37191d82eb7 100644
--- a/chromium/v8/src/heap/spaces-inl.h
+++ b/chromium/v8/src/heap/spaces-inl.h
@@ -154,15 +154,11 @@ bool NewSpace::ToSpaceContainsSlow(Address a) {
return to_space_.ContainsSlow(a);
}
-bool NewSpace::FromSpaceContainsSlow(Address a) {
- return from_space_.ContainsSlow(a);
-}
-
bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
bool PagedSpace::Contains(Address addr) {
- if (heap()->lo_space()->FindPage(addr)) return false;
+ if (heap()->IsWithinLargeObject(addr)) return false;
return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
}
@@ -234,6 +230,10 @@ void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
amount);
}
+bool MemoryChunk::IsInNewLargeObjectSpace() const {
+ return owner()->identity() == NEW_LO_SPACE;
+}
+
void Page::MarkNeverAllocateForTesting() {
DCHECK(this->owner()->identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
@@ -259,15 +259,16 @@ void Page::ClearEvacuationCandidate() {
InitializeFreeListCategories();
}
-MemoryChunkIterator::MemoryChunkIterator(Heap* heap)
+OldGenerationMemoryChunkIterator::OldGenerationMemoryChunkIterator(Heap* heap)
: heap_(heap),
state_(kOldSpaceState),
old_iterator_(heap->old_space()->begin()),
code_iterator_(heap->code_space()->begin()),
map_iterator_(heap->map_space()->begin()),
- lo_iterator_(heap->lo_space()->begin()) {}
+ lo_iterator_(heap->lo_space()->begin()),
+ code_lo_iterator_(heap->code_lo_space()->begin()) {}
-MemoryChunk* MemoryChunkIterator::next() {
+MemoryChunk* OldGenerationMemoryChunkIterator::next() {
switch (state_) {
case kOldSpaceState: {
if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
@@ -287,6 +288,12 @@ MemoryChunk* MemoryChunkIterator::next() {
}
case kLargeObjectState: {
if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
+ state_ = kCodeLargeObjectState;
+ V8_FALLTHROUGH;
+ }
+ case kCodeLargeObjectState: {
+ if (code_lo_iterator_ != heap_->code_lo_space()->end())
+ return *(code_lo_iterator_++);
state_ = kFinishedState;
V8_FALLTHROUGH;
}
@@ -509,7 +516,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
return AllocateRaw(size_in_bytes, alignment);
}
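OldGenerationMemoryChunkIterator::next() chains the per-space iterators with a small state machine and deliberate switch fallthrough: once a space is exhausted the state advances and control falls through to the next case. A generic standalone sketch of the same pattern over two plain vectors (the class and names are illustrative, not the V8 types):

#include <cstdio>
#include <vector>

// Walks two sequences back to back, advancing the state once a sequence
// is exhausted, much like the iterator above walks the old-generation spaces.
class TwoListIterator {
 public:
  TwoListIterator(const std::vector<int>& a, const std::vector<int>& b)
      : a_(a), b_(b) {}

  // Returns the next element, or nullptr when both lists are exhausted.
  const int* next() {
    switch (state_) {
      case kFirst:
        if (ia_ < a_.size()) return &a_[ia_++];
        state_ = kSecond;
        [[fallthrough]];
      case kSecond:
        if (ib_ < b_.size()) return &b_[ib_++];
        state_ = kFinished;
        [[fallthrough]];
      case kFinished:
        return nullptr;
    }
    return nullptr;
  }

 private:
  enum State { kFirst, kSecond, kFinished } state_ = kFirst;
  const std::vector<int>& a_;
  const std::vector<int>& b_;
  size_t ia_ = 0, ib_ = 0;
};

int main() {
  std::vector<int> old_pages = {1, 2}, code_pages = {3};
  TwoListIterator it(old_pages, code_pages);
  while (const int* p = it.next()) printf("%d ", *p);  // prints: 1 2 3
}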
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index dcacea0afc9..819666b6c96 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -31,6 +31,14 @@
namespace v8 {
namespace internal {
+// These checks are here to ensure that the lower 32 bits of any real heap
+// object can't overlap with the lower 32 bits of cleared weak reference value
+// and therefore it's enough to compare only the lower 32 bits of a MaybeObject
+// in order to figure out if it's a cleared weak reference or not.
+STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
+STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
+STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
+
// ----------------------------------------------------------------------------
// HeapObjectIterator
@@ -98,7 +106,7 @@ static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
LAZY_INSTANCE_INITIALIZER;
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
auto it = recently_freed_.find(code_range_size);
if (it == recently_freed_.end() || it->second.empty()) {
return reinterpret_cast<Address>(GetRandomMmapAddr());
@@ -110,7 +118,7 @@ Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
size_t code_range_size) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
recently_freed_[code_range_size].push_back(code_range_start);
}
@@ -121,7 +129,7 @@ void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
size_t code_range_size)
: isolate_(isolate),
- data_page_allocator_(GetPlatformPageAllocator()),
+ data_page_allocator_(isolate->page_allocator()),
code_page_allocator_(nullptr),
capacity_(RoundUp(capacity, Page::kPageSize)),
size_(0),
@@ -154,7 +162,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
// Fullfilling both reserved pages requirement and huge code area
// alignments is not supported (requires re-implementation).
- DCHECK_LE(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize());
+ DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
}
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
@@ -163,7 +171,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
page_allocator->AllocatePageSize());
VirtualMemory reservation(
page_allocator, requested, reinterpret_cast<void*>(hint),
- Max(kCodeRangeAreaAlignment, page_allocator->AllocatePageSize()));
+ Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
if (!reservation.IsReserved()) {
V8::FatalProcessOutOfMemory(isolate_,
"CodeRange setup: allocate virtual memory");
@@ -190,7 +198,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
size_t size =
RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
MemoryChunk::kPageSize);
- DCHECK(IsAligned(aligned_base, kCodeRangeAreaAlignment));
+ DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
LOG(isolate_,
NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
@@ -283,7 +291,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
for (int i = 0; i < pending_unmapping_tasks_; i++) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- CancelableTaskManager::kTaskAborted) {
+ TryAbortResult::kTaskAborted) {
pending_unmapping_tasks_semaphore_.Wait();
}
}
@@ -360,8 +368,13 @@ void MemoryAllocator::Unmapper::TearDown() {
}
}
+size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
+ base::MutexGuard guard(&mutex_);
+ return chunks_[kRegular].size() + chunks_[kNonRegular].size();
+}
+
int MemoryAllocator::Unmapper::NumberOfChunks() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
size_t result = 0;
for (int i = 0; i < kNumberOfChunkQueues; i++) {
result += chunks_[i].size();
@@ -370,7 +383,7 @@ int MemoryAllocator::Unmapper::NumberOfChunks() {
}
size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
size_t sum = 0;
  // kPooled chunks are already uncommitted. We only have to account for
@@ -446,6 +459,74 @@ Address MemoryAllocator::AllocateAlignedMemory(
return base;
}
+void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
+ base::AddressRegion memory_area =
+ MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
+ if (memory_area.size() != 0) {
+ MemoryAllocator* memory_allocator = heap_->memory_allocator();
+ v8::PageAllocator* page_allocator =
+ memory_allocator->page_allocator(executable());
+ CHECK(page_allocator->DiscardSystemPages(
+ reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
+ }
+}
+
+size_t MemoryChunkLayout::CodePageGuardStartOffset() {
+ // We are guarding code pages: the first OS page after the header
+ // will be protected as non-writable.
+ return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
+}
+
+size_t MemoryChunkLayout::CodePageGuardSize() {
+ return MemoryAllocator::GetCommitPageSize();
+}
+
+intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
+ // We are guarding code pages: the first OS page after the header
+ // will be protected as non-writable.
+ return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
+ // We are guarding code pages: the last OS page will be protected as
+ // non-writable.
+ return Page::kPageSize -
+ static_cast<int>(MemoryAllocator::GetCommitPageSize());
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
+ size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
+ DCHECK_LE(kMaxRegularHeapObjectSize, memory);
+ return memory;
+}
+
+intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
+ return MemoryChunk::kHeaderSize +
+ (kPointerSize - MemoryChunk::kHeaderSize % kPointerSize);
+}
+
+size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
+ AllocationSpace space) {
+ if (space == CODE_SPACE) {
+ return ObjectStartOffsetInCodePage();
+ }
+ return ObjectStartOffsetInDataPage();
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
+ size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
+ DCHECK_LE(kMaxRegularHeapObjectSize, memory);
+ return memory;
+}
+
+size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+ AllocationSpace space) {
+ if (space == CODE_SPACE) {
+ return AllocatableMemoryInCodePage();
+ }
+ return AllocatableMemoryInDataPage();
+}
+
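The MemoryChunkLayout helpers introduced here derive the code-page layout from just two inputs, the chunk header size and the OS commit page size: a non-writable guard page right after the rounded-up header, the object area after that guard, and one more guard page at the end of the V8 page. A small worked sketch of that arithmetic with assumed sizes (4 KiB commit pages, a 512 KiB V8 page, a 120-byte header; the real constants depend on the build configuration):

#include <cstddef>
#include <cstdio>

constexpr size_t kPageSize = 512 * 1024;   // assumed V8 page size
constexpr size_t kCommitPageSize = 4096;   // assumed OS commit page size
constexpr size_t kHeaderSize = 120;        // assumed MemoryChunk header size

constexpr size_t RoundUp(size_t x, size_t m) { return (x + m - 1) / m * m; }

int main() {
  size_t guard_start = RoundUp(kHeaderSize, kCommitPageSize);  // 4096
  size_t guard_size = kCommitPageSize;                         // 4096
  size_t object_start = guard_start + guard_size;              // 8192
  size_t object_end = kPageSize - kCommitPageSize;             // 520192
  printf("allocatable code area: %zu bytes\n", object_end - object_start);
}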
Heap* MemoryChunk::synchronized_heap() {
return reinterpret_cast<Heap*>(
base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
@@ -465,10 +546,11 @@ void MemoryChunk::InitializationMemoryFence() {
void MemoryChunk::SetReadAndExecutable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
+ DCHECK(owner()->identity() == CODE_SPACE ||
+ owner()->identity() == CODE_LO_SPACE);
// Decrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
- base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
+ base::MutexGuard guard(page_protection_change_mutex_);
if (write_unprotect_counter_ == 0) {
// This is a corner case that may happen when we have a
// CodeSpaceMemoryModificationScope open and this page was newly
@@ -479,9 +561,9 @@ void MemoryChunk::SetReadAndExecutable() {
DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
if (write_unprotect_counter_ == 0) {
Address protect_start =
- address() + MemoryAllocator::CodePageAreaStartOffset();
+ address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAddressAligned(protect_start, page_size));
+ DCHECK(IsAligned(protect_start, page_size));
size_t protect_size = RoundUp(area_size(), page_size);
CHECK(reservation_.SetPermissions(protect_start, protect_size,
PageAllocator::kReadExecute));
@@ -490,17 +572,18 @@ void MemoryChunk::SetReadAndExecutable() {
void MemoryChunk::SetReadAndWritable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
+ DCHECK(owner()->identity() == CODE_SPACE ||
+ owner()->identity() == CODE_LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
- base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
+ base::MutexGuard guard(page_protection_change_mutex_);
write_unprotect_counter_++;
DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
if (write_unprotect_counter_ == 1) {
Address unprotect_start =
- address() + MemoryAllocator::CodePageAreaStartOffset();
+ address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAddressAligned(unprotect_start, page_size));
+ DCHECK(IsAligned(unprotect_start, page_size));
size_t unprotect_size = RoundUp(area_size(), page_size);
CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
PageAllocator::kReadWrite));
@@ -539,6 +622,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
chunk->young_generation_bitmap_ = nullptr;
+ chunk->marking_bitmap_ = nullptr;
chunk->local_tracker_ = nullptr;
chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
@@ -550,14 +634,15 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->categories_[i] = nullptr;
}
+ chunk->AllocateMarkingBitmap();
if (owner->identity() == RO_SPACE) {
heap->incremental_marking()
->non_atomic_marking_state()
->bitmap(chunk)
->MarkAllBits();
} else {
- heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(
- chunk);
+ heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
+ 0);
}
DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
@@ -569,7 +654,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
heap->code_space_memory_modification_scope_depth();
} else {
size_t page_size = MemoryAllocator::GetCommitPageSize();
- DCHECK(IsAddressAligned(area_start, page_size));
+ DCHECK(IsAligned(area_start, page_size));
size_t area_size = RoundUp(area_end - area_start, page_size);
CHECK(reservation.SetPermissions(area_start, area_size,
PageAllocator::kReadWriteExecute));
@@ -583,7 +668,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* page = static_cast<Page*>(chunk);
- DCHECK_GE(Page::kAllocatableMemory, page->area_size());
+ DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+ page->owner()->identity()),
+ page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocatedBytes();
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
@@ -719,7 +806,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
// Non-executable
// +----------------------------+<- base aligned with MemoryChunk::kAlignment
// | Header |
- // +----------------------------+<- area_start_ (base + kObjectStartOffset)
+ // +----------------------------+<- area_start_ (base + area_start_)
// | Area |
// +----------------------------+<- area_end_ (area_start + commit_area_size)
// | Committed but not used |
@@ -729,13 +816,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
//
if (executable == EXECUTABLE) {
- chunk_size = ::RoundUp(
- CodePageAreaStartOffset() + reserve_area_size + CodePageGuardSize(),
- GetCommitPageSize());
+ chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
+ reserve_area_size +
+ MemoryChunkLayout::CodePageGuardSize(),
+ GetCommitPageSize());
// Size of header (not executable) plus area (executable).
size_t commit_size = ::RoundUp(
- CodePageGuardStartOffset() + commit_area_size, GetCommitPageSize());
+ MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
+ GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, address_hint, &reservation);
@@ -744,18 +833,20 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_executable_ += reservation.size();
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, CodePageGuardStartOffset(), kZapValue);
- ZapBlock(base + CodePageAreaStartOffset(), commit_area_size, kZapValue);
+ ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
+ ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
+ commit_area_size, kZapValue);
}
- area_start = base + CodePageAreaStartOffset();
+ area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
area_end = area_start + commit_area_size;
} else {
- chunk_size = ::RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
- GetCommitPageSize());
- size_t commit_size =
- ::RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
- GetCommitPageSize());
+ chunk_size = ::RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
+ GetCommitPageSize());
+ size_t commit_size = ::RoundUp(
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
+ GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, address_hint, &reservation);
@@ -763,10 +854,13 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
if (base == kNullAddress) return nullptr;
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, Page::kObjectStartOffset + commit_area_size, kZapValue);
+ ZapBlock(
+ base,
+ MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
+ kZapValue);
}
- area_start = base + Page::kObjectStartOffset;
+ area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
area_end = area_start + commit_area_size;
}
@@ -940,7 +1034,7 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
size_t page_size = GetCommitPageSize();
DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
DCHECK_EQ(chunk->address() + chunk->size(),
- chunk->area_end() + CodePageGuardSize());
+ chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
reservation->SetPermissions(chunk->area_end_, page_size,
PageAllocator::kNoAccess);
}
@@ -1023,23 +1117,26 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
}
}
-template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kFull>(MemoryChunk* chunk);
-template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
- MemoryChunk* chunk);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
- MemoryChunk* chunk);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
- MemoryChunk* chunk);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
+ MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
Executability executable) {
MemoryChunk* chunk = nullptr;
if (alloc_mode == kPooled) {
- DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
+ DCHECK_EQ(size, static_cast<size_t>(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
+ owner->identity())));
DCHECK_EQ(executable, NOT_EXECUTABLE);
chunk = AllocatePagePooled(owner);
}
@@ -1050,15 +1147,15 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
return owner->InitializePage(chunk, executable);
}
-template Page*
-MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-template Page*
-MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-template Page*
-MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+ size_t size, PagedSpace* owner, Executability executable);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
@@ -1074,7 +1171,9 @@ MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
if (chunk == nullptr) return nullptr;
const int size = MemoryChunk::kPageSize;
const Address start = reinterpret_cast<Address>(chunk);
- const Address area_start = start + MemoryChunk::kObjectStartOffset;
+ const Address area_start =
+ start +
+ MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
const Address area_end = start + size;
// Pooled pages are always regular data pages.
DCHECK_NE(CODE_SPACE, owner->identity());
@@ -1098,26 +1197,6 @@ void MemoryAllocator::ZapBlock(Address start, size_t size,
}
}
-size_t MemoryAllocator::CodePageGuardStartOffset() {
- // We are guarding code pages: the first OS page after the header
- // will be protected as non-writable.
- return ::RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
-}
-
-size_t MemoryAllocator::CodePageGuardSize() { return GetCommitPageSize(); }
-
-size_t MemoryAllocator::CodePageAreaStartOffset() {
- // We are guarding code pages: the first OS page after the header
- // will be protected as non-writable.
- return CodePageGuardStartOffset() + CodePageGuardSize();
-}
-
-size_t MemoryAllocator::CodePageAreaEndOffset() {
- // We are guarding code pages: the last OS page will be protected as
- // non-writable.
- return Page::kPageSize - static_cast<int>(GetCommitPageSize());
-}
-
intptr_t MemoryAllocator::GetCommitPageSize() {
if (FLAG_v8_os_page_size != 0) {
DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
@@ -1127,17 +1206,31 @@ intptr_t MemoryAllocator::GetCommitPageSize() {
}
}
+base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
+ size_t size) {
+ size_t page_size = MemoryAllocator::GetCommitPageSize();
+ if (size < page_size + FreeSpace::kSize) {
+ return base::AddressRegion(0, 0);
+ }
+ Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
+ Address discardable_end = RoundDown(addr + size, page_size);
+ if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
+ return base::AddressRegion(discardable_start,
+ discardable_end - discardable_start);
+}
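ComputeDiscardMemoryArea keeps the first FreeSpace::kSize bytes of a freed block (so the free-space filler header stays accessible) and then returns only the whole commit pages contained in the remainder: round the start up and the end down to the page size, and bail out if no whole page is left. A standalone sketch of that arithmetic with assumed constants (4 KiB pages, a 32-byte filler header):

#include <cstddef>
#include <cstdint>
#include <cstdio>

using Address = uintptr_t;

struct Region { Address begin; size_t size; };

// Assumed constants for illustration only.
constexpr size_t kPageSize = 4096;       // commit page size
constexpr size_t kFreeSpaceSize = 32;    // stand-in for FreeSpace::kSize

Address RoundUp(Address a, size_t m) { return (a + m - 1) / m * m; }
Address RoundDown(Address a, size_t m) { return a / m * m; }

Region ComputeDiscardArea(Address addr, size_t size) {
  if (size < kPageSize + kFreeSpaceSize) return {0, 0};
  Address start = RoundUp(addr + kFreeSpaceSize, kPageSize);  // keep header
  Address end = RoundDown(addr + size, kPageSize);
  if (start >= end) return {0, 0};
  return {start, end - start};
}

int main() {
  // A 3-page block starting 100 bytes into a page: only the 2 fully
  // covered pages in the middle can be handed back to the OS.
  Region r = ComputeDiscardArea(0x10064, 3 * kPageSize);
  printf("begin=%#zx size=%zu\n", (size_t)r.begin, r.size);
}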
+
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
size_t commit_size,
size_t reserved_size) {
const size_t page_size = GetCommitPageSize();
// All addresses and sizes must be aligned to the commit page size.
- DCHECK(IsAddressAligned(start, page_size));
+ DCHECK(IsAligned(start, page_size));
DCHECK_EQ(0, commit_size % page_size);
DCHECK_EQ(0, reserved_size % page_size);
- const size_t guard_size = CodePageGuardSize();
- const size_t pre_guard_offset = CodePageGuardStartOffset();
- const size_t code_area_offset = CodePageAreaStartOffset();
+ const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
+ const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
+ const size_t code_area_offset =
+ MemoryChunkLayout::ObjectStartOffsetInCodePage();
// reserved_size includes two guard regions, commit_size does not.
DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
const Address pre_guard_page = start + pre_guard_offset;
@@ -1189,6 +1282,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
ReleaseInvalidatedSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
+ if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
if (IsPagedSpace()) {
Page* page = static_cast<Page*>(this);
@@ -1330,6 +1424,17 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
young_generation_bitmap_ = nullptr;
}
+void MemoryChunk::AllocateMarkingBitmap() {
+ DCHECK_NULL(marking_bitmap_);
+ marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
+}
+
+void MemoryChunk::ReleaseMarkingBitmap() {
+ DCHECK_NOT_NULL(marking_bitmap_);
+ free(marking_bitmap_);
+ marking_bitmap_ = nullptr;
+}
+
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -1380,7 +1485,7 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
: SpaceWithLinearArea(heap, space), executable_(executable) {
- area_size_ = MemoryAllocator::PageAreaSize(space);
+ area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
accounting_stats_.Clear();
}
@@ -1411,12 +1516,12 @@ void PagedSpace::RefillFreeList() {
if (is_local()) {
DCHECK_NE(this, p->owner());
PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
- base::LockGuard<base::Mutex> guard(owner->mutex());
+ base::MutexGuard guard(owner->mutex());
owner->RefineAllocatedBytesAfterSweeping(p);
owner->RemovePage(p);
added += AddPage(p);
} else {
- base::LockGuard<base::Mutex> guard(mutex());
+ base::MutexGuard guard(mutex());
DCHECK_EQ(this, p->owner());
RefineAllocatedBytesAfterSweeping(p);
added += RelinkFreeListCategories(p);
@@ -1428,7 +1533,7 @@ void PagedSpace::RefillFreeList() {
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
- base::LockGuard<base::Mutex> guard(mutex());
+ base::MutexGuard guard(mutex());
DCHECK(identity() == other->identity());
// Unmerged fields:
@@ -1491,7 +1596,7 @@ void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
}
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
- base::LockGuard<base::Mutex> guard(mutex());
+ base::MutexGuard guard(mutex());
// Check for pages that still contain free list entries. Bail out for smaller
// categories.
const int minimum_category =
@@ -1567,7 +1672,7 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
bool PagedSpace::Expand() {
// Always lock against the main space as we can only adjust capacity and
// pages concurrently for the main paged space.
- base::LockGuard<base::Mutex> guard(heap()->paged_space(identity())->mutex());
+ base::MutexGuard guard(heap()->paged_space(identity())->mutex());
const int size = AreaSize();
@@ -1842,7 +1947,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
// The first word should be a map, and we expect all map pointers to
// be in map space.
- Map* map = object->map();
+ Map map = object->map();
CHECK(map->IsMap());
CHECK(heap()->map_space()->Contains(map) ||
heap()->read_only_space()->Contains(map));
@@ -1864,7 +1969,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
end_of_previous_object = object->address() + size;
if (object->IsExternalString()) {
- ExternalString* external_string = ExternalString::cast(object);
+ ExternalString external_string = ExternalString::cast(object);
size_t size = external_string->ExternalPayloadSize();
external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
} else if (object->IsJSArrayBuffer()) {
@@ -2072,7 +2177,8 @@ bool SemiSpace::EnsureCurrentCapacity() {
actual_pages++;
current_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- Page::kAllocatableMemory, this, NOT_EXECUTABLE);
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
memory_chunk_list_.PushBack(current_page);
@@ -2139,8 +2245,10 @@ void NewSpace::UpdateLinearAllocationArea() {
Address new_top = to_space_.page_low();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.Reset(new_top, to_space_.page_high());
- original_top_ = top();
- original_limit_ = limit();
+ // The order of the following two stores is important.
+ // See the corresponding loads in ConcurrentMarking::Run.
+ original_limit_.store(limit(), std::memory_order_relaxed);
+ original_top_.store(top(), std::memory_order_release);
StartNextInlineAllocationStep();
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
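The two stores above are ordered deliberately: original_limit_ is published first with a relaxed store and original_top_ last with a release store, so a concurrent marker that acquires the new top is guaranteed to also see the matching limit (seeing a new limit with the old top is the harmless direction). A minimal standalone sketch of that publish pattern with std::atomic; the reader function is hypothetical, standing in for the load in ConcurrentMarking::Run:

#include <atomic>
#include <cstdint>

std::atomic<uintptr_t> original_top_{0};
std::atomic<uintptr_t> original_limit_{0};

// Writer (main thread): publish the limit first, then the top with release.
void PublishAllocationArea(uintptr_t top, uintptr_t limit) {
  original_limit_.store(limit, std::memory_order_relaxed);
  original_top_.store(top, std::memory_order_release);
}

// Reader (marker thread): the acquire load of top synchronizes with the
// release store above, so a new top value implies the new limit is visible.
void ReadAllocationArea(uintptr_t* top, uintptr_t* limit) {
  *top = original_top_.load(std::memory_order_acquire);
  *limit = original_limit_.load(std::memory_order_relaxed);
}

int main() {
  PublishAllocationArea(0x2000, 0x3000);
  uintptr_t top, limit;
  ReadAllocationArea(&top, &limit);  // top == 0x2000 implies limit == 0x3000
}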
@@ -2174,7 +2282,7 @@ void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
- DCHECK(!Page::IsAtObjectStart(top));
+ DCHECK(!OldSpace::IsAtPageStart(top));
// Do a step to account for memory allocated on previous page.
InlineAllocationStep(top, top, kNullAddress, 0);
@@ -2195,7 +2303,7 @@ bool NewSpace::AddFreshPage() {
bool NewSpace::AddFreshPageSynchronized() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
return AddFreshPage();
}
@@ -2234,7 +2342,9 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
}
size_t LargeObjectSpace::Available() {
- return ObjectSizeFor(heap()->memory_allocator()->Available());
+ // We return zero here since we cannot take advantage of already allocated
+ // large object memory.
+ return 0;
}
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
@@ -2334,7 +2444,7 @@ void NewSpace::Verify(Isolate* isolate) {
// The first word should be a map, and we expect all map pointers to
// be in map space or read-only space.
- Map* map = object->map();
+ Map map = object->map();
CHECK(map->IsMap());
CHECK(heap()->map_space()->Contains(map) ||
heap()->read_only_space()->Contains(map));
@@ -2352,7 +2462,7 @@ void NewSpace::Verify(Isolate* isolate) {
object->IterateBody(map, size, &visitor);
if (object->IsExternalString()) {
- ExternalString* external_string = ExternalString::cast(object);
+ ExternalString external_string = ExternalString::cast(object);
size_t size = external_string->ExternalPayloadSize();
external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
} else if (object->IsJSArrayBuffer()) {
@@ -2411,7 +2521,8 @@ bool SemiSpace::Commit() {
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- Page::kAllocatableMemory, this, NOT_EXECUTABLE);
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
@@ -2468,7 +2579,8 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
- Page::kAllocatableMemory, this, NOT_EXECUTABLE);
+ MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+ NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
@@ -2762,9 +2874,9 @@ void FreeListCategory::Free(Address start, size_t size_in_bytes,
void FreeListCategory::RepairFreeList(Heap* heap) {
FreeSpace* n = top();
while (n != nullptr) {
- Map** map_location = reinterpret_cast<Map**>(n->address());
+ ObjectSlot map_location(n->address());
if (*map_location == nullptr) {
- *map_location = ReadOnlyRoots(heap).free_space_map();
+ map_location.store(ReadOnlyRoots(heap).free_space_map());
} else {
DCHECK(*map_location == ReadOnlyRoots(heap).free_space_map());
}
@@ -2973,7 +3085,8 @@ size_t FreeListCategory::SumFreeList() {
size_t sum = 0;
FreeSpace* cur = top();
while (cur != nullptr) {
- DCHECK(cur->map() == page()->heap()->root(RootIndex::kFreeSpaceMap));
+ DCHECK_EQ(cur->map(),
+ page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap));
sum += cur->relaxed_read_size();
cur = cur->next();
}
@@ -3034,34 +3147,6 @@ size_t PagedSpace::SizeOfObjects() {
return Size() - (limit() - top());
}
-// After we have booted, we have created a map which represents free space
-// on the heap. If there was already a free list then the elements on it
-// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
-// fix them.
-void PagedSpace::RepairFreeListsAfterDeserialization() {
- free_list_.RepairLists(heap());
- // Each page may have a small free space that is not tracked by a free list.
- // Those free spaces still contain null as their map pointer.
- // Overwrite them with new fillers.
- for (Page* page : *this) {
- int size = static_cast<int>(page->wasted_memory());
- if (size == 0) {
- // If there is no wasted memory then all free space is in the free list.
- continue;
- }
- Address start = page->HighWaterMark();
- Address end = page->area_end();
- if (start < end - size) {
- // A region at the high watermark is already in free list.
- HeapObject* filler = HeapObject::FromAddress(start);
- CHECK(filler->IsFiller());
- start += filler->Size();
- }
- CHECK_EQ(size, static_cast<int>(end - start));
- heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
- }
-}
-
bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
@@ -3179,7 +3264,8 @@ void ReadOnlyPage::MakeHeaderRelocatable() {
void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
const size_t page_size = MemoryAllocator::GetCommitPageSize();
- const size_t area_start_offset = RoundUp(Page::kObjectStartOffset, page_size);
+ const size_t area_start_offset =
+ RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage(), page_size);
MemoryAllocator* memory_allocator = heap()->memory_allocator();
for (Page* p : *this) {
ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
@@ -3196,6 +3282,34 @@ void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
}
}
+// After we have booted, we have created a map which represents free space
+// on the heap. If there was already a free list then the elements on it
+// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
+// fix them.
+void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
+ free_list_.RepairLists(heap());
+ // Each page may have a small free space that is not tracked by a free list.
+ // Those free spaces still contain null as their map pointer.
+ // Overwrite them with new fillers.
+ for (Page* page : *this) {
+ int size = static_cast<int>(page->wasted_memory());
+ if (size == 0) {
+ // If there is no wasted memory then all free space is in the free list.
+ continue;
+ }
+ Address start = page->HighWaterMark();
+ Address end = page->area_end();
+ if (start < end - size) {
+ // A region at the high watermark is already in the free list.
+ HeapObject* filler = HeapObject::FromAddress(start);
+ CHECK(filler->IsFiller());
+ start += filler->Size();
+ }
+ CHECK_EQ(size, static_cast<int>(end - start));
+ heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
+ }
+}
+
void ReadOnlySpace::ClearStringPaddingIfNeeded() {
if (is_string_padding_cleared_) return;
@@ -3289,6 +3403,10 @@ void LargeObjectSpace::TearDown() {
}
}
+AllocationResult LargeObjectSpace::AllocateRaw(int object_size) {
+ return AllocateRaw(object_size, NOT_EXECUTABLE);
+}
+
AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
// Check if we want to force a GC before growing the old space further.
@@ -3326,13 +3444,6 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
HeapObject* object = page->GetObject();
- if (Heap::ShouldZapGarbage()) {
- // Make the object consistent so the heap can be verified in OldSpaceStep.
- // We only need to do this in debug builds or if verify_heap is on.
- reinterpret_cast<Object**>(object->address())[0] =
- ReadOnlyRoots(heap()).fixed_array_map();
- reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
- }
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
AllocationStep(object_size, object->address(), object_size);
@@ -3357,11 +3468,6 @@ Object* LargeObjectSpace::FindObject(Address a) {
return Smi::kZero; // Signaling not found.
}
-LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
- base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
- return FindPage(a);
-}
-
LargePage* LargeObjectSpace::FindPage(Address a) {
const Address key = MemoryChunk::FromAddress(a)->address();
auto it = chunk_map_.find(key);
@@ -3394,7 +3500,7 @@ void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
// There may be concurrent access on the chunk map. We have to take the lock
// here.
- base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
+ base::MutexGuard guard(&chunk_map_mutex_);
for (Address current = reinterpret_cast<Address>(page);
current < reinterpret_cast<Address>(page) + page->size();
current += MemoryChunk::kPageSize) {
@@ -3452,6 +3558,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* current = first_page();
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
+ // Right-trimming does not update the objects_size_ counter. We are lazily
+ // updating it after every GC.
objects_size_ = 0;
while (current) {
LargePage* next_current = current->next_page();
@@ -3490,7 +3598,6 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
}
-
bool LargeObjectSpace::Contains(HeapObject* object) {
Address address = object->address();
MemoryChunk* chunk = MemoryChunk::FromAddress(address);
@@ -3526,7 +3633,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// The first word should be a map, and we expect all map pointers to be
// in map space or read-only space.
- Map* map = object->map();
+ Map map = object->map();
CHECK(map->IsMap());
CHECK(heap()->map_space()->Contains(map) ||
heap()->read_only_space()->Contains(map));
@@ -3538,7 +3645,8 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
object->IsWeakFixedArray() || object->IsWeakArrayList() ||
object->IsPropertyArray() || object->IsByteArray() ||
object->IsFeedbackVector() || object->IsBigInt() ||
- object->IsFreeSpace() || object->IsFeedbackMetadata());
+ object->IsFreeSpace() || object->IsFeedbackMetadata() ||
+ object->IsContext());
// The object itself should look OK.
object->ObjectVerify(isolate);
@@ -3552,7 +3660,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
VerifyPointersVisitor code_visitor(heap());
object->IterateBody(map, object->Size(), &code_visitor);
} else if (object->IsFixedArray()) {
- FixedArray* array = FixedArray::cast(object);
+ FixedArray array = FixedArray::cast(object);
for (int j = 0; j < array->length(); j++) {
Object* element = array->get(j);
if (element->IsHeapObject()) {
@@ -3562,7 +3670,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
}
}
} else if (object->IsPropertyArray()) {
- PropertyArray* array = PropertyArray::cast(object);
+ PropertyArray array = PropertyArray::cast(object);
for (int j = 0; j < array->length(); j++) {
Object* property = array->get(j);
if (property->IsHeapObject()) {
@@ -3643,5 +3751,27 @@ void NewLargeObjectSpace::Flip() {
chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
}
}
+
+void NewLargeObjectSpace::FreeAllObjects() {
+ LargePage* current = first_page();
+ while (current) {
+ LargePage* next_current = current->next_page();
+ Unregister(current, static_cast<size_t>(current->GetObject()->Size()));
+ heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
+ current);
+ current = next_current;
+ }
+ // Right-trimming does not update the objects_size_ counter. We are lazily
+ // updating it after every GC.
+ objects_size_ = 0;
+}
+
+CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
+ : LargeObjectSpace(heap, CODE_LO_SPACE) {}
+
+AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
+ return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h
index 018e9da47b5..5334c14503b 100644
--- a/chromium/v8/src/heap/spaces.h
+++ b/chromium/v8/src/heap/spaces.h
@@ -15,6 +15,7 @@
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
+#include "src/base/export-template.h"
#include "src/base/iterator.h"
#include "src/base/list.h"
#include "src/base/platform/mutex.h"
@@ -25,6 +26,7 @@
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
#include "src/objects.h"
+#include "src/objects/heap-object.h"
#include "src/objects/map.h"
#include "src/utils.h"
@@ -45,6 +47,7 @@ class LinearAllocationArea;
class LocalArrayBufferTracker;
class MemoryAllocator;
class MemoryChunk;
+class MemoryChunkLayout;
class Page;
class PagedSpace;
class SemiSpace;
@@ -121,9 +124,6 @@ class Space;
#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
DCHECK((0 < size) && (size <= code_space->AreaSize()))
-#define DCHECK_PAGE_OFFSET(offset) \
- DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
-
enum FreeListCategoryType {
kTiniest,
kTiny,
@@ -239,6 +239,19 @@ class FreeListCategory {
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
+class MemoryChunkLayout {
+ public:
+ static size_t CodePageGuardStartOffset();
+ static size_t CodePageGuardSize();
+ static intptr_t ObjectStartOffsetInCodePage();
+ static intptr_t ObjectEndOffsetInCodePage();
+ static size_t AllocatableMemoryInCodePage();
+ static intptr_t ObjectStartOffsetInDataPage();
+ V8_EXPORT_PRIVATE static size_t AllocatableMemoryInDataPage();
+ static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
+ static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
+};
+
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
@@ -346,18 +359,17 @@ class MemoryChunk {
static const intptr_t kSizeOffset = 0;
static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
- static const intptr_t kAreaStartOffset = kFlagsOffset + kIntptrSize;
- static const intptr_t kAreaEndOffset = kAreaStartOffset + kPointerSize;
- static const intptr_t kReservationOffset = kAreaEndOffset + kPointerSize;
- static const intptr_t kOwnerOffset = kReservationOffset + 2 * kPointerSize;
+ static const intptr_t kMarkBitmapOffset = kFlagsOffset + kPointerSize;
+ static const intptr_t kReservationOffset = kMarkBitmapOffset + kPointerSize;
- static const size_t kMinHeaderSize =
+ static const size_t kHeaderSize =
kSizeOffset // NOLINT
+ kSizetSize // size_t size
+ kUIntptrSize // uintptr_t flags_
+ + kPointerSize // Bitmap* marking_bitmap_
+ + 3 * kPointerSize // VirtualMemory reservation_
+ kPointerSize // Address area_start_
+ kPointerSize // Address area_end_
- + 3 * kPointerSize // VirtualMemory reservation_
+ kPointerSize // Address owner_
+ kPointerSize // Heap* heap_
+ kIntptrSize // intptr_t progress_bar_
@@ -383,27 +395,9 @@ class MemoryChunk {
+ kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
+ kPointerSize; // Bitmap* young_generation_bitmap_
- // We add some more space to the computed header size to amount for missing
- // alignment requirements in our computation.
- // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
- static const size_t kHeaderSize = kMinHeaderSize;
-
- static const int kBodyOffset =
- CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
-
- // The start offset of the object area in a page. Aligned to both maps and
- // code alignment to be suitable for both. Also aligned to 32 words because
- // the marking bitmap is arranged in 32 bit chunks.
- static const int kObjectStartAlignment = 32 * kPointerSize;
- static const int kObjectStartOffset =
- kBodyOffset - 1 +
- (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
-
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
- static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
-
// Maximum number of nested code memory modification scopes.
// TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
static const int kMaxWriteUnprotectCounter = 4;
@@ -417,6 +411,10 @@ class MemoryChunk {
return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(o) &
~kAlignmentMask);
}
+ // Only works if the object is in the first kPageSize of the MemoryChunk.
+ static MemoryChunk* FromHeapObject(const HeapObjectPtr o) {
+ return reinterpret_cast<MemoryChunk*>(o.ptr() & ~kAlignmentMask);
+ }
void SetOldGenerationPageFlags(bool is_marking);
void SetYoungGenerationPageFlags(bool is_marking);
@@ -442,6 +440,8 @@ class MemoryChunk {
ExternalBackingStoreType type, MemoryChunk* from, MemoryChunk* to,
size_t amount);
+ void DiscardUnusedMemory(Address addr, size_t size);
+
Address address() const {
return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
}
@@ -524,6 +524,9 @@ class MemoryChunk {
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
+ void AllocateMarkingBitmap();
+ void ReleaseMarkingBitmap();
+
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
@@ -634,6 +637,8 @@ class MemoryChunk {
bool InLargeObjectSpace() const;
+ inline bool IsInNewLargeObjectSpace() const;
+
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
@@ -663,13 +668,15 @@ class MemoryChunk {
size_t size_;
uintptr_t flags_;
- // Start and end of allocatable memory on this chunk.
- Address area_start_;
- Address area_end_;
+ Bitmap* marking_bitmap_;
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
+ // Start and end of allocatable memory on this chunk.
+ Address area_start_;
+ Address area_end_;
+
// The space owning this memory chunk.
std::atomic<Space*> owner_;
@@ -752,10 +759,6 @@ class MemoryChunk {
static_assert(sizeof(std::atomic<intptr_t>) == kPointerSize,
"sizeof(std::atomic<intptr_t>) == kPointerSize");
-static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
- "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
-
-
// -----------------------------------------------------------------------------
// A page is a memory chunk of a size 512K. Large object pages may be larger.
//
@@ -786,7 +789,7 @@ class Page : public MemoryChunk {
// Returns the page containing the address provided. The address can
// potentially point right after the page. To also be safe for tagged values
// we subtract a hole word. The valid address ranges from
- // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
+ // [page_addr + area_start_ .. page_addr + kPageSize + kPointerSize].
static Page* FromAllocationAreaAddress(Address address) {
return Page::FromAddress(address - kPointerSize);
}
@@ -801,10 +804,6 @@ class Page : public MemoryChunk {
return (addr & kPageAlignmentMask) == 0;
}
- static bool IsAtObjectStart(Address addr) {
- return (addr & kPageAlignmentMask) == kObjectStartOffset;
- }
-
static Page* ConvertNewToOld(Page* old_page);
inline void MarkNeverAllocateForTesting();
@@ -826,8 +825,10 @@ class Page : public MemoryChunk {
// Returns the address for a given offset to the this page.
Address OffsetToAddress(size_t offset) {
- DCHECK_PAGE_OFFSET(offset);
- return address() + offset;
+ Address address_in_page = address() + offset;
+ DCHECK_GE(address_in_page, area_start_);
+ DCHECK_LT(address_in_page, area_end_);
+ return address_in_page;
}
// WaitUntilSweepingCompleted only works when concurrent sweeping is in
@@ -1202,11 +1203,12 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return chunk;
}
- void FreeQueuedChunks();
+ V8_EXPORT_PRIVATE void FreeQueuedChunks();
void CancelAndWaitForPendingTasks();
void PrepareForMarkCompact();
void EnsureUnmappingCompleted();
- void TearDown();
+ V8_EXPORT_PRIVATE void TearDown();
+ size_t NumberOfCommittedChunks();
int NumberOfChunks();
size_t CommittedBufferedMemory();
@@ -1229,13 +1231,13 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
template <ChunkQueueType type>
void AddMemoryChunkSafe(MemoryChunk* chunk) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
chunks_[type].push_back(chunk);
}
template <ChunkQueueType type>
MemoryChunk* GetMemoryChunkSafe() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
if (chunks_[type].empty()) return nullptr;
MemoryChunk* chunk = chunks_[type].back();
chunks_[type].pop_back();
@@ -1273,26 +1275,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
kPooledAndQueue,
};
- static size_t CodePageGuardStartOffset();
-
- static size_t CodePageGuardSize();
-
- static size_t CodePageAreaStartOffset();
-
- static size_t CodePageAreaEndOffset();
-
- static size_t CodePageAreaSize() {
- return CodePageAreaEndOffset() - CodePageAreaStartOffset();
- }
-
- static size_t PageAreaSize(AllocationSpace space) {
- DCHECK_NE(LO_SPACE, space);
- return (space == CODE_SPACE) ? CodePageAreaSize()
- : Page::kAllocatableMemory;
- }
-
static intptr_t GetCommitPageSize();
+ // Computes the memory area of discardable memory within a given memory area
+ // [addr, addr+size) and returns the result as base::AddressRegion. If the
+ // memory is not discardable, the returned base::AddressRegion is empty.
+ static base::AddressRegion ComputeDiscardMemoryArea(Address addr,
+ size_t size);
+
MemoryAllocator(Isolate* isolate, size_t max_capacity,
size_t code_range_size);
@@ -1303,12 +1293,14 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// should be tried first.
template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
typename SpaceType>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
Executability executable);
template <MemoryAllocator::FreeMode mode = kFull>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void Free(MemoryChunk* chunk);
// Returns allocated spaces in bytes.
@@ -1323,11 +1315,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return capacity_ < size ? 0 : capacity_ - size;
}
- // Returns maximum available bytes that the old space can have.
- size_t MaxAvailable() {
- return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
- }
-
// Returns an indication of whether a pointer is in a space that has
// been allocated by this MemoryAllocator.
V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
@@ -1908,7 +1895,10 @@ class V8_EXPORT_PRIVATE FreeList {
// The size range of blocks, in bytes.
static const size_t kMinBlockSize = 3 * kPointerSize;
- static const size_t kMaxBlockSize = Page::kAllocatableMemory;
+
+ // This is a conservative upper bound. The actual maximum block size takes
+ // padding and alignment of data and code pages into account.
+ static const size_t kMaxBlockSize = Page::kPageSize;
static const size_t kTiniestListMax = 0xa * kPointerSize;
static const size_t kTinyListMax = 0x1f * kPointerSize;
@@ -2093,10 +2083,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// Does the space need executable memory?
Executability executable() { return executable_; }
- // During boot the free_space_map is created, and afterwards we may need
- // to write it into the free list nodes that were already created.
- void RepairFreeListsAfterDeserialization();
-
// Prepares for a mark-compact GC.
void PrepareForMarkCompact();
@@ -2604,7 +2590,8 @@ class NewSpace : public SpaceWithLinearArea {
// Return the allocated bytes in the active semispace.
size_t Size() override {
DCHECK_GE(top(), to_space_.page_low());
- return to_space_.pages_used() * Page::kAllocatableMemory +
+ return to_space_.pages_used() *
+ MemoryChunkLayout::AllocatableMemoryInDataPage() +
static_cast<size_t>(top() - to_space_.page_low());
}
@@ -2614,7 +2601,7 @@ class NewSpace : public SpaceWithLinearArea {
size_t Capacity() {
SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return (to_space_.current_capacity() / Page::kPageSize) *
- Page::kAllocatableMemory;
+ MemoryChunkLayout::AllocatableMemoryInDataPage();
}
// Return the current size of a semispace, allocatable and non-allocatable
@@ -2669,7 +2656,7 @@ class NewSpace : public SpaceWithLinearArea {
}
while (current_page != last_page) {
DCHECK_NE(current_page, age_mark_page);
- allocated += Page::kAllocatableMemory;
+ allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
current_page = current_page->next_page();
}
DCHECK_GE(top(), current_page->area_start());
@@ -2701,13 +2688,17 @@ class NewSpace : public SpaceWithLinearArea {
}
void ResetOriginalTop() {
- DCHECK_GE(top(), original_top());
- DCHECK_LE(top(), original_limit());
- original_top_ = top();
+ DCHECK_GE(top(), original_top_);
+ DCHECK_LE(top(), original_limit_);
+ original_top_.store(top(), std::memory_order_release);
}
- Address original_top() { return original_top_; }
- Address original_limit() { return original_limit_; }
+ Address original_top_acquire() {
+ return original_top_.load(std::memory_order_acquire);
+ }
+ Address original_limit_relaxed() {
+ return original_limit_.load(std::memory_order_relaxed);
+ }
// Return the address of the first allocatable address in the active
// semispace. This may be the address where the first object resides.
@@ -2741,7 +2732,6 @@ class NewSpace : public SpaceWithLinearArea {
void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
inline bool ToSpaceContainsSlow(Address a);
- inline bool FromSpaceContainsSlow(Address a);
inline bool ToSpaceContains(Object* o);
inline bool FromSpaceContains(Object* o);
@@ -2874,6 +2864,11 @@ class OldSpace : public PagedSpace {
// Creates an old space object. The constructor does not allocate pages
// from OS.
explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
+
+ static bool IsAtPageStart(Address addr) {
+ return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
+ MemoryChunkLayout::ObjectStartOffsetInDataPage();
+ }
};
// -----------------------------------------------------------------------------
@@ -2886,7 +2881,6 @@ class CodeSpace : public PagedSpace {
explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
};
-
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
@@ -2940,6 +2934,10 @@ class ReadOnlySpace : public PagedSpace {
void ClearStringPaddingIfNeeded();
void MarkAsReadOnly();
+ // During boot the free_space_map is created, and afterwards we may need
+ // to write it into the free list nodes that were already created.
+ void RepairFreeListsAfterDeserialization();
+
private:
void MarkAsReadWrite();
void SetPermissionsForPages(PageAllocator::Permission access);
@@ -2954,9 +2952,7 @@ class ReadOnlySpace : public PagedSpace {
// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
-// managed by the large object space. A large object is allocated from OS
-// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
-// A large object always starts at Page::kObjectStartOffset to a page.
+// managed by the large object space.
// Large objects do not move during garbage collections.
class LargeObjectSpace : public Space {
@@ -2971,13 +2967,8 @@ class LargeObjectSpace : public Space {
// Releases internal resources, frees objects in this space.
void TearDown();
- static size_t ObjectSizeFor(size_t chunk_size) {
- if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
- return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
- }
-
- V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
- Executability executable);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRaw(int object_size);
// Available bytes for objects in this space.
size_t Available() override;
@@ -2994,9 +2985,6 @@ class LargeObjectSpace : public Space {
// The function iterates through all objects in this space, may be slow.
Object* FindObject(Address a);
- // Takes the chunk_map_mutex_ and calls FindPage after that.
- LargePage* FindPageThreadSafe(Address a);
-
// Finds a large object page containing the given address, returns nullptr
// if such a page doesn't exist.
LargePage* FindPage(Address a);
@@ -3049,12 +3037,14 @@ class LargeObjectSpace : public Space {
protected:
LargePage* AllocateLargePage(int object_size, Executability executable);
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
+ Executability executable);
- private:
size_t size_; // allocated bytes
int page_count_; // number of chunks
size_t objects_size_; // size of objects
+ private:
// The chunk_map_mutex_ has to be used when the chunk map is accessed
// concurrently.
base::Mutex chunk_map_mutex_;
@@ -3075,6 +3065,16 @@ class NewLargeObjectSpace : public LargeObjectSpace {
size_t Available() override;
void Flip();
+
+ void FreeAllObjects();
+};
+
+class CodeLargeObjectSpace : public LargeObjectSpace {
+ public:
+ explicit CodeLargeObjectSpace(Heap* heap);
+
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRaw(int object_size);
};
class LargeObjectIterator : public ObjectIterator {
@@ -3089,9 +3089,9 @@ class LargeObjectIterator : public ObjectIterator {
// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
-class MemoryChunkIterator {
+class OldGenerationMemoryChunkIterator {
public:
- inline explicit MemoryChunkIterator(Heap* heap);
+ inline explicit OldGenerationMemoryChunkIterator(Heap* heap);
// Return nullptr when the iterator is done.
inline MemoryChunk* next();
@@ -3102,6 +3102,7 @@ class MemoryChunkIterator {
kMapState,
kCodeState,
kLargeObjectState,
+ kCodeLargeObjectState,
kFinishedState
};
Heap* heap_;
@@ -3110,6 +3111,7 @@ class MemoryChunkIterator {
PageIterator code_iterator_;
PageIterator map_iterator_;
LargePageIterator lo_iterator_;
+ LargePageIterator code_lo_iterator_;
};
} // namespace internal
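
The MemoryChunkLayout helpers added above replace the old Page::kObjectStartOffset and Page::kAllocatableMemory constants with per-space computations (plain data pages versus code pages with their guard regions). A minimal standalone sketch of the underlying arithmetic, assuming illustrative values for the page size, header footprint and object alignment rather than V8's real ones:

#include <cstddef>
#include <cstdio>

namespace sketch {

// Assumed values for illustration only; V8 derives these from the actual
// MemoryChunk header layout and from platform properties.
constexpr size_t kPageSize = size_t{512} * 1024;   // 512 KiB pages
constexpr size_t kChunkHeaderSize = 1024;          // assumed header footprint
constexpr size_t kObjectAlignment = 32 * sizeof(void*);

constexpr size_t RoundUpTo(size_t value, size_t alignment) {
  // Works for power-of-two alignments only.
  return (value + alignment - 1) & ~(alignment - 1);
}

// Analogous to ObjectStartOffsetInDataPage(): objects start right after the
// chunk header, rounded up to the object alignment.
constexpr size_t ObjectStartOffsetInDataPage() {
  return RoundUpTo(kChunkHeaderSize, kObjectAlignment);
}

// Analogous to AllocatableMemoryInDataPage(): whatever is left of the page
// once the aligned header has been subtracted.
constexpr size_t AllocatableMemoryInDataPage() {
  return kPageSize - ObjectStartOffsetInDataPage();
}

}  // namespace sketch

int main() {
  std::printf("object start offset: %zu\n", sketch::ObjectStartOffsetInDataPage());
  std::printf("allocatable memory:  %zu\n", sketch::AllocatableMemoryInDataPage());
  return 0;
}
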
diff --git a/chromium/v8/src/heap/store-buffer.cc b/chromium/v8/src/heap/store-buffer.cc
index f737eb099d0..00ee993f2c5 100644
--- a/chromium/v8/src/heap/store-buffer.cc
+++ b/chromium/v8/src/heap/store-buffer.cc
@@ -6,6 +6,7 @@
#include <algorithm>
+#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/template-utils.h"
#include "src/counters.h"
@@ -32,9 +33,16 @@ StoreBuffer::StoreBuffer(Heap* heap)
void StoreBuffer::SetUp() {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- const size_t requested_size = kStoreBufferSize * kStoreBuffers;
+ // Round up the requested size in order to fulfill the VirtualMemory's
+ // requirements on the requested size alignment. This may cause a bit of
+ // memory wastage if the actual CommitPageSize() is bigger than the
+ // kMinExpectedOSPageSize value but this is a trade-off for keeping the
+ // store buffer overflow check in write barriers cheap.
+ const size_t requested_size = RoundUp(kStoreBufferSize * kStoreBuffers,
+ page_allocator->CommitPageSize());
// Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
// use a bit test to detect the ends of the buffers.
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kStoreBufferSize));
const size_t alignment =
std::max<size_t>(kStoreBufferSize, page_allocator->AllocatePageSize());
void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
@@ -133,7 +141,7 @@ int StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
}
void StoreBuffer::FlipStoreBuffers() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
lazy_top_[current_] = top_;
@@ -155,7 +163,7 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
// We are taking the chunk map mutex here because the page lookup of addr
// below may require us to check if addr is part of a large page.
- base::LockGuard<base::Mutex> guard(heap_->lo_space()->chunk_map_mutex());
+ base::MutexGuard guard(heap_->lo_space()->chunk_map_mutex());
for (Address* current = start_[index]; current < lazy_top_[index];
current++) {
Address addr = *current;
@@ -184,7 +192,7 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
}
void StoreBuffer::MoveAllEntriesToRememberedSet() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
lazy_top_[current_] = top_;
@@ -193,7 +201,7 @@ void StoreBuffer::MoveAllEntriesToRememberedSet() {
}
void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
int other = (current_ + 1) % kStoreBuffers;
MoveEntriesToRememberedSet(other);
task_running_ = false;
diff --git a/chromium/v8/src/heap/store-buffer.h b/chromium/v8/src/heap/store-buffer.h
index 4dbb471b7ab..3f58b6cf29b 100644
--- a/chromium/v8/src/heap/store-buffer.h
+++ b/chromium/v8/src/heap/store-buffer.h
@@ -27,9 +27,11 @@ class StoreBuffer {
public:
enum StoreBufferMode { IN_GC, NOT_IN_GC };
- static const int kStoreBufferSize = 1 << (11 + kPointerSizeLog2);
- static const int kStoreBufferMask = kStoreBufferSize - 1;
static const int kStoreBuffers = 2;
+ static const int kStoreBufferSize =
+ Max(static_cast<int>(kMinExpectedOSPageSize / kStoreBuffers),
+ 1 << (11 + kPointerSizeLog2));
+ static const int kStoreBufferMask = kStoreBufferSize - 1;
static const intptr_t kDeletionTag = 1;
V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);
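
The reworked kStoreBufferSize above stays a power of two (enforced by the STATIC_ASSERT in store-buffer.cc), and the buffers are allocated at a kStoreBufferSize-aligned base, so the write barrier can detect a full buffer with a single bit test instead of comparing against an end pointer. A rough sketch of that check, using an assumed buffer size rather than V8's computed one:

#include <cassert>
#include <cstdint>

// Assumed power-of-two size for illustration; V8 derives its value from
// kMinExpectedOSPageSize and the pointer size.
constexpr uintptr_t kStoreBufferSize = uintptr_t{1} << 14;
constexpr uintptr_t kStoreBufferMask = kStoreBufferSize - 1;

// With the buffer starting at a kStoreBufferSize-aligned address, any slot
// address whose offset within the buffer is zero marks a buffer boundary,
// so "buffer is full" is a single AND plus compare.
bool StoreBufferIsFull(uintptr_t next_slot) {
  return (next_slot & kStoreBufferMask) == 0;
}

int main() {
  const uintptr_t base = 16 * kStoreBufferSize;        // aligned base
  assert(!StoreBufferIsFull(base + sizeof(void*)));    // still room
  assert(StoreBufferIsFull(base + kStoreBufferSize));  // reached the end
  return 0;
}
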
diff --git a/chromium/v8/src/heap/sweeper.cc b/chromium/v8/src/heap/sweeper.cc
index 4f5ad18bec5..65643c518bd 100644
--- a/chromium/v8/src/heap/sweeper.cc
+++ b/chromium/v8/src/heap/sweeper.cc
@@ -15,6 +15,20 @@
namespace v8 {
namespace internal {
+Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
+ : heap_(heap),
+ marking_state_(marking_state),
+ num_tasks_(0),
+ pending_sweeper_tasks_semaphore_(0),
+ incremental_sweeper_pending_(false),
+ sweeping_in_progress_(false),
+ num_sweeping_tasks_(0),
+ stop_sweeper_tasks_(false),
+ iterability_task_semaphore_(0),
+ iterability_in_progress_(false),
+ iterability_task_started_(false),
+ should_reduce_memory_(false) {}
+
Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
: sweeper_(sweeper) {
sweeper_->stop_sweeper_tasks_ = true;
@@ -136,6 +150,7 @@ void Sweeper::StartSweeping() {
CHECK(!stop_sweeper_tasks_);
sweeping_in_progress_ = true;
iterability_in_progress_ = true;
+ should_reduce_memory_ = heap_->ShouldReduceMemory();
MajorNonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
@@ -181,7 +196,7 @@ void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
}
Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
if (!list.empty()) {
auto last_page = list.back();
@@ -196,7 +211,7 @@ void Sweeper::AbortAndWaitForTasks() {
for (int i = 0; i < num_tasks_; i++) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
- CancelableTaskManager::kTaskAborted) {
+ TryAbortResult::kTaskAborted) {
pending_sweeper_tasks_semaphore_.Wait();
} else {
// Aborted case.
@@ -249,7 +264,6 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
ArrayBufferTracker::FreeDead(p, marking_state_);
Address free_start = p->area_start();
- DCHECK_EQ(0, free_start % (32 * kPointerSize));
// If we use the skip list for code space pages, we have to lock the skip
// list because it could be accessed concurrently by the runtime or the
@@ -290,6 +304,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
ClearFreedMemoryMode::kClearFreedMemory);
}
+ if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
@@ -300,7 +315,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
static_cast<uint32_t>(free_end - p->address())));
}
}
- Map* map = object->synchronized_map();
+ Map map = object->synchronized_map();
int size = object->SizeFromMap(map);
live_bytes += size;
if (rebuild_skip_list) {
@@ -330,7 +345,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
ClearRecordedSlots::kNo,
ClearFreedMemoryMode::kClearFreedMemory);
}
-
+ if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
SlotSet::KEEP_EMPTY_BUCKETS);
RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
@@ -346,11 +361,11 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
if (!free_ranges.empty()) {
TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
if (old_to_new != nullptr) {
- old_to_new->RemoveInvaldSlots(free_ranges);
+ old_to_new->ClearInvalidSlots(free_ranges);
}
TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
if (old_to_old != nullptr) {
- old_to_old->RemoveInvaldSlots(free_ranges);
+ old_to_old->ClearInvalidSlots(free_ranges);
}
}
@@ -411,7 +426,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
DCHECK(IsValidSweepingSpace(identity));
int max_freed = 0;
{
- base::LockGuard<base::Mutex> guard(page->mutex());
+ base::MutexGuard guard(page->mutex());
// If this page was already swept in the meantime, we can return here.
if (page->SweepingDone()) return 0;
@@ -438,7 +453,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
}
{
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
}
return max_freed;
@@ -457,7 +472,7 @@ void Sweeper::ScheduleIncrementalSweepingTask() {
void Sweeper::AddPage(AllocationSpace space, Page* page,
Sweeper::AddPageMode mode) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
DCHECK(IsValidSweepingSpace(space));
DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
if (mode == Sweeper::REGULAR) {
@@ -483,7 +498,7 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
}
Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
- base::LockGuard<base::Mutex> guard(&mutex_);
+ base::MutexGuard guard(&mutex_);
DCHECK(IsValidSweepingSpace(space));
int space_index = GetSweepSpaceIndex(space);
Page* page = nullptr;
@@ -509,7 +524,7 @@ void Sweeper::EnsureIterabilityCompleted() {
if (FLAG_concurrent_sweeping && iterability_task_started_) {
if (heap_->isolate()->cancelable_task_manager()->TryAbort(
- iterability_task_id_) != CancelableTaskManager::kTaskAborted) {
+ iterability_task_id_) != TryAbortResult::kTaskAborted) {
iterability_task_semaphore_.Wait();
}
iterability_task_started_ = false;
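
The new should_reduce_memory_ path above hands each freed range to Page::DiscardUnusedMemory. As the ComputeDiscardMemoryArea comment in spaces.h explains, only whole commit pages inside such a range can actually be handed back to the OS. A small sketch of that clipping computation, assuming a 4 KiB commit page size for illustration:

#include <cstdint>
#include <cstdio>

struct AddressRegion {
  uintptr_t begin;
  size_t size;
};

// Assumed commit page size; V8 asks the platform via GetCommitPageSize().
constexpr uintptr_t kCommitPageSize = 4096;

// Keep only the whole commit pages contained in [addr, addr + size); those
// are the bytes the OS can be asked to discard. Returns an empty region if
// the range does not cover a full commit page.
AddressRegion ComputeDiscardArea(uintptr_t addr, size_t size) {
  const uintptr_t discard_begin =
      (addr + kCommitPageSize - 1) & ~(kCommitPageSize - 1);
  const uintptr_t discard_end = (addr + size) & ~(kCommitPageSize - 1);
  if (discard_begin >= discard_end) return {0, 0};
  return {discard_begin, static_cast<size_t>(discard_end - discard_begin)};
}

int main() {
  // A range three pages long that starts 100 bytes into a page: only the two
  // fully covered pages are discardable.
  AddressRegion region = ComputeDiscardArea(0x10000 + 100, 3 * kCommitPageSize);
  std::printf("discardable: %zu bytes at %#llx\n", region.size,
              static_cast<unsigned long long>(region.begin));
  return 0;
}
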
diff --git a/chromium/v8/src/heap/sweeper.h b/chromium/v8/src/heap/sweeper.h
index 90a429b3eac..ff806a0af62 100644
--- a/chromium/v8/src/heap/sweeper.h
+++ b/chromium/v8/src/heap/sweeper.h
@@ -77,18 +77,7 @@ class Sweeper {
};
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
- Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
- : heap_(heap),
- marking_state_(marking_state),
- num_tasks_(0),
- pending_sweeper_tasks_semaphore_(0),
- incremental_sweeper_pending_(false),
- sweeping_in_progress_(false),
- num_sweeping_tasks_(0),
- stop_sweeper_tasks_(false),
- iterability_task_semaphore_(0),
- iterability_in_progress_(false),
- iterability_task_started_(false) {}
+ Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state);
bool sweeping_in_progress() const { return sweeping_in_progress_; }
@@ -196,6 +185,7 @@ class Sweeper {
base::Semaphore iterability_task_semaphore_;
bool iterability_in_progress_;
bool iterability_task_started_;
+ bool should_reduce_memory_;
};
} // namespace internal
diff --git a/chromium/v8/src/heap/worklist.h b/chromium/v8/src/heap/worklist.h
index db6e572df7d..c086b87e599 100644
--- a/chromium/v8/src/heap/worklist.h
+++ b/chromium/v8/src/heap/worklist.h
@@ -62,6 +62,7 @@ class Worklist {
Worklist() : Worklist(kMaxNumTasks) {}
explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
+ DCHECK_LE(num_tasks, kMaxNumTasks);
for (int i = 0; i < num_tasks_; i++) {
private_push_segment(i) = NewSegment();
private_pop_segment(i) = NewSegment();
@@ -282,13 +283,13 @@ class Worklist {
}
V8_INLINE void Push(Segment* segment) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
segment->set_next(top_);
set_top(segment);
}
V8_INLINE bool Pop(Segment** segment) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (top_ != nullptr) {
*segment = top_;
set_top(top_->next());
@@ -302,7 +303,7 @@ class Worklist {
}
void Clear() {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
Segment* current = top_;
while (current != nullptr) {
Segment* tmp = current;
@@ -315,7 +316,7 @@ class Worklist {
// See Worklist::Update.
template <typename Callback>
void Update(Callback callback) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
Segment* prev = nullptr;
Segment* current = top_;
while (current != nullptr) {
@@ -339,7 +340,7 @@ class Worklist {
// See Worklist::Iterate.
template <typename Callback>
void Iterate(Callback callback) {
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
for (Segment* current = top_; current != nullptr;
current = current->next()) {
current->Iterate(callback);
@@ -349,7 +350,7 @@ class Worklist {
std::pair<Segment*, Segment*> Extract() {
Segment* top = nullptr;
{
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
if (top_ == nullptr) return std::make_pair(nullptr, nullptr);
top = top_;
set_top(nullptr);
@@ -362,7 +363,7 @@ class Worklist {
void MergeList(Segment* start, Segment* end) {
if (start == nullptr) return;
{
- base::LockGuard<base::Mutex> guard(&lock_);
+ base::MutexGuard guard(&lock_);
end->set_next(top_);
set_top(start);
}
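
Aside from the DCHECK on the task count, the Worklist changes above are the mechanical switch from base::LockGuard<base::Mutex> to the shorter base::MutexGuard alias; the protected structure is an intrusive stack of segments shared by all tasks. A stripped-down sketch of that global-pool pattern using standard-library locking, with Segment reduced to a stand-in type:

#include <mutex>

struct Segment {
  Segment* next = nullptr;
};

class GlobalPool {
 public:
  // Push a filled segment onto the shared stack.
  void Push(Segment* segment) {
    std::lock_guard<std::mutex> guard(lock_);
    segment->next = top_;
    top_ = segment;
  }

  // Pop a segment for a task to drain; returns false when the pool is empty.
  bool Pop(Segment** segment) {
    std::lock_guard<std::mutex> guard(lock_);
    if (top_ == nullptr) return false;
    *segment = top_;
    top_ = top_->next;
    return true;
  }

 private:
  std::mutex lock_;
  Segment* top_ = nullptr;
};

int main() {
  GlobalPool pool;
  Segment a, b;
  pool.Push(&a);
  pool.Push(&b);
  Segment* out = nullptr;
  while (pool.Pop(&out)) {
    // A worker would process the segment's entries here.
  }
  return 0;
}
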