Diffstat (limited to 'chromium/v8/src/heap')
-rw-r--r--  chromium/v8/src/heap/array-buffer-tracker.h | 6
-rw-r--r--  chromium/v8/src/heap/code-stats.cc | 2
-rw-r--r--  chromium/v8/src/heap/concurrent-marking.cc | 27
-rw-r--r--  chromium/v8/src/heap/concurrent-marking.h | 2
-rw-r--r--  chromium/v8/src/heap/embedder-tracing.h | 4
-rw-r--r--  chromium/v8/src/heap/factory-inl.h | 32
-rw-r--r--  chromium/v8/src/heap/factory.cc | 737
-rw-r--r--  chromium/v8/src/heap/factory.h | 315
-rw-r--r--  chromium/v8/src/heap/gc-tracer.cc | 40
-rw-r--r--  chromium/v8/src/heap/gc-tracer.h | 2
-rw-r--r--  chromium/v8/src/heap/heap-inl.h | 20
-rw-r--r--  chromium/v8/src/heap/heap-write-barrier-inl.h | 27
-rw-r--r--  chromium/v8/src/heap/heap-write-barrier.h | 3
-rw-r--r--  chromium/v8/src/heap/heap.cc | 226
-rw-r--r--  chromium/v8/src/heap/heap.h | 238
-rw-r--r--  chromium/v8/src/heap/incremental-marking.cc | 31
-rw-r--r--  chromium/v8/src/heap/invalidated-slots.h | 2
-rw-r--r--  chromium/v8/src/heap/mark-compact-inl.h | 14
-rw-r--r--  chromium/v8/src/heap/mark-compact.cc | 205
-rw-r--r--  chromium/v8/src/heap/mark-compact.h | 19
-rw-r--r--  chromium/v8/src/heap/marking.h | 2
-rw-r--r--  chromium/v8/src/heap/object-stats.cc | 164
-rw-r--r--  chromium/v8/src/heap/object-stats.h | 23
-rw-r--r--  chromium/v8/src/heap/objects-visiting-inl.h | 1
-rw-r--r--  chromium/v8/src/heap/objects-visiting.cc | 2
-rw-r--r--  chromium/v8/src/heap/read-only-heap.cc | 63
-rw-r--r--  chromium/v8/src/heap/read-only-heap.h | 38
-rw-r--r--  chromium/v8/src/heap/scavenger-inl.h | 15
-rw-r--r--  chromium/v8/src/heap/scavenger.cc | 129
-rw-r--r--  chromium/v8/src/heap/scavenger.h | 17
-rw-r--r--  chromium/v8/src/heap/setup-heap-internal.cc | 115
-rw-r--r--  chromium/v8/src/heap/slot-set.h | 7
-rw-r--r--  chromium/v8/src/heap/spaces-inl.h | 6
-rw-r--r--  chromium/v8/src/heap/spaces.cc | 30
-rw-r--r--  chromium/v8/src/heap/spaces.h | 147
-rw-r--r--  chromium/v8/src/heap/sweeper.h | 8
-rw-r--r--  chromium/v8/src/heap/worklist.h | 2
37 files changed, 1675 insertions(+), 1046 deletions(-)
diff --git a/chromium/v8/src/heap/array-buffer-tracker.h b/chromium/v8/src/heap/array-buffer-tracker.h
index 3d1c2cab76f..dc29b95f368 100644
--- a/chromium/v8/src/heap/array-buffer-tracker.h
+++ b/chromium/v8/src/heap/array-buffer-tracker.h
@@ -53,7 +53,7 @@ class ArrayBufferTracker : public AllStatic {
static bool ProcessBuffers(Page* page, ProcessingMode mode);
// Returns whether a buffer is currently tracked.
- static bool IsTracked(JSArrayBuffer buffer);
+ V8_EXPORT_PRIVATE static bool IsTracked(JSArrayBuffer buffer);
// Tears down the tracker and frees up all registered array buffers.
static void TearDown(Heap* heap);
@@ -110,8 +110,8 @@ class LocalArrayBufferTracker {
// HeapNumber. The reason for tracking the length is that in the case of
// length being a HeapNumber, the buffer and its length may be stored on
// different memory pages, making it impossible to guarantee order of freeing.
- typedef std::unordered_map<JSArrayBuffer, JSArrayBuffer::Allocation, Hasher>
- TrackingData;
+ using TrackingData =
+ std::unordered_map<JSArrayBuffer, JSArrayBuffer::Allocation, Hasher>;
// Internal version of add that does not update counters. Requires separate
// logic for updating external memory counters.
diff --git a/chromium/v8/src/heap/code-stats.cc b/chromium/v8/src/heap/code-stats.cc
index 6c20e699fc6..710ebd4fa15 100644
--- a/chromium/v8/src/heap/code-stats.cc
+++ b/chromium/v8/src/heap/code-stats.cc
@@ -202,7 +202,7 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject obj,
}
Code code = Code::cast(obj);
- CodeCommentsIterator cit(code->code_comments());
+ CodeCommentsIterator cit(code->code_comments(), code->code_comments_size());
int delta = 0;
int prev_pc_offset = 0;
while (cit.HasCurrent()) {
diff --git a/chromium/v8/src/heap/concurrent-marking.cc b/chromium/v8/src/heap/concurrent-marking.cc
index c993eadea0f..42f0f9f5625 100644
--- a/chromium/v8/src/heap/concurrent-marking.cc
+++ b/chromium/v8/src/heap/concurrent-marking.cc
@@ -93,7 +93,11 @@ class ConcurrentMarkingVisitor final
task_id_(task_id),
embedder_tracing_enabled_(embedder_tracing_enabled),
mark_compact_epoch_(mark_compact_epoch),
- is_forced_gc_(is_forced_gc) {}
+ is_forced_gc_(is_forced_gc) {
+ // It is not safe to access flags from concurrent marking visitor. So
+ // set the bytecode flush mode based on the flags here
+ bytecode_flush_mode_ = Heap::GetBytecodeFlushMode();
+ }
template <typename T>
static V8_INLINE T Cast(HeapObject object) {
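The constructor comment above describes a common pattern for background work: snapshot any configuration derived from mutable global flags once, on the constructing thread, and have the concurrent task read only the cached copy. A minimal sketch of that pattern, with illustrative names rather than V8's:

#include <thread>

enum class FlushMode { kDoNotFlush, kFlush };

// Pretend global flag; assume it is only safe to read on the main thread.
static bool g_flag_flush_bytecode = true;

class ConcurrentVisitor {
 public:
  ConcurrentVisitor()
      // Snapshot the flag-derived mode here, on the constructing thread.
      : flush_mode_(g_flag_flush_bytecode ? FlushMode::kFlush
                                          : FlushMode::kDoNotFlush) {}

  void Run() const {
    // The worker consults only the cached copy, never the global flag.
    if (flush_mode_ == FlushMode::kFlush) {
      // ... treat old bytecode as flushable ...
    }
  }

 private:
  const FlushMode flush_mode_;
};

int main() {
  ConcurrentVisitor visitor;  // constructed on the main thread
  std::thread worker([&] { visitor.Run(); });
  worker.join();
}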
@@ -228,8 +232,7 @@ class ConcurrentMarkingVisitor final
if (marking_state_.IsBlackOrGrey(target)) {
// Record the slot inside the JSWeakRef, since the
// VisitJSObjectSubclass above didn't visit it.
- ObjectSlot slot =
- HeapObject::RawField(weak_ref, JSWeakRef::kTargetOffset);
+ ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
MarkCompactCollector::RecordSlot(weak_ref, slot, target);
} else {
// JSWeakRef points to a potentially dead object. We have to process
@@ -251,8 +254,7 @@ class ConcurrentMarkingVisitor final
if (marking_state_.IsBlackOrGrey(target)) {
// Record the slot inside the WeakCell, since the IterateBody above
// didn't visit it.
- ObjectSlot slot =
- HeapObject::RawField(weak_cell, WeakCell::kTargetOffset);
+ ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
MarkCompactCollector::RecordSlot(weak_cell, slot, target);
} else {
// WeakCell points to a potentially dead object. We have to process
@@ -340,8 +342,7 @@ class ConcurrentMarkingVisitor final
int start = static_cast<int>(current_progress_bar);
int end = Min(size, start + kProgressBarScanningChunk);
if (start < end) {
- VisitPointers(object, HeapObject::RawField(object, start),
- HeapObject::RawField(object, end));
+ VisitPointers(object, object.RawField(start), object.RawField(end));
// Setting the progress bar can fail if the object that is currently
// scanned is also revisited. In this case, there may be two tasks racing
// on the progress counter. The looser can bail out because the progress
@@ -382,7 +383,7 @@ class ConcurrentMarkingVisitor final
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
- if (shared_info->ShouldFlushBytecode()) {
+ if (shared_info->ShouldFlushBytecode(bytecode_flush_mode_)) {
weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
} else {
VisitPointer(shared_info, shared_info->RawField(
@@ -406,7 +407,8 @@ class ConcurrentMarkingVisitor final
int size = VisitJSObjectSubclass(map, object);
// Check if the JSFunction needs reset due to bytecode being flushed.
- if (object->NeedsResetDueToFlushedBytecode()) {
+ if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
+ object->NeedsResetDueToFlushedBytecode()) {
weak_objects_->flushed_js_functions.Push(task_id_, object);
}
@@ -520,7 +522,7 @@ class ConcurrentMarkingVisitor final
// Implements ephemeron semantics: Marks value if key is already reachable.
// Returns true if value was actually marked.
- bool VisitEphemeron(HeapObject key, HeapObject value) {
+ bool ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state_.IsBlackOrGrey(key)) {
if (marking_state_.WhiteToGrey(value)) {
shared_.Push(value);
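ProcessEphemeron above applies the standard ephemeron rule: a value is kept alive through the table only if its key is already reachable, and pairs whose key is not yet marked must be revisited later. A toy single-threaded sketch of that rule (not V8's worklist machinery):

#include <deque>
#include <unordered_set>
#include <utility>

using Object = int;  // stand-in for a heap object id

struct Marker {
  std::unordered_set<Object> marked;              // "black or grey" objects
  std::deque<std::pair<Object, Object>> pending;  // deferred key/value pairs

  // Returns true if the value was newly marked.
  bool ProcessEphemeron(Object key, Object value) {
    if (marked.count(key)) {                // key already reachable?
      return marked.insert(value).second;   // then the value is reachable too
    }
    pending.push_back({key, value});        // key not reachable yet: retry later
    return false;
  }

  // Re-run deferred pairs until no more values get marked (fixed point).
  void DrainPending() {
    bool progress = true;
    while (progress) {
      progress = false;
      std::deque<std::pair<Object, Object>> still_pending;
      for (auto& p : pending) {
        if (marked.count(p.first)) {
          progress |= marked.insert(p.second).second;
        } else {
          still_pending.push_back(p);
        }
      }
      pending.swap(still_pending);
    }
  }
};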
@@ -691,6 +693,7 @@ class ConcurrentMarkingVisitor final
bool embedder_tracing_enabled_;
const unsigned mark_compact_epoch_;
bool is_forced_gc_;
+ BytecodeFlushMode bytecode_flush_mode_;
};
// Strings can change maps due to conversion to thin string or external strings.
@@ -788,7 +791,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
Ephemeron ephemeron;
while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
- if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
+ if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
}
}
@@ -833,7 +836,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
Ephemeron ephemeron;
while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
- if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
+ if (visitor.ProcessEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
}
}
diff --git a/chromium/v8/src/heap/concurrent-marking.h b/chromium/v8/src/heap/concurrent-marking.h
index 9b6b5f4a75b..7fbc4450613 100644
--- a/chromium/v8/src/heap/concurrent-marking.h
+++ b/chromium/v8/src/heap/concurrent-marking.h
@@ -33,7 +33,7 @@ struct MemoryChunkData {
using MemoryChunkDataMap =
std::unordered_map<MemoryChunk*, MemoryChunkData, MemoryChunk::Hasher>;
-class ConcurrentMarking {
+class V8_EXPORT_PRIVATE ConcurrentMarking {
public:
// When the scope is entered, the concurrent marking tasks
// are preempted and are not looking at the heap objects, concurrent marking
diff --git a/chromium/v8/src/heap/embedder-tracing.h b/chromium/v8/src/heap/embedder-tracing.h
index 1102c8f2afe..1b305526609 100644
--- a/chromium/v8/src/heap/embedder-tracing.h
+++ b/chromium/v8/src/heap/embedder-tracing.h
@@ -17,8 +17,8 @@ class JSObject;
class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
public:
- typedef std::pair<void*, void*> WrapperInfo;
- typedef std::vector<WrapperInfo> WrapperCache;
+ using WrapperInfo = std::pair<void*, void*>;
+ using WrapperCache = std::vector<WrapperInfo>;
class V8_EXPORT_PRIVATE ProcessingScope {
public:
diff --git a/chromium/v8/src/heap/factory-inl.h b/chromium/v8/src/heap/factory-inl.h
index 25cbd06a7c9..dce99498dac 100644
--- a/chromium/v8/src/heap/factory-inl.h
+++ b/chromium/v8/src/heap/factory-inl.h
@@ -44,64 +44,64 @@ Handle<String> Factory::NewSubString(Handle<String> str, int begin, int end) {
}
Handle<Object> Factory::NewNumberFromSize(size_t value,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
// We can't use Smi::IsValid() here because that operates on a signed
// intptr_t, and casting from size_t could create a bogus sign bit.
if (value <= static_cast<size_t>(Smi::kMaxValue)) {
return Handle<Object>(Smi::FromIntptr(static_cast<intptr_t>(value)),
isolate());
}
- return NewNumber(static_cast<double>(value), pretenure);
+ return NewNumber(static_cast<double>(value), allocation);
}
Handle<Object> Factory::NewNumberFromInt64(int64_t value,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
if (value <= std::numeric_limits<int32_t>::max() &&
value >= std::numeric_limits<int32_t>::min() &&
Smi::IsValid(static_cast<int32_t>(value))) {
return Handle<Object>(Smi::FromInt(static_cast<int32_t>(value)), isolate());
}
- return NewNumber(static_cast<double>(value), pretenure);
+ return NewNumber(static_cast<double>(value), allocation);
}
Handle<HeapNumber> Factory::NewHeapNumber(double value,
- PretenureFlag pretenure) {
- Handle<HeapNumber> heap_number = NewHeapNumber(pretenure);
+ AllocationType allocation) {
+ Handle<HeapNumber> heap_number = NewHeapNumber(allocation);
heap_number->set_value(value);
return heap_number;
}
Handle<MutableHeapNumber> Factory::NewMutableHeapNumber(
- double value, PretenureFlag pretenure) {
- Handle<MutableHeapNumber> number = NewMutableHeapNumber(pretenure);
+ double value, AllocationType allocation) {
+ Handle<MutableHeapNumber> number = NewMutableHeapNumber(allocation);
number->set_value(value);
return number;
}
Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits,
- PretenureFlag pretenure) {
- Handle<HeapNumber> heap_number = NewHeapNumber(pretenure);
+ AllocationType allocation) {
+ Handle<HeapNumber> heap_number = NewHeapNumber(allocation);
heap_number->set_value_as_bits(bits);
return heap_number;
}
Handle<MutableHeapNumber> Factory::NewMutableHeapNumberFromBits(
- uint64_t bits, PretenureFlag pretenure) {
- Handle<MutableHeapNumber> number = NewMutableHeapNumber(pretenure);
+ uint64_t bits, AllocationType allocation) {
+ Handle<MutableHeapNumber> number = NewMutableHeapNumber(allocation);
number->set_value_as_bits(bits);
return number;
}
Handle<MutableHeapNumber> Factory::NewMutableHeapNumberWithHoleNaN(
- PretenureFlag pretenure) {
- return NewMutableHeapNumberFromBits(kHoleNanInt64, pretenure);
+ AllocationType allocation) {
+ return NewMutableHeapNumberFromBits(kHoleNanInt64, allocation);
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
return NewJSArrayWithElements(elements, elements_kind, elements->length(),
- pretenure);
+ allocation);
}
Handle<Object> Factory::NewURIError() {
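The range check in NewNumberFromSize above avoids a signed/unsigned pitfall: casting a large size_t straight to a signed intptr_t can produce a bogus sign bit, so the comparison against the Smi limit is done in the unsigned domain first. A small illustration of the same guard, using a made-up limit rather than V8's real Smi layout:

#include <cstddef>
#include <cstdint>

constexpr std::intptr_t kMaxSmi = (std::intptr_t{1} << 30) - 1;  // illustrative

// Can `value` be stored as a "small integer"?  Compare in the unsigned
// domain first: casting a huge size_t to intptr_t could flip the sign bit
// and make a signed comparison succeed spuriously.
bool FitsSmi(std::size_t value) {
  return value <= static_cast<std::size_t>(kMaxSmi);
}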
diff --git a/chromium/v8/src/heap/factory.cc b/chromium/v8/src/heap/factory.cc
index 6dea2cae319..72737bdaf79 100644
--- a/chromium/v8/src/heap/factory.cc
+++ b/chromium/v8/src/heap/factory.cc
@@ -18,6 +18,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
+#include "src/heap/read-only-heap.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
@@ -130,26 +131,26 @@ void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
} // namespace
HeapObject Factory::AllocateRawWithImmortalMap(int size,
- PretenureFlag pretenure, Map map,
+ AllocationType allocation,
+ Map map,
AllocationAlignment alignment) {
- AllocationType type = Heap::SelectType(Heap::SelectSpace(pretenure));
- HeapObject result =
- isolate()->heap()->AllocateRawWithRetryOrFail(size, type, alignment);
+ HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
+ size, allocation, alignment);
result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
return result;
}
HeapObject Factory::AllocateRawWithAllocationSite(
- Handle<Map> map, PretenureFlag pretenure,
+ Handle<Map> map, AllocationType allocation,
Handle<AllocationSite> allocation_site) {
DCHECK(map->instance_type() != MAP_TYPE);
- AllocationType type = Heap::SelectType(Heap::SelectSpace(pretenure));
int size = map->instance_size();
if (!allocation_site.is_null()) size += AllocationMemento::kSize;
- AllocationSpace space = Heap::SelectSpace(pretenure);
- HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(size, type);
- WriteBarrierMode write_barrier_mode =
- space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ HeapObject result =
+ isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
+ WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
+ ? SKIP_WRITE_BARRIER
+ : UPDATE_WRITE_BARRIER;
result->set_map_after_allocation(*map, write_barrier_mode);
if (!allocation_site.is_null()) {
AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(
@@ -169,9 +170,9 @@ void Factory::InitializeAllocationMemento(AllocationMemento memento,
}
}
-HeapObject Factory::AllocateRawArray(int size, PretenureFlag pretenure) {
- AllocationType type = Heap::SelectType(Heap::SelectSpace(pretenure));
- HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(size, type);
+HeapObject Factory::AllocateRawArray(int size, AllocationType allocation) {
+ HeapObject result =
+ isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
@@ -179,28 +180,29 @@ HeapObject Factory::AllocateRawArray(int size, PretenureFlag pretenure) {
return result;
}
-HeapObject Factory::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
+HeapObject Factory::AllocateRawFixedArray(int length,
+ AllocationType allocation) {
if (length < 0 || length > FixedArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
- return AllocateRawArray(FixedArray::SizeFor(length), pretenure);
+ return AllocateRawArray(FixedArray::SizeFor(length), allocation);
}
HeapObject Factory::AllocateRawWeakArrayList(int capacity,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
if (capacity < 0 || capacity > WeakArrayList::kMaxCapacity) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
- return AllocateRawArray(WeakArrayList::SizeForCapacity(capacity), pretenure);
+ return AllocateRawArray(WeakArrayList::SizeForCapacity(capacity), allocation);
}
-HeapObject Factory::New(Handle<Map> map, PretenureFlag pretenure) {
+HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
- AllocationType type = Heap::SelectType(Heap::SelectSpace(pretenure));
- HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(size, type);
+ HeapObject result =
+ isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
// New space objects are allocated white.
- WriteBarrierMode write_barrier_mode = type == AllocationType::kYoung
+ WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
result->set_map_after_allocation(*map, write_barrier_mode);
@@ -208,22 +210,18 @@ HeapObject Factory::New(Handle<Map> map, PretenureFlag pretenure) {
}
Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
- AllocationSpace space) {
+ AllocationType allocation) {
AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
Heap* heap = isolate()->heap();
- HeapObject result = heap->AllocateRawWithRetryOrFail(
- size, Heap::SelectType(space), alignment);
-#ifdef DEBUG
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
- DCHECK(chunk->owner()->identity() == space);
-#endif
+ HeapObject result =
+ heap->AllocateRawWithRetryOrFail(size, allocation, alignment);
heap->CreateFillerObjectAt(result->address(), size, ClearRecordedSlots::kNo);
return Handle<HeapObject>(result, isolate());
}
Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
- Handle<PrototypeInfo> result =
- Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE, TENURED));
+ Handle<PrototypeInfo> result = Handle<PrototypeInfo>::cast(
+ NewStruct(PROTOTYPE_INFO_TYPE, AllocationType::kOld));
result->set_prototype_users(Smi::kZero);
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
result->set_bit_field(0);
@@ -233,13 +231,17 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<EnumCache> Factory::NewEnumCache(Handle<FixedArray> keys,
Handle<FixedArray> indices) {
- return Handle<EnumCache>::cast(NewTuple2(keys, indices, TENURED));
+ Handle<EnumCache> result = Handle<EnumCache>::cast(
+ NewStruct(ENUM_CACHE_TYPE, AllocationType::kOld));
+ result->set_keys(*keys);
+ result->set_indices(*indices);
+ return result;
}
Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
Handle<Tuple2> result =
- Handle<Tuple2>::cast(NewStruct(TUPLE2_TYPE, pretenure));
+ Handle<Tuple2>::cast(NewStruct(TUPLE2_TYPE, allocation));
result->set_value1(*value1);
result->set_value2(*value2);
return result;
@@ -247,9 +249,9 @@ Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2,
Handle<Object> value3,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
Handle<Tuple3> result =
- Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE, pretenure));
+ Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE, allocation));
result->set_value1(*value1);
result->set_value2(*value2);
result->set_value3(*value3);
@@ -260,7 +262,7 @@ Handle<ArrayBoilerplateDescription> Factory::NewArrayBoilerplateDescription(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
Handle<ArrayBoilerplateDescription> result =
Handle<ArrayBoilerplateDescription>::cast(
- NewStruct(ARRAY_BOILERPLATE_DESCRIPTION_TYPE, TENURED));
+ NewStruct(ARRAY_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld));
result->set_elements_kind(elements_kind);
result->set_constant_elements(*constant_values);
return result;
@@ -271,7 +273,8 @@ Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription(
DCHECK_EQ(raw_strings->length(), cooked_strings->length());
DCHECK_LT(0, raw_strings->length());
Handle<TemplateObjectDescription> result =
- Handle<TemplateObjectDescription>::cast(NewStruct(TUPLE2_TYPE, TENURED));
+ Handle<TemplateObjectDescription>::cast(
+ NewStruct(TUPLE2_TYPE, AllocationType::kOld));
result->set_raw_strings(*raw_strings);
result->set_cooked_strings(*cooked_strings);
return result;
@@ -280,23 +283,23 @@ Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription(
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
const char* type_of, byte kind,
- PretenureFlag pretenure) {
- Handle<Oddball> oddball(Oddball::cast(New(map, pretenure)), isolate());
+ AllocationType allocation) {
+ Handle<Oddball> oddball(Oddball::cast(New(map, allocation)), isolate());
Oddball::Initialize(isolate(), oddball, to_string, to_number, type_of, kind);
return oddball;
}
-Handle<Oddball> Factory::NewSelfReferenceMarker(PretenureFlag pretenure) {
+Handle<Oddball> Factory::NewSelfReferenceMarker(AllocationType allocation) {
return NewOddball(self_reference_marker_map(), "self_reference_marker",
handle(Smi::FromInt(-1), isolate()), "undefined",
- Oddball::kSelfReferenceMarker, pretenure);
+ Oddball::kSelfReferenceMarker, allocation);
}
Handle<PropertyArray> Factory::NewPropertyArray(int length,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
DCHECK_LE(0, length);
if (length == 0) return empty_property_array();
- HeapObject result = AllocateRawFixedArray(length, pretenure);
+ HeapObject result = AllocateRawFixedArray(length, allocation);
result->set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER);
Handle<PropertyArray> array(PropertyArray::cast(result), isolate());
array->initialize_length(length);
@@ -306,8 +309,8 @@ Handle<PropertyArray> Factory::NewPropertyArray(int length,
Handle<FixedArray> Factory::NewFixedArrayWithFiller(RootIndex map_root_index,
int length, Object filler,
- PretenureFlag pretenure) {
- HeapObject result = AllocateRawFixedArray(length, pretenure);
+ AllocationType allocation) {
+ HeapObject result = AllocateRawFixedArray(length, allocation);
DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
Map map = Map::cast(isolate()->root(map_root_index));
result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
@@ -319,20 +322,20 @@ Handle<FixedArray> Factory::NewFixedArrayWithFiller(RootIndex map_root_index,
template <typename T>
Handle<T> Factory::NewFixedArrayWithMap(RootIndex map_root_index, int length,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
static_assert(std::is_base_of<FixedArray, T>::value,
"T must be a descendant of FixedArray");
// Zero-length case must be handled outside, where the knowledge about
// the map is.
DCHECK_LT(0, length);
return Handle<T>::cast(NewFixedArrayWithFiller(
- map_root_index, length, *undefined_value(), pretenure));
+ map_root_index, length, *undefined_value(), allocation));
}
template <typename T>
Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
int length,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
static_assert(std::is_base_of<WeakFixedArray, T>::value,
"T must be a descendant of WeakFixedArray");
@@ -340,7 +343,7 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
DCHECK_LT(0, length);
HeapObject result =
- AllocateRawArray(WeakFixedArray::SizeFor(length), pretenure);
+ AllocateRawArray(WeakFixedArray::SizeFor(length), allocation);
Map map = Map::cast(isolate()->root(map_root_index));
result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
@@ -352,21 +355,22 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
}
template Handle<FixedArray> Factory::NewFixedArrayWithMap<FixedArray>(
- RootIndex, int, PretenureFlag);
+ RootIndex, int, AllocationType allocation);
-Handle<FixedArray> Factory::NewFixedArray(int length, PretenureFlag pretenure) {
+Handle<FixedArray> Factory::NewFixedArray(int length,
+ AllocationType allocation) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length,
- *undefined_value(), pretenure);
+ *undefined_value(), allocation);
}
Handle<WeakFixedArray> Factory::NewWeakFixedArray(int length,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
DCHECK_LE(0, length);
if (length == 0) return empty_weak_fixed_array();
HeapObject result =
- AllocateRawArray(WeakFixedArray::SizeFor(length), pretenure);
+ AllocateRawArray(WeakFixedArray::SizeFor(length), allocation);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kWeakFixedArrayMap));
result->set_map_after_allocation(*weak_fixed_array_map(), SKIP_WRITE_BARRIER);
Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
@@ -375,16 +379,14 @@ Handle<WeakFixedArray> Factory::NewWeakFixedArray(int length,
return array;
}
-MaybeHandle<FixedArray> Factory::TryNewFixedArray(int length,
- PretenureFlag pretenure) {
+MaybeHandle<FixedArray> Factory::TryNewFixedArray(
+ int length, AllocationType allocation_type) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
int size = FixedArray::SizeFor(length);
- AllocationSpace space = Heap::SelectSpace(pretenure);
- AllocationType type = Heap::SelectType(space);
Heap* heap = isolate()->heap();
- AllocationResult allocation = heap->AllocateRaw(size, type);
+ AllocationResult allocation = heap->AllocateRaw(size, allocation_type);
HeapObject result;
if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
@@ -400,15 +402,15 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(int length,
}
Handle<FixedArray> Factory::NewFixedArrayWithHoles(int length,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length,
- *the_hole_value(), pretenure);
+ *the_hole_value(), allocation);
}
Handle<FixedArray> Factory::NewUninitializedFixedArray(
- int length, PretenureFlag pretenure) {
+ int length, AllocationType allocation) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
@@ -416,17 +418,30 @@ Handle<FixedArray> Factory::NewUninitializedFixedArray(
// array. After getting canary/performance coverage, either remove the
// function or revert to returning uninitilized array.
return NewFixedArrayWithFiller(RootIndex::kFixedArrayMap, length,
- *undefined_value(), pretenure);
+ *undefined_value(), allocation);
+}
+
+Handle<ClosureFeedbackCellArray> Factory::NewClosureFeedbackCellArray(
+ int length, AllocationType allocation) {
+ if (length == 0) return empty_closure_feedback_cell_array();
+
+ Handle<ClosureFeedbackCellArray> feedback_cell_array =
+ NewFixedArrayWithMap<ClosureFeedbackCellArray>(
+ RootIndex::kClosureFeedbackCellArrayMap, length, allocation);
+
+ return feedback_cell_array;
}
Handle<FeedbackVector> Factory::NewFeedbackVector(
- Handle<SharedFunctionInfo> shared, PretenureFlag pretenure) {
+ Handle<SharedFunctionInfo> shared,
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
+ AllocationType allocation) {
int length = shared->feedback_metadata()->slot_count();
DCHECK_LE(0, length);
int size = FeedbackVector::SizeFor(length);
HeapObject result =
- AllocateRawWithImmortalMap(size, pretenure, *feedback_vector_map());
+ AllocateRawWithImmortalMap(size, allocation, *feedback_vector_map());
Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
vector->set_shared_function_info(*shared);
vector->set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(
@@ -436,18 +451,20 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
vector->set_invocation_count(0);
vector->set_profiler_ticks(0);
vector->set_deopt_count(0);
+ vector->set_closure_feedback_cell_array(*closure_feedback_cell_array);
+
// TODO(leszeks): Initialize based on the feedback metadata.
MemsetTagged(ObjectSlot(vector->slots_start()), *undefined_value(), length);
return vector;
}
Handle<EmbedderDataArray> Factory::NewEmbedderDataArray(
- int length, PretenureFlag pretenure) {
+ int length, AllocationType allocation) {
DCHECK_LE(0, length);
int size = EmbedderDataArray::SizeFor(length);
HeapObject result =
- AllocateRawWithImmortalMap(size, pretenure, *embedder_data_array_map());
+ AllocateRawWithImmortalMap(size, allocation, *embedder_data_array_map());
Handle<EmbedderDataArray> array(EmbedderDataArray::cast(result), isolate());
array->set_length(length);
@@ -481,8 +498,9 @@ Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
}
Handle<ObjectBoilerplateDescription> description =
- Handle<ObjectBoilerplateDescription>::cast(NewFixedArrayWithMap(
- RootIndex::kObjectBoilerplateDescriptionMap, size, TENURED));
+ Handle<ObjectBoilerplateDescription>::cast(
+ NewFixedArrayWithMap(RootIndex::kObjectBoilerplateDescriptionMap,
+ size, AllocationType::kOld));
if (has_different_size_backing_store) {
DCHECK_IMPLIES((boilerplate == (all_properties - index_keys)),
@@ -496,7 +514,7 @@ Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
}
Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
if (length == 0) return empty_fixed_array();
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
@@ -504,30 +522,31 @@ Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length,
int size = FixedDoubleArray::SizeFor(length);
Map map = *fixed_double_array_map();
HeapObject result =
- AllocateRawWithImmortalMap(size, pretenure, map, kDoubleAligned);
+ AllocateRawWithImmortalMap(size, allocation, map, kDoubleAligned);
Handle<FixedDoubleArray> array(FixedDoubleArray::cast(result), isolate());
array->set_length(length);
return array;
}
Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(
- int length, PretenureFlag pretenure) {
+ int length, AllocationType allocation) {
DCHECK_LE(0, length);
- Handle<FixedArrayBase> array = NewFixedDoubleArray(length, pretenure);
+ Handle<FixedArrayBase> array = NewFixedDoubleArray(length, allocation);
if (length > 0) {
Handle<FixedDoubleArray>::cast(array)->FillWithHoles(0, length);
}
return array;
}
-Handle<FeedbackMetadata> Factory::NewFeedbackMetadata(int slot_count,
- PretenureFlag tenure) {
+Handle<FeedbackMetadata> Factory::NewFeedbackMetadata(
+ int slot_count, int feedback_cell_count, AllocationType allocation) {
DCHECK_LE(0, slot_count);
int size = FeedbackMetadata::SizeFor(slot_count);
HeapObject result =
- AllocateRawWithImmortalMap(size, tenure, *feedback_metadata_map());
+ AllocateRawWithImmortalMap(size, allocation, *feedback_metadata_map());
Handle<FeedbackMetadata> data(FeedbackMetadata::cast(result), isolate());
data->set_slot_count(slot_count);
+ data->set_closure_feedback_cell_count(feedback_cell_count);
// Initialize the data section to 0.
int data_size = size - FeedbackMetadata::kHeaderSize;
@@ -539,17 +558,17 @@ Handle<FeedbackMetadata> Factory::NewFeedbackMetadata(int slot_count,
}
Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
DCHECK_LE(0, number_of_frames);
Handle<FixedArray> result = NewFixedArrayWithHoles(
- FrameArray::LengthFor(number_of_frames), pretenure);
+ FrameArray::LengthFor(number_of_frames), allocation);
result->set(FrameArray::kFrameCountIndex, Smi::kZero);
return Handle<FrameArray>::cast(result);
}
template <typename T>
Handle<T> Factory::AllocateSmallOrderedHashTable(Handle<Map> map, int capacity,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
// Capacity must be a power of two, since we depend on being able
// to divide and multiple by 2 (kLoadFactor) to derive capacity
// from number of buckets. If we decide to change kLoadFactor
@@ -563,29 +582,29 @@ Handle<T> Factory::AllocateSmallOrderedHashTable(Handle<Map> map, int capacity,
DCHECK_EQ(0, capacity % T::kLoadFactor);
int size = T::SizeFor(capacity);
- HeapObject result = AllocateRawWithImmortalMap(size, pretenure, *map);
+ HeapObject result = AllocateRawWithImmortalMap(size, allocation, *map);
Handle<T> table(T::cast(result), isolate());
table->Initialize(isolate(), capacity);
return table;
}
Handle<SmallOrderedHashSet> Factory::NewSmallOrderedHashSet(
- int capacity, PretenureFlag pretenure) {
+ int capacity, AllocationType allocation) {
return AllocateSmallOrderedHashTable<SmallOrderedHashSet>(
- small_ordered_hash_set_map(), capacity, pretenure);
+ small_ordered_hash_set_map(), capacity, allocation);
}
Handle<SmallOrderedHashMap> Factory::NewSmallOrderedHashMap(
- int capacity, PretenureFlag pretenure) {
+ int capacity, AllocationType allocation) {
return AllocateSmallOrderedHashTable<SmallOrderedHashMap>(
- small_ordered_hash_map_map(), capacity, pretenure);
+ small_ordered_hash_map_map(), capacity, allocation);
}
Handle<SmallOrderedNameDictionary> Factory::NewSmallOrderedNameDictionary(
- int capacity, PretenureFlag pretenure) {
+ int capacity, AllocationType allocation) {
Handle<SmallOrderedNameDictionary> dict =
AllocateSmallOrderedHashTable<SmallOrderedNameDictionary>(
- small_ordered_name_dictionary_map(), capacity, pretenure);
+ small_ordered_name_dictionary_map(), capacity, allocation);
dict->SetHash(PropertyArray::kNoHashSentinel);
return dict;
}
@@ -604,8 +623,8 @@ Handle<OrderedNameDictionary> Factory::NewOrderedNameDictionary() {
}
Handle<AccessorPair> Factory::NewAccessorPair() {
- Handle<AccessorPair> accessors =
- Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE, TENURED));
+ Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(
+ NewStruct(ACCESSOR_PAIR_TYPE, AllocationType::kOld));
accessors->set_getter(*null_value(), SKIP_WRITE_BARRIER);
accessors->set_setter(*null_value(), SKIP_WRITE_BARRIER);
return accessors;
@@ -639,14 +658,14 @@ Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
}
MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
- PretenureFlag pretenure) {
- DCHECK_NE(pretenure, TENURED_READ_ONLY);
+ AllocationType allocation) {
+ DCHECK_NE(allocation, AllocationType::kReadOnly);
int length = string.length();
if (length == 0) return empty_string();
if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
Handle<SeqOneByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- NewRawOneByteString(string.length(), pretenure),
+ NewRawOneByteString(string.length(), allocation),
String);
DisallowHeapAllocation no_gc;
@@ -657,8 +676,8 @@ MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
}
MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
- PretenureFlag pretenure) {
- DCHECK_NE(pretenure, TENURED_READ_ONLY);
+ AllocationType allocation) {
+ DCHECK_NE(allocation, AllocationType::kReadOnly);
// Check for ASCII first since this is the common case.
const char* ascii_data = string.start();
int length = string.length();
@@ -666,7 +685,8 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
if (non_ascii_start >= length) {
// If the string is ASCII, we do not need to convert the characters
// since UTF8 is backwards compatible with ASCII.
- return NewStringFromOneByte(Vector<const uint8_t>::cast(string), pretenure);
+ return NewStringFromOneByte(Vector<const uint8_t>::cast(string),
+ allocation);
}
std::unique_ptr<uint16_t[]> buffer(new uint16_t[length - non_ascii_start]);
@@ -707,7 +727,7 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
+ NewRawTwoByteString(non_ascii_start + utf16_length, allocation), String);
DCHECK_LE(non_ascii_start + utf16_length, length);
@@ -721,7 +741,7 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
MaybeHandle<String> Factory::NewStringFromUtf8SubString(
Handle<SeqOneByteString> str, int begin, int length,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
Access<UnicodeCache::Utf8Decoder> decoder(
isolate()->unicode_cache()->utf8_decoder());
int non_ascii_start;
@@ -743,7 +763,7 @@ MaybeHandle<String> Factory::NewStringFromUtf8SubString(
if (non_ascii_start >= length) {
// If the string is ASCII, we can just make a substring.
- // TODO(v8): the pretenure flag is ignored in this case.
+ // TODO(v8): the allocation flag is ignored in this case.
return NewSubString(str, begin, begin + length);
}
@@ -753,7 +773,7 @@ MaybeHandle<String> Factory::NewStringFromUtf8SubString(
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
+ NewRawTwoByteString(non_ascii_start + utf16_length, allocation), String);
// Update pointer references, since the original string may have moved after
// allocation.
@@ -776,21 +796,21 @@ MaybeHandle<String> Factory::NewStringFromUtf8SubString(
MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
int length,
- PretenureFlag pretenure) {
- DCHECK_NE(pretenure, TENURED_READ_ONLY);
+ AllocationType allocation) {
+ DCHECK_NE(allocation, AllocationType::kReadOnly);
if (length == 0) return empty_string();
if (String::IsOneByte(string, length)) {
if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
Handle<SeqOneByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- NewRawOneByteString(length, pretenure), String);
+ NewRawOneByteString(length, allocation), String);
DisallowHeapAllocation no_gc;
CopyChars(result->GetChars(no_gc), string, length);
return result;
} else {
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- NewRawTwoByteString(length, pretenure), String);
+ NewRawTwoByteString(length, allocation), String);
DisallowHeapAllocation no_gc;
CopyChars(result->GetChars(no_gc), string, length);
return result;
@@ -798,14 +818,14 @@ MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
}
MaybeHandle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
- PretenureFlag pretenure) {
- return NewStringFromTwoByte(string.start(), string.length(), pretenure);
+ AllocationType allocation) {
+ return NewStringFromTwoByte(string.start(), string.length(), allocation);
}
MaybeHandle<String> Factory::NewStringFromTwoByte(
- const ZoneVector<uc16>* string, PretenureFlag pretenure) {
+ const ZoneVector<uc16>* string, AllocationType allocation) {
return NewStringFromTwoByte(string->data(), static_cast<int>(string->size()),
- pretenure);
+ allocation);
}
namespace {
@@ -863,11 +883,12 @@ Handle<SeqOneByteString> Factory::AllocateRawOneByteInternalizedString(
Map map = *one_byte_internalized_string_map();
int size = SeqOneByteString::SizeFor(length);
- HeapObject result = AllocateRawWithImmortalMap(
- size,
- isolate()->heap()->CanAllocateInReadOnlySpace() ? TENURED_READ_ONLY
- : TENURED,
- map);
+ HeapObject result =
+ AllocateRawWithImmortalMap(size,
+ isolate()->heap()->CanAllocateInReadOnlySpace()
+ ? AllocationType::kReadOnly
+ : AllocationType::kOld,
+ map);
Handle<SeqOneByteString> answer(SeqOneByteString::cast(result), isolate());
answer->set_length(length);
answer->set_hash_field(hash_field);
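The internalized-string allocation path above replaces the old TENURED_READ_ONLY flag with a runtime choice: allocate in read-only space while it is still writable (during bootstrap/snapshotting), otherwise fall back to old space. A self-contained sketch of that decision; the HeapStub type and helper function are illustrative, only the enum values mirror the diff:

enum class AllocationType { kYoung, kOld, kReadOnly };

struct HeapStub {  // stand-in; the real check lives on V8's Heap
  bool read_only_space_writable = false;
  bool CanAllocateInReadOnlySpace() const { return read_only_space_writable; }
};

AllocationType AllocationTypeForInternalizedString(const HeapStub& heap) {
  // Read-only space is only an option while it is still being set up;
  // once sealed, internalized strings go to the regular old generation.
  return heap.CanAllocateInReadOnlySpace() ? AllocationType::kReadOnly
                                           : AllocationType::kOld;
}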
@@ -882,7 +903,8 @@ Handle<String> Factory::AllocateTwoByteInternalizedString(
Map map = *internalized_string_map();
int size = SeqTwoByteString::SizeFor(str.length());
- HeapObject result = AllocateRawWithImmortalMap(size, TENURED, map);
+ HeapObject result =
+ AllocateRawWithImmortalMap(size, AllocationType::kOld, map);
Handle<SeqTwoByteString> answer(SeqTwoByteString::cast(result), isolate());
answer->set_length(str.length());
answer->set_hash_field(hash_field);
@@ -912,11 +934,12 @@ Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
size = SeqTwoByteString::SizeFor(chars);
}
- HeapObject result = AllocateRawWithImmortalMap(
- size,
- isolate()->heap()->CanAllocateInReadOnlySpace() ? TENURED_READ_ONLY
- : TENURED,
- map);
+ HeapObject result =
+ AllocateRawWithImmortalMap(size,
+ isolate()->heap()->CanAllocateInReadOnlySpace()
+ ? AllocationType::kReadOnly
+ : AllocationType::kOld,
+ map);
Handle<String> answer(String::cast(result), isolate());
answer->set_length(chars);
answer->set_hash_field(hash_field);
@@ -1016,8 +1039,8 @@ template <class StringClass>
Handle<StringClass> Factory::InternalizeExternalString(Handle<String> string) {
Handle<StringClass> cast_string = Handle<StringClass>::cast(string);
Handle<Map> map = GetInternalizedStringMap(this, string).ToHandleChecked();
- Handle<StringClass> external_string(StringClass::cast(New(map, TENURED)),
- isolate());
+ Handle<StringClass> external_string(
+ StringClass::cast(New(map, AllocationType::kOld)), isolate());
external_string->set_length(cast_string->length());
external_string->set_hash_field(cast_string->hash_field());
external_string->SetResource(isolate(), nullptr);
@@ -1031,7 +1054,7 @@ template Handle<ExternalTwoByteString>
Factory::InternalizeExternalString<ExternalTwoByteString>(Handle<String>);
MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
- int length, PretenureFlag pretenure) {
+ int length, AllocationType allocation) {
if (length > String::kMaxLength || length < 0) {
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqOneByteString);
}
@@ -1040,7 +1063,7 @@ MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
DCHECK_GE(SeqOneByteString::kMaxSize, size);
HeapObject result =
- AllocateRawWithImmortalMap(size, pretenure, *one_byte_string_map());
+ AllocateRawWithImmortalMap(size, allocation, *one_byte_string_map());
Handle<SeqOneByteString> string(SeqOneByteString::cast(result), isolate());
string->set_length(length);
string->set_hash_field(String::kEmptyHashField);
@@ -1049,7 +1072,7 @@ MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
}
MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
- int length, PretenureFlag pretenure) {
+ int length, AllocationType allocation) {
if (length > String::kMaxLength || length < 0) {
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqTwoByteString);
}
@@ -1058,7 +1081,7 @@ MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
DCHECK_GE(SeqTwoByteString::kMaxSize, size);
HeapObject result =
- AllocateRawWithImmortalMap(size, pretenure, *string_map());
+ AllocateRawWithImmortalMap(size, allocation, *string_map());
Handle<SeqTwoByteString> string(SeqTwoByteString::cast(result), isolate());
string->set_length(length);
string->set_hash_field(String::kEmptyHashField);
@@ -1216,8 +1239,9 @@ Handle<String> Factory::NewConsString(Handle<String> left, Handle<String> right,
DCHECK_LE(length, String::kMaxLength);
Handle<ConsString> result(
- ConsString::cast(one_byte ? New(cons_one_byte_string_map(), NOT_TENURED)
- : New(cons_string_map(), NOT_TENURED)),
+ ConsString::cast(
+ one_byte ? New(cons_one_byte_string_map(), AllocationType::kYoung)
+ : New(cons_string_map(), AllocationType::kYoung)),
isolate());
DisallowHeapAllocation no_gc;
@@ -1302,8 +1326,8 @@ Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
Handle<Map> map = str->IsOneByteRepresentation()
? sliced_one_byte_string_map()
: sliced_string_map();
- Handle<SlicedString> slice(SlicedString::cast(New(map, NOT_TENURED)),
- isolate());
+ Handle<SlicedString> slice(
+ SlicedString::cast(New(map, AllocationType::kYoung)), isolate());
slice->set_hash_field(String::kEmptyHashField);
slice->set_length(length);
@@ -1324,7 +1348,7 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
? external_one_byte_string_map()
: uncached_external_one_byte_string_map();
Handle<ExternalOneByteString> external_string(
- ExternalOneByteString::cast(New(map, TENURED)), isolate());
+ ExternalOneByteString::cast(New(map, AllocationType::kOld)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->SetResource(isolate(), resource);
@@ -1344,7 +1368,7 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
Handle<Map> map = resource->IsCacheable() ? external_string_map()
: uncached_external_string_map();
Handle<ExternalTwoByteString> external_string(
- ExternalTwoByteString::cast(New(map, TENURED)), isolate());
+ ExternalTwoByteString::cast(New(map, AllocationType::kOld)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->SetResource(isolate(), resource);
@@ -1360,7 +1384,7 @@ Handle<ExternalOneByteString> Factory::NewNativeSourceString(
Handle<Map> map = native_source_string_map();
Handle<ExternalOneByteString> external_string(
- ExternalOneByteString::cast(New(map, TENURED)), isolate());
+ ExternalOneByteString::cast(New(map, AllocationType::kOld)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->SetResource(isolate(), resource);
@@ -1381,13 +1405,13 @@ Handle<JSStringIterator> Factory::NewJSStringIterator(Handle<String> string) {
return iterator;
}
-Handle<Symbol> Factory::NewSymbol(PretenureFlag flag) {
- DCHECK(flag != NOT_TENURED);
+Handle<Symbol> Factory::NewSymbol(AllocationType allocation) {
+ DCHECK(allocation != AllocationType::kYoung);
// Statically ensure that it is safe to allocate symbols in paged spaces.
STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);
HeapObject result =
- AllocateRawWithImmortalMap(Symbol::kSize, flag, *symbol_map());
+ AllocateRawWithImmortalMap(Symbol::kSize, allocation, *symbol_map());
// Generate a random hash value.
int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
@@ -1401,9 +1425,9 @@ Handle<Symbol> Factory::NewSymbol(PretenureFlag flag) {
return symbol;
}
-Handle<Symbol> Factory::NewPrivateSymbol(PretenureFlag flag) {
- DCHECK(flag != NOT_TENURED);
- Handle<Symbol> symbol = NewSymbol(flag);
+Handle<Symbol> Factory::NewPrivateSymbol(AllocationType allocation) {
+ DCHECK(allocation != AllocationType::kYoung);
+ Handle<Symbol> symbol = NewSymbol(allocation);
symbol->set_is_private(true);
return symbol;
}
@@ -1417,7 +1441,7 @@ Handle<Symbol> Factory::NewPrivateNameSymbol(Handle<String> name) {
Handle<Context> Factory::NewContext(RootIndex map_root_index, int size,
int variadic_part_length,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
DCHECK_LE(Context::kTodoHeaderSize, size);
DCHECK(IsAligned(size, kTaggedSize));
@@ -1425,7 +1449,7 @@ Handle<Context> Factory::NewContext(RootIndex map_root_index, int size,
DCHECK_LE(Context::SizeFor(variadic_part_length), size);
Map map = Map::cast(isolate()->root(map_root_index));
- HeapObject result = AllocateRawWithImmortalMap(size, pretenure, map);
+ HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
Handle<Context> context(Context::cast(result), isolate());
context->set_length(variadic_part_length);
DCHECK_EQ(context->SizeFromMap(map), size);
@@ -1441,7 +1465,7 @@ Handle<Context> Factory::NewContext(RootIndex map_root_index, int size,
Handle<NativeContext> Factory::NewNativeContext() {
Handle<NativeContext> context = Handle<NativeContext>::cast(
NewContext(RootIndex::kNativeContextMap, NativeContext::kSize,
- NativeContext::NATIVE_CONTEXT_SLOTS, TENURED));
+ NativeContext::NATIVE_CONTEXT_SLOTS, AllocationType::kOld));
context->set_scope_info(ReadOnlyRoots(isolate()).empty_scope_info());
context->set_previous(Context::unchecked_cast(Smi::zero()));
context->set_extension(*the_hole_value());
@@ -1457,9 +1481,9 @@ Handle<Context> Factory::NewScriptContext(Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
int variadic_part_length = scope_info->ContextLength();
- Handle<Context> context = NewContext(RootIndex::kScriptContextMap,
- Context::SizeFor(variadic_part_length),
- variadic_part_length, TENURED);
+ Handle<Context> context = NewContext(
+ RootIndex::kScriptContextMap, Context::SizeFor(variadic_part_length),
+ variadic_part_length, AllocationType::kOld);
context->set_scope_info(*scope_info);
context->set_previous(*outer);
context->set_extension(*the_hole_value());
@@ -1481,9 +1505,9 @@ Handle<Context> Factory::NewModuleContext(Handle<Module> module,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
int variadic_part_length = scope_info->ContextLength();
- Handle<Context> context = NewContext(RootIndex::kModuleContextMap,
- Context::SizeFor(variadic_part_length),
- variadic_part_length, TENURED);
+ Handle<Context> context = NewContext(
+ RootIndex::kModuleContextMap, Context::SizeFor(variadic_part_length),
+ variadic_part_length, AllocationType::kOld);
context->set_scope_info(*scope_info);
context->set_previous(*outer);
context->set_extension(*module);
@@ -1508,7 +1532,7 @@ Handle<Context> Factory::NewFunctionContext(Handle<Context> outer,
int variadic_part_length = scope_info->ContextLength();
Handle<Context> context =
NewContext(mapRootIndex, Context::SizeFor(variadic_part_length),
- variadic_part_length, NOT_TENURED);
+ variadic_part_length, AllocationType::kYoung);
context->set_scope_info(*scope_info);
context->set_previous(*outer);
context->set_extension(*the_hole_value());
@@ -1523,9 +1547,9 @@ Handle<Context> Factory::NewCatchContext(Handle<Context> previous,
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
// TODO(ishell): Take the details from CatchContext class.
int variadic_part_length = Context::MIN_CONTEXT_SLOTS + 1;
- Handle<Context> context = NewContext(RootIndex::kCatchContextMap,
- Context::SizeFor(variadic_part_length),
- variadic_part_length, NOT_TENURED);
+ Handle<Context> context = NewContext(
+ RootIndex::kCatchContextMap, Context::SizeFor(variadic_part_length),
+ variadic_part_length, AllocationType::kYoung);
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*the_hole_value());
@@ -1548,7 +1572,7 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
int variadic_part_length = Context::MIN_CONTEXT_SLOTS + 2;
Handle<Context> c = NewContext(RootIndex::kDebugEvaluateContextMap,
Context::SizeFor(variadic_part_length),
- variadic_part_length, NOT_TENURED);
+ variadic_part_length, AllocationType::kYoung);
c->set_scope_info(*scope_info);
c->set_previous(*previous);
c->set_native_context(previous->native_context());
@@ -1564,9 +1588,9 @@ Handle<Context> Factory::NewWithContext(Handle<Context> previous,
DCHECK_EQ(scope_info->scope_type(), WITH_SCOPE);
// TODO(ishell): Take the details from WithContext class.
int variadic_part_length = Context::MIN_CONTEXT_SLOTS;
- Handle<Context> context = NewContext(RootIndex::kWithContextMap,
- Context::SizeFor(variadic_part_length),
- variadic_part_length, NOT_TENURED);
+ Handle<Context> context = NewContext(
+ RootIndex::kWithContextMap, Context::SizeFor(variadic_part_length),
+ variadic_part_length, AllocationType::kYoung);
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*extension);
@@ -1576,11 +1600,12 @@ Handle<Context> Factory::NewWithContext(Handle<Context> previous,
Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
Handle<ScopeInfo> scope_info) {
- DCHECK_EQ(scope_info->scope_type(), BLOCK_SCOPE);
+ DCHECK_IMPLIES(scope_info->scope_type() != BLOCK_SCOPE,
+ scope_info->scope_type() == CLASS_SCOPE);
int variadic_part_length = scope_info->ContextLength();
- Handle<Context> context = NewContext(RootIndex::kBlockContextMap,
- Context::SizeFor(variadic_part_length),
- variadic_part_length, NOT_TENURED);
+ Handle<Context> context = NewContext(
+ RootIndex::kBlockContextMap, Context::SizeFor(variadic_part_length),
+ variadic_part_length, AllocationType::kYoung);
context->set_scope_info(*scope_info);
context->set_previous(*previous);
context->set_extension(*the_hole_value());
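The relaxed assertion in NewBlockContext above can be easy to misread; DCHECK_IMPLIES(p, q) fails only when p holds and q does not, so the new check accepts exactly BLOCK_SCOPE or CLASS_SCOPE. A plain restatement with assert in place of V8's DCHECK macros:

#include <cassert>

enum ScopeType { BLOCK_SCOPE, CLASS_SCOPE, WITH_SCOPE /* ... */ };

void CheckBlockContextScope(ScopeType type) {
  // Equivalent to DCHECK_IMPLIES(type != BLOCK_SCOPE, type == CLASS_SCOPE):
  // the only scope types allowed here are BLOCK_SCOPE and CLASS_SCOPE.
  assert(type == BLOCK_SCOPE || type == CLASS_SCOPE);
}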
@@ -1591,9 +1616,9 @@ Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
int variadic_part_length) {
DCHECK_LE(Context::MIN_CONTEXT_SLOTS, variadic_part_length);
- Handle<Context> context = NewContext(RootIndex::kFunctionContextMap,
- Context::SizeFor(variadic_part_length),
- variadic_part_length, NOT_TENURED);
+ Handle<Context> context = NewContext(
+ RootIndex::kFunctionContextMap, Context::SizeFor(variadic_part_length),
+ variadic_part_length, AllocationType::kYoung);
context->set_scope_info(ReadOnlyRoots(isolate()).empty_scope_info());
context->set_previous(*native_context);
context->set_extension(*the_hole_value());
@@ -1601,7 +1626,8 @@ Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
return context;
}
-Handle<Struct> Factory::NewStruct(InstanceType type, PretenureFlag pretenure) {
+Handle<Struct> Factory::NewStruct(InstanceType type,
+ AllocationType allocation) {
Map map;
switch (type) {
#define MAKE_CASE(TYPE, Name, name) \
@@ -1614,7 +1640,7 @@ Handle<Struct> Factory::NewStruct(InstanceType type, PretenureFlag pretenure) {
UNREACHABLE();
}
int size = map->instance_size();
- HeapObject result = AllocateRawWithImmortalMap(size, pretenure, map);
+ HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
Handle<Struct> str(Struct::cast(result), isolate());
str->InitializeBody(size);
return str;
@@ -1623,14 +1649,14 @@ Handle<Struct> Factory::NewStruct(InstanceType type, PretenureFlag pretenure) {
Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
int aliased_context_slot) {
Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
- NewStruct(ALIASED_ARGUMENTS_ENTRY_TYPE, NOT_TENURED));
+ NewStruct(ALIASED_ARGUMENTS_ENTRY_TYPE, AllocationType::kYoung));
entry->set_aliased_context_slot(aliased_context_slot);
return entry;
}
Handle<AccessorInfo> Factory::NewAccessorInfo() {
- Handle<AccessorInfo> info =
- Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE, TENURED));
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(
+ NewStruct(ACCESSOR_INFO_TYPE, AllocationType::kOld));
info->set_name(*empty_string());
info->set_flags(0); // Must clear the flags, it was initialized as undefined.
info->set_is_sloppy(true);
@@ -1638,17 +1664,20 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
return info;
}
-Handle<Script> Factory::NewScript(Handle<String> source, PretenureFlag tenure) {
- return NewScriptWithId(source, isolate()->heap()->NextScriptId(), tenure);
+Handle<Script> Factory::NewScript(Handle<String> source,
+ AllocationType allocation) {
+ return NewScriptWithId(source, isolate()->heap()->NextScriptId(), allocation);
}
Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id,
- PretenureFlag tenure) {
- DCHECK(tenure == TENURED || tenure == TENURED_READ_ONLY);
+ AllocationType allocation) {
+ DCHECK(allocation == AllocationType::kOld ||
+ allocation == AllocationType::kReadOnly);
// Create and initialize script object.
Heap* heap = isolate()->heap();
ReadOnlyRoots roots(heap);
- Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE, tenure));
+ Handle<Script> script =
+ Handle<Script>::cast(NewStruct(SCRIPT_TYPE, allocation));
script->set_source(*source);
script->set_name(roots.undefined_value());
script->set_id(script_id);
@@ -1668,6 +1697,9 @@ Handle<Script> Factory::NewScriptWithId(Handle<String> source, int script_id,
MaybeObjectHandle::Weak(script));
heap->set_script_list(*scripts);
LOG(isolate(), ScriptEvent(Logger::ScriptEventType::kCreate, script_id));
+ TRACE_EVENT_OBJECT_CREATED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("v8.compile"), "Script",
+ TRACE_ID_WITH_SCOPE(Script::kTraceScope, script_id));
return script;
}
@@ -1675,7 +1707,7 @@ Handle<Script> Factory::CloneScript(Handle<Script> script) {
Heap* heap = isolate()->heap();
int script_id = isolate()->heap()->NextScriptId();
Handle<Script> new_script =
- Handle<Script>::cast(NewStruct(SCRIPT_TYPE, TENURED));
+ Handle<Script>::cast(NewStruct(SCRIPT_TYPE, AllocationType::kOld));
new_script->set_source(script->source());
new_script->set_name(script->name());
new_script->set_id(script_id);
@@ -1742,25 +1774,25 @@ Factory::NewFinalizationGroupCleanupJobTask(
return microtask;
}
-Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
+Handle<Foreign> Factory::NewForeign(Address addr, AllocationType allocation) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
Map map = *foreign_map();
HeapObject result =
- AllocateRawWithImmortalMap(map->instance_size(), pretenure, map);
+ AllocateRawWithImmortalMap(map->instance_size(), allocation, map);
Handle<Foreign> foreign(Foreign::cast(result), isolate());
foreign->set_foreign_address(addr);
return foreign;
}
-Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
+Handle<ByteArray> Factory::NewByteArray(int length, AllocationType allocation) {
DCHECK_LE(0, length);
if (length > ByteArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
int size = ByteArray::SizeFor(length);
HeapObject result =
- AllocateRawWithImmortalMap(size, pretenure, *byte_array_map());
+ AllocateRawWithImmortalMap(size, allocation, *byte_array_map());
Handle<ByteArray> array(ByteArray::cast(result), isolate());
array->set_length(length);
array->clear_padding();
@@ -1774,19 +1806,19 @@ Handle<BytecodeArray> Factory::NewBytecodeArray(
if (length > BytecodeArray::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
}
- // Bytecode array is pretenured, so constant pool array should be too.
+ // Bytecode array is AllocationType::kOld, so constant pool array should be
+ // too.
DCHECK(!Heap::InYoungGeneration(*constant_pool));
int size = BytecodeArray::SizeFor(length);
- HeapObject result =
- AllocateRawWithImmortalMap(size, TENURED, *bytecode_array_map());
+ HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kOld,
+ *bytecode_array_map());
Handle<BytecodeArray> instance(BytecodeArray::cast(result), isolate());
instance->set_length(length);
instance->set_frame_size(frame_size);
instance->set_parameter_count(parameter_count);
instance->set_incoming_new_target_or_generator_register(
interpreter::Register::invalid_value());
- instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
instance->set_osr_loop_nesting_level(0);
instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance->set_constant_pool(*constant_pool);
@@ -1801,12 +1833,12 @@ Handle<BytecodeArray> Factory::NewBytecodeArray(
Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
int length, ExternalArrayType array_type, void* external_pointer,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
// TODO(7881): Smi length check
DCHECK(0 <= length && length <= Smi::kMaxValue);
int size = FixedTypedArrayBase::kHeaderSize;
HeapObject result = AllocateRawWithImmortalMap(
- size, pretenure,
+ size, allocation,
ReadOnlyRoots(isolate()).MapForFixedTypedArray(array_type));
Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(result),
isolate());
@@ -1818,7 +1850,7 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
size_t length, size_t byte_length, ExternalArrayType array_type,
- bool initialize, PretenureFlag pretenure) {
+ bool initialize, AllocationType allocation) {
// TODO(7881): Smi length check
DCHECK(0 <= length && length <= Smi::kMaxValue);
CHECK(byte_length <= kMaxInt - FixedTypedArrayBase::kDataOffset);
@@ -1828,7 +1860,7 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
AllocationAlignment alignment =
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned;
HeapObject object = AllocateRawWithImmortalMap(static_cast<int>(size),
- pretenure, map, alignment);
+ allocation, map, alignment);
Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(object),
isolate());
@@ -1843,8 +1875,8 @@ Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
Handle<Cell> Factory::NewCell(Handle<Object> value) {
AllowDeferredHandleDereference convert_to_cell;
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
- HeapObject result =
- AllocateRawWithImmortalMap(Cell::kSize, TENURED, *cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(
+ Cell::kSize, AllocationType::kOld, *cell_map());
Handle<Cell> cell(Cell::cast(result), isolate());
cell->set_value(*value);
return cell;
@@ -1852,48 +1884,43 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
- *no_closures_cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(
+ FeedbackCell::kSize, AllocationType::kOld, *no_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
+ cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
+ cell->clear_padding();
return cell;
}
Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
- *one_closure_cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(
+ FeedbackCell::kSize, AllocationType::kOld, *one_closure_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
+ cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
+ cell->clear_padding();
return cell;
}
Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
- *many_closures_cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(
+ FeedbackCell::kSize, AllocationType::kOld, *many_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
- return cell;
-}
-
-Handle<FeedbackCell> Factory::NewNoFeedbackCell() {
- AllowDeferredHandleDereference convert_to_cell;
- HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
- *no_feedback_cell_map());
- Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
- // Set the value to undefined. We wouldn't allocate feedback vectors with
- // NoFeedbackCell map type.
- cell->set_value(*undefined_value());
+ cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
+ cell->clear_padding();
return cell;
}
Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
DCHECK(name->IsUniqueName());
STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
- HeapObject result = AllocateRawWithImmortalMap(PropertyCell::kSize, pretenure,
- *global_property_cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(
+ PropertyCell::kSize, allocation, *global_property_cell_map());
Handle<PropertyCell> cell(PropertyCell::cast(result), isolate());
cell->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
SKIP_WRITE_BARRIER);
@@ -1905,13 +1932,14 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name,
Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
int slack,
- AllocationType type) {
- DCHECK(Heap::IsRegularObjectAllocation(type));
+ AllocationType allocation) {
+ DCHECK(Heap::IsRegularObjectAllocation(allocation));
int number_of_all_descriptors = number_of_descriptors + slack;
// Zero-length case must be handled outside.
DCHECK_LT(0, number_of_all_descriptors);
int size = DescriptorArray::SizeFor(number_of_all_descriptors);
- HeapObject obj = isolate()->heap()->AllocateRawWithRetryOrFail(size, type);
+ HeapObject obj =
+ isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
obj->set_map_after_allocation(*descriptor_array_map(), SKIP_WRITE_BARRIER);
DescriptorArray array = DescriptorArray::cast(obj);
array->Initialize(*empty_enum_cache(), *undefined_value(),
@@ -1923,9 +1951,10 @@ Handle<TransitionArray> Factory::NewTransitionArray(int number_of_transitions,
int slack) {
int capacity = TransitionArray::LengthFor(number_of_transitions + slack);
Handle<TransitionArray> array = NewWeakFixedArrayWithMap<TransitionArray>(
- RootIndex::kTransitionArrayMap, capacity, TENURED);
- // Transition arrays are tenured. When black allocation is on we have to
- // add the transition array to the list of encountered_transition_arrays.
+ RootIndex::kTransitionArrayMap, capacity, AllocationType::kOld);
+ // Transition arrays are AllocationType::kOld. When black allocation is on we
+ // have to add the transition array to the list of
+ // encountered_transition_arrays.
Heap* heap = isolate()->heap();
if (heap->incremental_marking()->black_allocation()) {
heap->mark_compact_collector()->AddTransitionArray(*array);
@@ -1941,8 +1970,8 @@ Handle<TransitionArray> Factory::NewTransitionArray(int number_of_transitions,
Handle<AllocationSite> Factory::NewAllocationSite(bool with_weak_next) {
Handle<Map> map = with_weak_next ? allocation_site_map()
: allocation_site_without_weaknext_map();
- Handle<AllocationSite> site(AllocationSite::cast(New(map, TENURED)),
- isolate());
+ Handle<AllocationSite> site(
+ AllocationSite::cast(New(map, AllocationType::kOld)), isolate());
site->Initialize();
if (with_weak_next) {
@@ -1977,7 +2006,7 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map->set_constructor_or_backpointer(*null_value(), SKIP_WRITE_BARRIER);
map->set_instance_size(instance_size);
if (map->IsJSObjectMap()) {
- DCHECK(!isolate()->heap()->InReadOnlySpace(map));
+ DCHECK(!ReadOnlyHeap::Contains(map));
map->SetInObjectPropertiesStartInWords(instance_size / kTaggedSize -
inobject_properties);
DCHECK_EQ(map->GetInObjectProperties(), inobject_properties);
@@ -2104,7 +2133,7 @@ void initialize_length<PropertyArray>(Handle<PropertyArray> array, int length) {
template <typename T>
Handle<T> Factory::CopyArrayWithMap(Handle<T> src, Handle<Map> map) {
int len = src->length();
- HeapObject obj = AllocateRawFixedArray(len, NOT_TENURED);
+ HeapObject obj = AllocateRawFixedArray(len, AllocationType::kYoung);
obj->set_map_after_allocation(*map, SKIP_WRITE_BARRIER);
Handle<T> result(T::cast(obj), isolate());
@@ -2125,12 +2154,12 @@ Handle<T> Factory::CopyArrayWithMap(Handle<T> src, Handle<Map> map) {
template <typename T>
Handle<T> Factory::CopyArrayAndGrow(Handle<T> src, int grow_by,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
DCHECK_LT(0, grow_by);
DCHECK_LE(grow_by, kMaxInt - src->length());
int old_len = src->length();
int new_len = old_len + grow_by;
- HeapObject obj = AllocateRawFixedArray(new_len, pretenure);
+ HeapObject obj = AllocateRawFixedArray(new_len, allocation);
obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
Handle<T> result(T::cast(obj), isolate());
@@ -2151,18 +2180,18 @@ Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
Handle<FixedArray> Factory::CopyFixedArrayAndGrow(Handle<FixedArray> array,
int grow_by,
- PretenureFlag pretenure) {
- return CopyArrayAndGrow(array, grow_by, pretenure);
+ AllocationType allocation) {
+ return CopyArrayAndGrow(array, grow_by, allocation);
}
Handle<WeakFixedArray> Factory::CopyWeakFixedArrayAndGrow(
- Handle<WeakFixedArray> src, int grow_by, PretenureFlag pretenure) {
+ Handle<WeakFixedArray> src, int grow_by, AllocationType allocation) {
DCHECK(
!src->IsTransitionArray()); // Compacted by GC, this code doesn't work.
int old_len = src->length();
int new_len = old_len + grow_by;
DCHECK_GE(new_len, old_len);
- HeapObject obj = AllocateRawFixedArray(new_len, pretenure);
+ HeapObject obj = AllocateRawFixedArray(new_len, allocation);
DCHECK_EQ(old_len, src->length());
obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
@@ -2179,11 +2208,11 @@ Handle<WeakFixedArray> Factory::CopyWeakFixedArrayAndGrow(
}
Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
- Handle<WeakArrayList> src, int grow_by, PretenureFlag pretenure) {
+ Handle<WeakArrayList> src, int grow_by, AllocationType allocation) {
int old_capacity = src->capacity();
int new_capacity = old_capacity + grow_by;
DCHECK_GE(new_capacity, old_capacity);
- HeapObject obj = AllocateRawWeakArrayList(new_capacity, pretenure);
+ HeapObject obj = AllocateRawWeakArrayList(new_capacity, allocation);
obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
WeakArrayList result = WeakArrayList::cast(obj);
@@ -2200,18 +2229,18 @@ Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
}
Handle<PropertyArray> Factory::CopyPropertyArrayAndGrow(
- Handle<PropertyArray> array, int grow_by, PretenureFlag pretenure) {
- return CopyArrayAndGrow(array, grow_by, pretenure);
+ Handle<PropertyArray> array, int grow_by, AllocationType allocation) {
+ return CopyArrayAndGrow(array, grow_by, allocation);
}
Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array,
int new_len,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
DCHECK_LE(0, new_len);
DCHECK_LE(new_len, array->length());
if (new_len == 0) return empty_fixed_array();
- HeapObject obj = AllocateRawFixedArray(new_len, pretenure);
+ HeapObject obj = AllocateRawFixedArray(new_len, allocation);
obj->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
Handle<FixedArray> result(FixedArray::cast(obj), isolate());
result->set_length(new_len);
@@ -2232,7 +2261,7 @@ Handle<FixedArray> Factory::CopyAndTenureFixedCOWArray(
Handle<FixedArray> array) {
DCHECK(Heap::InYoungGeneration(*array));
Handle<FixedArray> result =
- CopyFixedArrayUpTo(array, array->length(), TENURED);
+ CopyFixedArrayUpTo(array, array->length(), AllocationType::kOld);
// TODO(mvstanton): The map is set twice because of protection against calling
// set() on a COW FixedArray. Issue v8:3221 created to track this, and
@@ -2245,8 +2274,8 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FixedDoubleArray> array) {
int len = array->length();
if (len == 0) return array;
- Handle<FixedDoubleArray> result =
- Handle<FixedDoubleArray>::cast(NewFixedDoubleArray(len, NOT_TENURED));
+ Handle<FixedDoubleArray> result = Handle<FixedDoubleArray>::cast(
+ NewFixedDoubleArray(len, AllocationType::kYoung));
Heap::CopyBlock(
result->address() + FixedDoubleArray::kLengthOffset,
array->address() + FixedDoubleArray::kLengthOffset,
@@ -2257,8 +2286,9 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FeedbackVector> Factory::CopyFeedbackVector(
Handle<FeedbackVector> array) {
int len = array->length();
- HeapObject obj = AllocateRawWithImmortalMap(
- FeedbackVector::SizeFor(len), NOT_TENURED, *feedback_vector_map());
+ HeapObject obj = AllocateRawWithImmortalMap(FeedbackVector::SizeFor(len),
+ AllocationType::kYoung,
+ *feedback_vector_map());
Handle<FeedbackVector> result(FeedbackVector::cast(obj), isolate());
DisallowHeapAllocation no_gc;
@@ -2281,55 +2311,55 @@ Handle<FeedbackVector> Factory::CopyFeedbackVector(
return result;
}
-Handle<Object> Factory::NewNumber(double value, PretenureFlag pretenure) {
+Handle<Object> Factory::NewNumber(double value, AllocationType allocation) {
// Materialize as a SMI if possible.
int32_t int_value;
if (DoubleToSmiInteger(value, &int_value)) {
return handle(Smi::FromInt(int_value), isolate());
}
- return NewHeapNumber(value, pretenure);
+ return NewHeapNumber(value, allocation);
}
Handle<Object> Factory::NewNumberFromInt(int32_t value,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate());
// Bypass NewNumber to avoid various redundant checks.
- return NewHeapNumber(FastI2D(value), pretenure);
+ return NewHeapNumber(FastI2D(value), allocation);
}
Handle<Object> Factory::NewNumberFromUint(uint32_t value,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
int32_t int32v = static_cast<int32_t>(value);
if (int32v >= 0 && Smi::IsValid(int32v)) {
return handle(Smi::FromInt(int32v), isolate());
}
- return NewHeapNumber(FastUI2D(value), pretenure);
+ return NewHeapNumber(FastUI2D(value), allocation);
}
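A minimal sketch of the Smi fast path in the number constructors above (illustrative only; assumes a Factory* factory is in scope):

  // DoubleToSmiInteger succeeds, so a Smi handle is returned and nothing is allocated.
  Handle<Object> a = factory->NewNumber(7.0, AllocationType::kYoung);
  // Not representable as a Smi integer, so a HeapNumber is allocated (young space here).
  Handle<Object> b = factory->NewNumber(0.5, AllocationType::kYoung);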
-Handle<HeapNumber> Factory::NewHeapNumber(PretenureFlag pretenure) {
+Handle<HeapNumber> Factory::NewHeapNumber(AllocationType allocation) {
STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
Map map = *heap_number_map();
- HeapObject result = AllocateRawWithImmortalMap(HeapNumber::kSize, pretenure,
+ HeapObject result = AllocateRawWithImmortalMap(HeapNumber::kSize, allocation,
map, kDoubleUnaligned);
return handle(HeapNumber::cast(result), isolate());
}
Handle<MutableHeapNumber> Factory::NewMutableHeapNumber(
- PretenureFlag pretenure) {
+ AllocationType allocation) {
STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
Map map = *mutable_heap_number_map();
HeapObject result = AllocateRawWithImmortalMap(
- MutableHeapNumber::kSize, pretenure, map, kDoubleUnaligned);
+ MutableHeapNumber::kSize, allocation, map, kDoubleUnaligned);
return handle(MutableHeapNumber::cast(result), isolate());
}
Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
if (length < 0 || length > BigInt::kMaxLength) {
isolate()->heap()->FatalProcessOutOfMemory("invalid BigInt length");
}
HeapObject result = AllocateRawWithImmortalMap(BigInt::SizeFor(length),
- pretenure, *bigint_map());
+ allocation, *bigint_map());
FreshlyAllocatedBigInt bigint = FreshlyAllocatedBigInt::cast(result);
bigint->clear_padding();
return handle(bigint, isolate());
@@ -2414,8 +2444,9 @@ DEFINE_ERROR(WasmRuntimeError, wasm_runtime_error)
Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
Handle<SharedFunctionInfo> info,
Handle<Context> context,
- PretenureFlag pretenure) {
- Handle<JSFunction> function(JSFunction::cast(New(map, pretenure)), isolate());
+ AllocationType allocation) {
+ Handle<JSFunction> function(JSFunction::cast(New(map, allocation)),
+ isolate());
function->initialize_properties();
function->initialize_elements();
@@ -2512,7 +2543,7 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
// TODO(littledan): Why do we have this is_generator test when
// NewFunctionPrototype already handles finding an appropriately
// shared prototype?
- Handle<Object> prototype = args.maybe_prototype_.ToHandleChecked();
+ Handle<HeapObject> prototype = args.maybe_prototype_.ToHandleChecked();
if (!IsResumableFunction(result->shared()->kind())) {
if (prototype->IsTheHole(isolate())) {
prototype = NewFunctionPrototype(result);
@@ -2562,41 +2593,42 @@ Handle<WeakCell> Factory::NewWeakCell() {
// Allocate the WeakCell object in the old space, because 1) WeakCell weakness
// handling is only implemented in the old space 2) they're supposedly
// long-living. TODO(marja, gsathya): Support WeakCells in Scavenger.
- Handle<WeakCell> result(WeakCell::cast(AllocateRawWithImmortalMap(
- WeakCell::kSize, TENURED, *weak_cell_map())),
- isolate());
+ Handle<WeakCell> result(
+ WeakCell::cast(AllocateRawWithImmortalMap(
+ WeakCell::kSize, AllocationType::kOld, *weak_cell_map())),
+ isolate());
return result;
}
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info, Handle<Context> context,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
Handle<Map> initial_map(
Map::cast(context->native_context()->get(info->function_map_index())),
isolate());
return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
- pretenure);
+ allocation);
}
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info, Handle<Context> context,
- Handle<FeedbackCell> feedback_cell, PretenureFlag pretenure) {
+ Handle<FeedbackCell> feedback_cell, AllocationType allocation) {
Handle<Map> initial_map(
Map::cast(context->native_context()->get(info->function_map_index())),
isolate());
return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
- feedback_cell, pretenure);
+ feedback_cell, allocation);
}
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
- Handle<Context> context, PretenureFlag pretenure) {
+ Handle<Context> context, AllocationType allocation) {
DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
Handle<JSFunction> result =
- NewFunction(initial_map, info, context, pretenure);
+ NewFunction(initial_map, info, context, allocation);
// Give compiler a chance to pre-initialize.
- Compiler::PostInstantiation(result, pretenure);
+ Compiler::PostInstantiation(result, allocation);
return result;
}
@@ -2604,10 +2636,10 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
Handle<Context> context, Handle<FeedbackCell> feedback_cell,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
Handle<JSFunction> result =
- NewFunction(initial_map, info, context, pretenure);
+ NewFunction(initial_map, info, context, allocation);
// Bump the closure count that is encoded in the feedback cell's map.
if (feedback_cell->map() == *no_closures_cell_map()) {
@@ -2615,8 +2647,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
} else if (feedback_cell->map() == *one_closure_cell_map()) {
feedback_cell->set_map(*many_closures_cell_map());
} else {
- DCHECK(feedback_cell->map() == *no_feedback_cell_map() ||
- feedback_cell->map() == *many_closures_cell_map());
+ DCHECK(feedback_cell->map() == *many_closures_cell_map());
}
// Check that the optimized code in the feedback cell wasn't marked for
@@ -2629,27 +2660,28 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
result->set_raw_feedback_cell(*feedback_cell);
// Give compiler a chance to pre-initialize.
- Compiler::PostInstantiation(result, pretenure);
+ Compiler::PostInstantiation(result, allocation);
return result;
}
Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
return NewFixedArrayWithMap<ScopeInfo>(RootIndex::kScopeInfoMap, length,
- TENURED);
+ AllocationType::kOld);
}
Handle<ModuleInfo> Factory::NewModuleInfo() {
- return NewFixedArrayWithMap<ModuleInfo>(RootIndex::kModuleInfoMap,
- ModuleInfo::kLength, TENURED);
+ return NewFixedArrayWithMap<ModuleInfo>(
+ RootIndex::kModuleInfoMap, ModuleInfo::kLength, AllocationType::kOld);
}
Handle<PreparseData> Factory::NewPreparseData(int data_length,
int children_length) {
int size = PreparseData::SizeFor(data_length, children_length);
- Handle<PreparseData> result(PreparseData::cast(AllocateRawWithImmortalMap(
- size, TENURED, *preparse_data_map())),
- isolate());
+ Handle<PreparseData> result(
+ PreparseData::cast(AllocateRawWithImmortalMap(size, AllocationType::kOld,
+ *preparse_data_map())),
+ isolate());
result->set_data_length(data_length);
result->set_children_length(children_length);
MemsetTagged(result->inner_data_start(), *null_value(), children_length);
@@ -2663,8 +2695,8 @@ Factory::NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
int32_t end_position,
int32_t function_literal_id) {
Handle<UncompiledDataWithoutPreparseData> result(
- UncompiledDataWithoutPreparseData::cast(
- New(uncompiled_data_without_preparse_data_map(), TENURED)),
+ UncompiledDataWithoutPreparseData::cast(New(
+ uncompiled_data_without_preparse_data_map(), AllocationType::kOld)),
isolate());
UncompiledData::Initialize(*result, *inferred_name, start_position,
@@ -2680,7 +2712,7 @@ Factory::NewUncompiledDataWithPreparseData(Handle<String> inferred_name,
Handle<PreparseData> preparse_data) {
Handle<UncompiledDataWithPreparseData> result(
UncompiledDataWithPreparseData::cast(
- New(uncompiled_data_with_preparse_data_map(), TENURED)),
+ New(uncompiled_data_with_preparse_data_map(), AllocationType::kOld)),
isolate());
UncompiledDataWithPreparseData::Initialize(
@@ -2699,7 +2731,8 @@ Handle<JSObject> Factory::NewExternal(void* value) {
Handle<CodeDataContainer> Factory::NewCodeDataContainer(int flags) {
Handle<CodeDataContainer> data_container(
- CodeDataContainer::cast(New(code_data_container_map(), TENURED)),
+ CodeDataContainer::cast(
+ New(code_data_container_map(), AllocationType::kOld)),
isolate());
data_container->set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
data_container->set_kind_specific_flags(flags);
@@ -2713,9 +2746,10 @@ MaybeHandle<Code> Factory::TryNewCode(
MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
bool is_turbofanned, int stack_slots) {
// Allocate objects needed for code initialization.
- Handle<ByteArray> reloc_info = NewByteArray(
- desc.reloc_size,
- Builtins::IsBuiltinId(builtin_index) ? TENURED_READ_ONLY : TENURED);
+ Handle<ByteArray> reloc_info =
+ NewByteArray(desc.reloc_size, Builtins::IsBuiltinId(builtin_index)
+ ? AllocationType::kReadOnly
+ : AllocationType::kOld);
Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
Handle<ByteArray> source_position_table =
maybe_source_position_table.is_null()
@@ -2768,9 +2802,10 @@ Handle<Code> Factory::NewCode(
MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
bool is_turbofanned, int stack_slots) {
// Allocate objects needed for code initialization.
- Handle<ByteArray> reloc_info = NewByteArray(
- desc.reloc_size,
- Builtins::IsBuiltinId(builtin_index) ? TENURED_READ_ONLY : TENURED);
+ Handle<ByteArray> reloc_info =
+ NewByteArray(desc.reloc_size, Builtins::IsBuiltinId(builtin_index)
+ ? AllocationType::kReadOnly
+ : AllocationType::kOld);
Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
Handle<ByteArray> source_position_table =
maybe_source_position_table.is_null()
@@ -2905,8 +2940,8 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
Handle<BytecodeArray> Factory::CopyBytecodeArray(
Handle<BytecodeArray> bytecode_array) {
int size = BytecodeArray::SizeFor(bytecode_array->length());
- HeapObject result =
- AllocateRawWithImmortalMap(size, TENURED, *bytecode_array_map());
+ HeapObject result = AllocateRawWithImmortalMap(size, AllocationType::kOld,
+ *bytecode_array_map());
Handle<BytecodeArray> copy(BytecodeArray::cast(result), isolate());
copy->set_length(bytecode_array->length());
@@ -2917,7 +2952,6 @@ Handle<BytecodeArray> Factory::CopyBytecodeArray(
copy->set_constant_pool(bytecode_array->constant_pool());
copy->set_handler_table(bytecode_array->handler_table());
copy->set_source_position_table(bytecode_array->source_position_table());
- copy->set_interrupt_budget(bytecode_array->interrupt_budget());
copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
copy->set_bytecode_age(bytecode_array->bytecode_age());
bytecode_array->CopyBytecodesTo(*copy);
@@ -2925,15 +2959,15 @@ Handle<BytecodeArray> Factory::CopyBytecodeArray(
}
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
JSFunction::EnsureHasInitialMap(constructor);
Handle<Map> map(constructor->initial_map(), isolate());
- return NewJSObjectFromMap(map, pretenure);
+ return NewJSObjectFromMap(map, allocation);
}
-Handle<JSObject> Factory::NewJSObjectWithNullProto(PretenureFlag pretenure) {
+Handle<JSObject> Factory::NewJSObjectWithNullProto(AllocationType allocation) {
Handle<JSObject> result =
- NewJSObject(isolate()->object_function(), pretenure);
+ NewJSObject(isolate()->object_function(), allocation);
Handle<Map> new_map = Map::Copy(
isolate(), Handle<Map>(result->map(), isolate()), "ObjectWithNullProto");
Map::SetPrototype(isolate(), new_map, null_value());
@@ -2984,8 +3018,8 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
}
// Allocate the global object and initialize it with the backing store.
- Handle<JSGlobalObject> global(JSGlobalObject::cast(New(map, TENURED)),
- isolate());
+ Handle<JSGlobalObject> global(
+ JSGlobalObject::cast(New(map, AllocationType::kOld)), isolate());
InitializeJSObjectFromMap(global, dictionary, map);
// Create a new map for the global object.
@@ -3046,7 +3080,7 @@ void Factory::InitializeJSObjectBody(Handle<JSObject> obj, Handle<Map> map,
}
Handle<JSObject> Factory::NewJSObjectFromMap(
- Handle<Map> map, PretenureFlag pretenure,
+ Handle<Map> map, AllocationType allocation,
Handle<AllocationSite> allocation_site) {
// JSFunctions should be allocated using AllocateFunction to be
// properly initialized.
@@ -3057,7 +3091,7 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
HeapObject obj =
- AllocateRawWithAllocationSite(map, pretenure, allocation_site);
+ AllocateRawWithAllocationSite(map, allocation, allocation_site);
Handle<JSObject> js_obj(JSObject::cast(obj), isolate());
InitializeJSObjectFromMap(js_obj, empty_fixed_array(), map);
@@ -3069,24 +3103,24 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
}
Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
DCHECK(map->is_dictionary_map());
Handle<NameDictionary> object_properties =
NameDictionary::New(isolate(), capacity);
- Handle<JSObject> js_object = NewJSObjectFromMap(map, pretenure);
+ Handle<JSObject> js_object = NewJSObjectFromMap(map, allocation);
js_object->set_raw_properties_or_hash(*object_properties);
return js_object;
}
Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
- Handle<Object> prototype, Handle<NameDictionary> properties,
- Handle<FixedArrayBase> elements, PretenureFlag pretenure) {
+ Handle<HeapObject> prototype, Handle<NameDictionary> properties,
+ Handle<FixedArrayBase> elements, AllocationType allocation) {
Handle<Map> object_map = isolate()->slow_object_with_object_prototype_map();
if (object_map->prototype() != *prototype) {
object_map = Map::TransitionToPrototype(isolate(), object_map, prototype);
}
DCHECK(object_map->is_dictionary_map());
- Handle<JSObject> object = NewJSObjectFromMap(object_map, pretenure);
+ Handle<JSObject> object = NewJSObjectFromMap(object_map, allocation);
object->set_raw_properties_or_hash(*properties);
if (*elements != ReadOnlyRoots(isolate()).empty_fixed_array()) {
DCHECK(elements->IsNumberDictionary());
@@ -3099,7 +3133,7 @@ Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
}
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
NativeContext native_context = isolate()->raw_native_context();
Map map = native_context->GetInitialJSArrayMap(elements_kind);
if (map.is_null()) {
@@ -3107,14 +3141,14 @@ Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
map = array_function->initial_map();
}
return Handle<JSArray>::cast(
- NewJSObjectFromMap(handle(map, isolate()), pretenure));
+ NewJSObjectFromMap(handle(map, isolate()), allocation));
}
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, int length,
int capacity,
ArrayStorageAllocationMode mode,
- PretenureFlag pretenure) {
- Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
+ AllocationType allocation) {
+ Handle<JSArray> array = NewJSArray(elements_kind, allocation);
NewJSArrayStorage(array, length, capacity, mode);
return array;
}
@@ -3122,9 +3156,9 @@ Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, int length,
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind,
int length,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
DCHECK(length <= elements->length());
- Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
+ Handle<JSArray> array = NewJSArray(elements_kind, allocation);
array->set_elements(*elements);
array->set_length(Smi::FromInt(length));
@@ -3217,7 +3251,8 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
: empty_fixed_array();
ReadOnlyRoots roots(isolate());
- Handle<Module> module = Handle<Module>::cast(NewStruct(MODULE_TYPE, TENURED));
+ Handle<Module> module =
+ Handle<Module>::cast(NewStruct(MODULE_TYPE, AllocationType::kOld));
module->set_code(*code);
module->set_exports(*exports);
module->set_regular_exports(*regular_exports);
@@ -3235,14 +3270,14 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
}
Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
Handle<JSFunction> array_buffer_fun(
shared == SharedFlag::kShared
? isolate()->native_context()->shared_array_buffer_fun()
: isolate()->native_context()->array_buffer_fun(),
isolate());
Handle<Map> map(array_buffer_fun->initial_map(), isolate());
- return Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, pretenure));
+ return Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
}
Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
@@ -3363,26 +3398,26 @@ void SetupArrayBufferView(i::Isolate* isolate,
} // namespace
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
Handle<JSFunction> typed_array_fun(GetTypedArrayFun(type, isolate()),
isolate());
Handle<Map> map(typed_array_fun->initial_map(), isolate());
- return Handle<JSTypedArray>::cast(NewJSObjectFromMap(map, pretenure));
+ return Handle<JSTypedArray>::cast(NewJSObjectFromMap(map, allocation));
}
Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
Handle<JSFunction> typed_array_fun(GetTypedArrayFun(elements_kind, isolate()),
isolate());
Handle<Map> map(typed_array_fun->initial_map(), isolate());
- return Handle<JSTypedArray>::cast(NewJSObjectFromMap(map, pretenure));
+ return Handle<JSTypedArray>::cast(NewJSObjectFromMap(map, allocation));
}
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSArrayBuffer> buffer,
size_t byte_offset, size_t length,
- PretenureFlag pretenure) {
- Handle<JSTypedArray> obj = NewJSTypedArray(type, pretenure);
+ AllocationType allocation) {
+ Handle<JSTypedArray> obj = NewJSTypedArray(type, allocation);
size_t element_size;
ElementsKind elements_kind;
@@ -3396,12 +3431,12 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
size_t byte_length = length * element_size;
SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length);
- Handle<Object> length_object = NewNumberFromSize(length, pretenure);
+ Handle<Object> length_object = NewNumberFromSize(length, allocation);
obj->set_length(*length_object);
Handle<FixedTypedArrayBase> elements = NewFixedTypedArrayWithExternalPointer(
static_cast<int>(length), type,
- static_cast<uint8_t*>(buffer->backing_store()) + byte_offset, pretenure);
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset, allocation);
Handle<Map> map = JSObject::GetElementsTransitionMap(obj, elements_kind);
JSObject::SetMapAndElements(obj, map, elements);
return obj;
@@ -3409,8 +3444,8 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
size_t number_of_elements,
- PretenureFlag pretenure) {
- Handle<JSTypedArray> obj = NewJSTypedArray(elements_kind, pretenure);
+ AllocationType allocation) {
+ Handle<JSTypedArray> obj = NewJSTypedArray(elements_kind, allocation);
DCHECK_EQ(obj->GetEmbedderFieldCount(),
v8::ArrayBufferView::kEmbedderFieldCount);
for (int i = 0; i < v8::ArrayBufferView::kEmbedderFieldCount; i++) {
@@ -3432,12 +3467,12 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
obj->set_length(Smi::FromIntptr(static_cast<intptr_t>(number_of_elements)));
Handle<JSArrayBuffer> buffer =
- NewJSArrayBuffer(SharedFlag::kNotShared, pretenure);
+ NewJSArrayBuffer(SharedFlag::kNotShared, allocation);
JSArrayBuffer::Setup(buffer, isolate(), true, nullptr, byte_length,
SharedFlag::kNotShared);
obj->set_buffer(*buffer);
Handle<FixedTypedArrayBase> elements = NewFixedTypedArray(
- number_of_elements, byte_length, array_type, true, pretenure);
+ number_of_elements, byte_length, array_type, true, allocation);
obj->set_elements(*elements);
return obj;
}
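A minimal sketch contrasting the two NewJSTypedArray overloads above (illustrative only; assumes a Factory* factory and an already set-up Handle<JSArrayBuffer> buffer are in scope):

  // View over an existing buffer: 16 uint8 elements starting at byte offset 0.
  Handle<JSTypedArray> view = factory->NewJSTypedArray(
      kExternalUint8Array, buffer, 0, 16, AllocationType::kYoung);
  // Self-contained variant: allocates a fresh backing buffer for 16 elements.
  Handle<JSTypedArray> fresh =
      factory->NewJSTypedArray(UINT8_ELEMENTS, 16, AllocationType::kYoung);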
@@ -3464,7 +3499,7 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
}
// Determine the prototype of the {target_function}.
- Handle<Object> prototype;
+ Handle<HeapObject> prototype;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), prototype,
JSReceiver::GetPrototype(isolate(), target_function), JSBoundFunction);
@@ -3515,7 +3550,8 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
map = Handle<Map>(isolate()->proxy_map());
}
DCHECK(map->prototype()->IsNull(isolate()));
- Handle<JSProxy> result(JSProxy::cast(New(map, NOT_TENURED)), isolate());
+ Handle<JSProxy> result(JSProxy::cast(New(map, AllocationType::kYoung)),
+ isolate());
result->initialize_properties();
result->set_target(*target);
result->set_handler(*handler);
@@ -3530,7 +3566,8 @@ Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
map->set_is_access_check_needed(true);
map->set_may_have_interesting_symbols(true);
LOG(isolate(), MapDetails(*map));
- return Handle<JSGlobalProxy>::cast(NewJSObjectFromMap(map, NOT_TENURED));
+ return Handle<JSGlobalProxy>::cast(
+ NewJSObjectFromMap(map, AllocationType::kYoung));
}
void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
@@ -3574,6 +3611,13 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
SharedFunctionInfo::InitFromFunctionLiteral(shared, literal, is_toplevel);
SharedFunctionInfo::SetScript(shared, script, literal->function_literal_id(),
false);
+ TRACE_EVENT_OBJECT_CREATED_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("v8.compile"), "SharedFunctionInfo",
+ TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope, shared->TraceID()));
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("v8.compile"), "SharedFunctionInfo",
+ TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope, shared->TraceID()),
+ shared->ToTracedValue());
return shared;
}
@@ -3582,7 +3626,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
int end_position, Handle<Script> script, Handle<Object> stack_frames) {
Handle<Map> map = message_object_map();
Handle<JSMessageObject> message_obj(
- JSMessageObject::cast(New(map, NOT_TENURED)), isolate());
+ JSMessageObject::cast(New(map, AllocationType::kYoung)), isolate());
message_obj->set_raw_properties_or_hash(*empty_fixed_array(),
SKIP_WRITE_BARRIER);
message_obj->initialize_elements();
@@ -3620,12 +3664,12 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> shared_name;
bool has_shared_name = maybe_name.ToHandle(&shared_name);
if (has_shared_name) {
- shared_name = String::Flatten(isolate(), shared_name, TENURED);
+ shared_name = String::Flatten(isolate(), shared_name, AllocationType::kOld);
}
Handle<Map> map = shared_function_info_map();
- Handle<SharedFunctionInfo> share(SharedFunctionInfo::cast(New(map, TENURED)),
- isolate());
+ Handle<SharedFunctionInfo> share(
+ SharedFunctionInfo::cast(New(map, AllocationType::kOld)), isolate());
{
DisallowHeapAllocation no_allocation;
@@ -3666,8 +3710,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_length(0);
share->set_internal_formal_parameter_count(0);
share->set_expected_nof_properties(0);
- share->set_builtin_function_id(
- BuiltinFunctionId::kInvalidBuiltinFunctionId);
share->set_raw_function_token_offset(0);
// All flags default to false or 0.
share->set_flags(0);
@@ -3711,14 +3753,15 @@ Handle<String> Factory::NumberToStringCacheSet(Handle<Object> number, int hash,
bool check_cache) {
// We tenure the allocated string since it is referenced from the
// number-string cache which lives in the old space.
- Handle<String> js_string =
- NewStringFromAsciiChecked(string, check_cache ? TENURED : NOT_TENURED);
+ Handle<String> js_string = NewStringFromAsciiChecked(
+ string, check_cache ? AllocationType::kOld : AllocationType::kYoung);
if (!check_cache) return js_string;
if (!number_string_cache()->get(hash * 2)->IsUndefined(isolate())) {
int full_size = isolate()->heap()->MaxNumberToStringCacheSize();
if (number_string_cache()->length() != full_size) {
- Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED);
+ Handle<FixedArray> new_cache =
+ NewFixedArray(full_size, AllocationType::kOld);
isolate()->heap()->set_number_string_cache(*new_cache);
return js_string;
}
@@ -3781,8 +3824,8 @@ Handle<String> Factory::NumberToString(Smi number, bool check_cache) {
}
Handle<ClassPositions> Factory::NewClassPositions(int start, int end) {
- Handle<ClassPositions> class_positions =
- Handle<ClassPositions>::cast(NewStruct(CLASS_POSITIONS_TYPE, TENURED));
+ Handle<ClassPositions> class_positions = Handle<ClassPositions>::cast(
+ NewStruct(CLASS_POSITIONS_TYPE, AllocationType::kOld));
class_positions->set_start(start);
class_positions->set_end(end);
return class_positions;
@@ -3793,7 +3836,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Heap* heap = isolate()->heap();
Handle<DebugInfo> debug_info =
- Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE, TENURED));
+ Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE, AllocationType::kOld));
debug_info->set_flags(DebugInfo::kNone);
debug_info->set_shared(*shared);
debug_info->set_debugger_hints(0);
@@ -3828,8 +3871,8 @@ Handle<CoverageInfo> Factory::NewCoverageInfo(
}
Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
- Handle<BreakPointInfo> new_break_point_info =
- Handle<BreakPointInfo>::cast(NewStruct(TUPLE2_TYPE, TENURED));
+ Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
+ NewStruct(TUPLE2_TYPE, AllocationType::kOld));
new_break_point_info->set_source_position(source_position);
new_break_point_info->set_break_points(*undefined_value());
return new_break_point_info;
@@ -3837,7 +3880,7 @@ Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
Handle<BreakPoint> Factory::NewBreakPoint(int id, Handle<String> condition) {
Handle<BreakPoint> new_break_point =
- Handle<BreakPoint>::cast(NewStruct(TUPLE2_TYPE, TENURED));
+ Handle<BreakPoint>::cast(NewStruct(TUPLE2_TYPE, AllocationType::kOld));
new_break_point->set_id(id);
new_break_point->set_condition(*condition);
return new_break_point;
@@ -3846,7 +3889,7 @@ Handle<BreakPoint> Factory::NewBreakPoint(int id, Handle<String> condition) {
Handle<StackTraceFrame> Factory::NewStackTraceFrame(
Handle<FrameArray> frame_array, int index) {
Handle<StackTraceFrame> frame = Handle<StackTraceFrame>::cast(
- NewStruct(STACK_TRACE_FRAME_TYPE, NOT_TENURED));
+ NewStruct(STACK_TRACE_FRAME_TYPE, AllocationType::kYoung));
frame->set_frame_array(*frame_array);
frame->set_frame_index(index);
frame->set_frame_info(*undefined_value());
@@ -3859,7 +3902,7 @@ Handle<StackTraceFrame> Factory::NewStackTraceFrame(
Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
Handle<StackFrameInfo> stack_frame_info = Handle<StackFrameInfo>::cast(
- NewStruct(STACK_FRAME_INFO_TYPE, NOT_TENURED));
+ NewStruct(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
stack_frame_info->set_line_number(0);
stack_frame_info->set_column_number(0);
stack_frame_info->set_script_id(0);
@@ -3876,7 +3919,7 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
DCHECK(it.HasFrame());
Handle<StackFrameInfo> info = Handle<StackFrameInfo>::cast(
- NewStruct(STACK_FRAME_INFO_TYPE, NOT_TENURED));
+ NewStruct(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
info->set_flag(0);
const bool is_wasm = frame_array->IsAnyWasmFrame(index);
@@ -3922,7 +3965,7 @@ Factory::NewSourcePositionTableWithFrameCache(
Handle<SourcePositionTableWithFrameCache>
source_position_table_with_frame_cache =
Handle<SourcePositionTableWithFrameCache>::cast(
- NewStruct(TUPLE2_TYPE, TENURED));
+ NewStruct(TUPLE2_TYPE, AllocationType::kOld));
source_position_table_with_frame_cache->set_source_position_table(
*source_position_table);
source_position_table_with_frame_cache->set_stack_frame_cache(
@@ -3977,7 +4020,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
Handle<Object> maybe_cache(context->map_cache(), isolate());
if (maybe_cache->IsUndefined(isolate())) {
// Allocate the new map cache for the native context.
- maybe_cache = NewWeakFixedArray(kMapCacheSize, TENURED);
+ maybe_cache = NewWeakFixedArray(kMapCacheSize, AllocationType::kOld);
context->set_map_cache(*maybe_cache);
} else {
// Check to see whether there is a matching element in the cache.
@@ -4015,7 +4058,7 @@ Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
UNREACHABLE();
break;
}
- return handle(LoadHandler::cast(New(map, TENURED)), isolate());
+ return handle(LoadHandler::cast(New(map, AllocationType::kOld)), isolate());
}
Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
@@ -4037,7 +4080,7 @@ Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
UNREACHABLE();
break;
}
- return handle(StoreHandler::cast(New(map, TENURED)), isolate());
+ return handle(StoreHandler::cast(New(map, AllocationType::kOld)), isolate());
}
void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp, JSRegExp::Type type,
@@ -4301,9 +4344,9 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
return map;
}
-Handle<JSPromise> Factory::NewJSPromiseWithoutHook(PretenureFlag pretenure) {
+Handle<JSPromise> Factory::NewJSPromiseWithoutHook(AllocationType allocation) {
Handle<JSPromise> promise = Handle<JSPromise>::cast(
- NewJSObject(isolate()->promise_function(), pretenure));
+ NewJSObject(isolate()->promise_function(), allocation));
promise->set_reactions_or_result(Smi::kZero);
promise->set_flags(0);
for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
@@ -4312,8 +4355,8 @@ Handle<JSPromise> Factory::NewJSPromiseWithoutHook(PretenureFlag pretenure) {
return promise;
}
-Handle<JSPromise> Factory::NewJSPromise(PretenureFlag pretenure) {
- Handle<JSPromise> promise = NewJSPromiseWithoutHook(pretenure);
+Handle<JSPromise> Factory::NewJSPromise(AllocationType allocation) {
+ Handle<JSPromise> promise = NewJSPromiseWithoutHook(allocation);
isolate()->RunPromiseHook(PromiseHookType::kInit, promise, undefined_value());
return promise;
}
@@ -4322,8 +4365,8 @@ Handle<CallHandlerInfo> Factory::NewCallHandlerInfo(bool has_no_side_effect) {
Handle<Map> map = has_no_side_effect
? side_effect_free_call_handler_info_map()
: side_effect_call_handler_info_map();
- Handle<CallHandlerInfo> info(CallHandlerInfo::cast(New(map, TENURED)),
- isolate());
+ Handle<CallHandlerInfo> info(
+ CallHandlerInfo::cast(New(map, AllocationType::kOld)), isolate());
Object undefined_value = ReadOnlyRoots(isolate()).undefined_value();
info->set_callback(undefined_value);
info->set_js_callback(undefined_value);
@@ -4379,7 +4422,7 @@ NewFunctionArgs NewFunctionArgs::ForFunctionWithoutCode(
// static
NewFunctionArgs NewFunctionArgs::ForBuiltinWithPrototype(
- Handle<String> name, Handle<Object> prototype, InstanceType type,
+ Handle<String> name, Handle<HeapObject> prototype, InstanceType type,
int instance_size, int inobject_properties, int builtin_id,
MutableMode prototype_mutability) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
diff --git a/chromium/v8/src/heap/factory.h b/chromium/v8/src/heap/factory.h
index 3ac69cb44dc..94646517a0b 100644
--- a/chromium/v8/src/heap/factory.h
+++ b/chromium/v8/src/heap/factory.h
@@ -18,7 +18,6 @@
#include "src/objects/dictionary.h"
#include "src/objects/js-array.h"
#include "src/objects/js-regexp.h"
-#include "src/objects/ordered-hash-table.h"
#include "src/objects/string.h"
namespace v8 {
@@ -104,38 +103,41 @@ enum FunctionMode {
// Interface for handle based allocation.
class V8_EXPORT_PRIVATE Factory {
public:
- Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
- Handle<Object> to_number, const char* type_of,
- byte kind,
- PretenureFlag pretenure = TENURED_READ_ONLY);
+ Handle<Oddball> NewOddball(
+ Handle<Map> map, const char* to_string, Handle<Object> to_number,
+ const char* type_of, byte kind,
+ AllocationType allocation = AllocationType::kReadOnly);
// Marks self references within code generation.
- Handle<Oddball> NewSelfReferenceMarker(PretenureFlag pretenure = TENURED);
+ Handle<Oddball> NewSelfReferenceMarker(
+ AllocationType allocation = AllocationType::kOld);
// Allocates a fixed array-like object with given map and initialized with
// undefined values.
template <typename T = FixedArray>
- Handle<T> NewFixedArrayWithMap(RootIndex map_root_index, int length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<T> NewFixedArrayWithMap(
+ RootIndex map_root_index, int length,
+ AllocationType allocation = AllocationType::kYoung);
// Allocates a weak fixed array-like object with given map and initialized
// with undefined values.
template <typename T = WeakFixedArray>
- Handle<T> NewWeakFixedArrayWithMap(RootIndex map_root_index, int length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<T> NewWeakFixedArrayWithMap(
+ RootIndex map_root_index, int length,
+ AllocationType allocation = AllocationType::kYoung);
// Allocates a fixed array initialized with undefined values.
- Handle<FixedArray> NewFixedArray(int length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<FixedArray> NewFixedArray(
+ int length, AllocationType allocation = AllocationType::kYoung);
// Allocates a fixed array which may contain in-place weak references. The
// array is initialized with undefined values
Handle<WeakFixedArray> NewWeakFixedArray(
- int length, PretenureFlag pretenure = NOT_TENURED);
+ int length, AllocationType allocation = AllocationType::kYoung);
// Allocates a property array initialized with undefined values.
- Handle<PropertyArray> NewPropertyArray(int length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<PropertyArray> NewPropertyArray(
+ int length, AllocationType allocation = AllocationType::kYoung);
// Tries allocating a fixed array initialized with undefined values.
// In case of an allocation failure (OOM) an empty handle is returned.
// The caller has to manually signal an
@@ -143,24 +145,31 @@ class V8_EXPORT_PRIVATE Factory {
// NewFixedArray as a fallback.
V8_WARN_UNUSED_RESULT
MaybeHandle<FixedArray> TryNewFixedArray(
- int length, PretenureFlag pretenure = NOT_TENURED);
+ int length, AllocationType allocation = AllocationType::kYoung);
// Allocate a new fixed array with non-existing entries (the hole).
Handle<FixedArray> NewFixedArrayWithHoles(
- int length, PretenureFlag pretenure = NOT_TENURED);
+ int length, AllocationType allocation = AllocationType::kYoung);
// Allocates an uninitialized fixed array. It must be filled by the caller.
Handle<FixedArray> NewUninitializedFixedArray(
- int length, PretenureFlag pretenure = NOT_TENURED);
+ int length, AllocationType allocation = AllocationType::kYoung);
+
+ // Allocates a closure feedback cell array whose feedback cells are
+ // initialized with undefined values.
+ Handle<ClosureFeedbackCellArray> NewClosureFeedbackCellArray(
+ int num_slots, AllocationType allocation = AllocationType::kYoung);
// Allocates a feedback vector whose slots are initialized with undefined
// values.
Handle<FeedbackVector> NewFeedbackVector(
- Handle<SharedFunctionInfo> shared, PretenureFlag pretenure = NOT_TENURED);
+ Handle<SharedFunctionInfo> shared,
+ Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
+ AllocationType allocation = AllocationType::kYoung);
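A minimal usage sketch for the two declarations above (illustrative only; assumes a Factory* factory, a Handle<SharedFunctionInfo> shared, and an int num_slots are in scope):

  Handle<ClosureFeedbackCellArray> cells =
      factory->NewClosureFeedbackCellArray(num_slots);
  Handle<FeedbackVector> vector = factory->NewFeedbackVector(shared, cells);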
// Allocates a clean embedder data array with given capacity.
Handle<EmbedderDataArray> NewEmbedderDataArray(
- int length, PretenureFlag pretenure = NOT_TENURED);
+ int length, AllocationType allocation = AllocationType::kYoung);
// Allocates a fixed array for name-value pairs of boilerplate properties and
// calculates the number of properties we need to store in the backing store.
@@ -171,32 +180,33 @@ class V8_EXPORT_PRIVATE Factory {
// The function returns a pre-allocated empty fixed array for length = 0,
// so the return type must be the general fixed array class.
Handle<FixedArrayBase> NewFixedDoubleArray(
- int length, PretenureFlag pretenure = NOT_TENURED);
+ int length, AllocationType allocation = AllocationType::kYoung);
// Allocate a new fixed double array with hole values.
Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles(
- int size, PretenureFlag pretenure = NOT_TENURED);
+ int size, AllocationType allocation = AllocationType::kYoung);
// Allocates a FeedbackMetadata object and zeroes the data section.
- Handle<FeedbackMetadata> NewFeedbackMetadata(int slot_count,
- PretenureFlag tenure = TENURED);
+ Handle<FeedbackMetadata> NewFeedbackMetadata(
+ int slot_count, int feedback_cell_count,
+ AllocationType allocation = AllocationType::kOld);
- Handle<FrameArray> NewFrameArray(int number_of_frames,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<FrameArray> NewFrameArray(
+ int number_of_frames, AllocationType allocation = AllocationType::kYoung);
Handle<OrderedHashSet> NewOrderedHashSet();
Handle<OrderedHashMap> NewOrderedHashMap();
Handle<OrderedNameDictionary> NewOrderedNameDictionary();
Handle<SmallOrderedHashSet> NewSmallOrderedHashSet(
- int capacity = SmallOrderedHashSet::kMinCapacity,
- PretenureFlag pretenure = NOT_TENURED);
+ int capacity = kSmallOrderedHashSetMinCapacity,
+ AllocationType allocation = AllocationType::kYoung);
Handle<SmallOrderedHashMap> NewSmallOrderedHashMap(
- int capacity = SmallOrderedHashMap::kMinCapacity,
- PretenureFlag pretenure = NOT_TENURED);
+ int capacity = kSmallOrderedHashMapMinCapacity,
+ AllocationType allocation = AllocationType::kYoung);
Handle<SmallOrderedNameDictionary> NewSmallOrderedNameDictionary(
- int capacity = SmallOrderedHashMap::kMinCapacity,
- PretenureFlag pretenure = NOT_TENURED);
+ int capacity = kSmallOrderedHashMapMinCapacity,
+ AllocationType allocation = AllocationType::kYoung);
// Create a new PrototypeInfo struct.
Handle<PrototypeInfo> NewPrototypeInfo();
@@ -207,11 +217,11 @@ class V8_EXPORT_PRIVATE Factory {
// Create a new Tuple2 struct.
Handle<Tuple2> NewTuple2(Handle<Object> value1, Handle<Object> value2,
- PretenureFlag pretenure);
+ AllocationType allocation);
// Create a new Tuple3 struct.
Handle<Tuple3> NewTuple3(Handle<Object> value1, Handle<Object> value2,
- Handle<Object> value3, PretenureFlag pretenure);
+ Handle<Object> value3, AllocationType allocation);
// Create a new ArrayBoilerplateDescription struct.
Handle<ArrayBoilerplateDescription> NewArrayBoilerplateDescription(
@@ -246,9 +256,9 @@ class V8_EXPORT_PRIVATE Factory {
inline Handle<Name> InternalizeName(Handle<Name> name);
// String creation functions. Most of the string creation functions take
- // a Heap::PretenureFlag argument to optionally request that they be
- // allocated in the old generation. The pretenure flag defaults to
- // DONT_TENURE.
+ // an AllocationType argument to optionally request that they be
+ // allocated in the old generation. Otherwise the default is
+ // AllocationType::kYoung.
//
// Creates a new String object. There are two String encodings: one-byte and
// two-byte. One should choose between the three string factory functions
@@ -266,36 +276,41 @@ class V8_EXPORT_PRIVATE Factory {
//
// One-byte strings are pretenured when used as keys in the SourceCodeCache.
V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromOneByte(
- Vector<const uint8_t> str, PretenureFlag pretenure = NOT_TENURED);
+ Vector<const uint8_t> str,
+ AllocationType allocation = AllocationType::kYoung);
template <size_t N>
inline Handle<String> NewStringFromStaticChars(
- const char (&str)[N], PretenureFlag pretenure = NOT_TENURED) {
+ const char (&str)[N],
+ AllocationType allocation = AllocationType::kYoung) {
DCHECK(N == StrLength(str) + 1);
- return NewStringFromOneByte(StaticCharVector(str), pretenure)
+ return NewStringFromOneByte(StaticCharVector(str), allocation)
.ToHandleChecked();
}
inline Handle<String> NewStringFromAsciiChecked(
- const char* str, PretenureFlag pretenure = NOT_TENURED) {
- return NewStringFromOneByte(OneByteVector(str), pretenure)
+ const char* str, AllocationType allocation = AllocationType::kYoung) {
+ return NewStringFromOneByte(OneByteVector(str), allocation)
.ToHandleChecked();
}
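A minimal sketch of the allocation-type parameter described above (illustrative only; assumes a Factory* factory is in scope):

  Handle<String> young = factory->NewStringFromAsciiChecked("example");
  Handle<String> old_gen =
      factory->NewStringFromAsciiChecked("example", AllocationType::kOld);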
// UTF8 strings are pretenured when used for regexp literal patterns and
// flags in the parser.
V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromUtf8(
- Vector<const char> str, PretenureFlag pretenure = NOT_TENURED);
+ Vector<const char> str,
+ AllocationType allocation = AllocationType::kYoung);
V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromUtf8SubString(
Handle<SeqOneByteString> str, int begin, int end,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromTwoByte(
- Vector<const uc16> str, PretenureFlag pretenure = NOT_TENURED);
+ Vector<const uc16> str,
+ AllocationType allocation = AllocationType::kYoung);
V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromTwoByte(
- const ZoneVector<uc16>* str, PretenureFlag pretenure = NOT_TENURED);
+ const ZoneVector<uc16>* str,
+ AllocationType allocation = AllocationType::kYoung);
Handle<JSStringIterator> NewJSStringIterator(Handle<String> string);
@@ -331,9 +346,9 @@ class V8_EXPORT_PRIVATE Factory {
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
V8_WARN_UNUSED_RESULT MaybeHandle<SeqOneByteString> NewRawOneByteString(
- int length, PretenureFlag pretenure = NOT_TENURED);
+ int length, AllocationType allocation = AllocationType::kYoung);
V8_WARN_UNUSED_RESULT MaybeHandle<SeqTwoByteString> NewRawTwoByteString(
- int length, PretenureFlag pretenure = NOT_TENURED);
+ int length, AllocationType allocation = AllocationType::kYoung);
// Creates a single character string where the character has given code.
// A cache is used for Latin1 codes.
@@ -372,8 +387,9 @@ class V8_EXPORT_PRIVATE Factory {
const ExternalOneByteString::Resource* resource);
// Create a symbol in old or read-only space.
- Handle<Symbol> NewSymbol(PretenureFlag pretenure = TENURED);
- Handle<Symbol> NewPrivateSymbol(PretenureFlag pretenure = TENURED);
+ Handle<Symbol> NewSymbol(AllocationType allocation = AllocationType::kOld);
+ Handle<Symbol> NewPrivateSymbol(
+ AllocationType allocation = AllocationType::kOld);
Handle<Symbol> NewPrivateNameSymbol(Handle<String> name);
// Create a global (but otherwise uninitialized) context.
@@ -424,7 +440,7 @@ class V8_EXPORT_PRIVATE Factory {
int length);
Handle<Struct> NewStruct(InstanceType type,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
int aliased_context_slot);
@@ -432,9 +448,10 @@ class V8_EXPORT_PRIVATE Factory {
Handle<AccessorInfo> NewAccessorInfo();
Handle<Script> NewScript(Handle<String> source,
- PretenureFlag tenure = TENURED);
- Handle<Script> NewScriptWithId(Handle<String> source, int script_id,
- PretenureFlag tenure = TENURED);
+ AllocationType allocation = AllocationType::kOld);
+ Handle<Script> NewScriptWithId(
+ Handle<String> source, int script_id,
+ AllocationType allocation = AllocationType::kOld);
Handle<Script> CloneScript(Handle<Script> script);
Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
@@ -461,11 +478,11 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSFinalizationGroup> finalization_group);
// Foreign objects are pretenured when allocated by the bootstrapper.
- Handle<Foreign> NewForeign(Address addr,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<Foreign> NewForeign(
+ Address addr, AllocationType allocation = AllocationType::kYoung);
- Handle<ByteArray> NewByteArray(int length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<ByteArray> NewByteArray(
+ int length, AllocationType allocation = AllocationType::kYoung);
Handle<BytecodeArray> NewBytecodeArray(int length, const byte* raw_bytecodes,
int frame_size, int parameter_count,
@@ -473,25 +490,24 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FixedTypedArrayBase> NewFixedTypedArrayWithExternalPointer(
int length, ExternalArrayType array_type, void* external_pointer,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
Handle<FixedTypedArrayBase> NewFixedTypedArray(
size_t length, size_t byte_length, ExternalArrayType array_type,
- bool initialize, PretenureFlag pretenure = NOT_TENURED);
+ bool initialize, AllocationType allocation = AllocationType::kYoung);
Handle<Cell> NewCell(Handle<Object> value);
- Handle<PropertyCell> NewPropertyCell(Handle<Name> name,
- PretenureFlag pretenure = TENURED);
+ Handle<PropertyCell> NewPropertyCell(
+ Handle<Name> name, AllocationType allocation = AllocationType::kOld);
Handle<FeedbackCell> NewNoClosuresCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewOneClosureCell(Handle<HeapObject> value);
Handle<FeedbackCell> NewManyClosuresCell(Handle<HeapObject> value);
- Handle<FeedbackCell> NewNoFeedbackCell();
Handle<DescriptorArray> NewDescriptorArray(
int number_of_entries, int slack = 0,
- AllocationType type = AllocationType::kYoung);
+ AllocationType allocation = AllocationType::kYoung);
Handle<TransitionArray> NewTransitionArray(int number_of_transitions,
int slack = 0);
@@ -507,10 +523,10 @@ class V8_EXPORT_PRIVATE Factory {
Map InitializeMap(Map map, InstanceType type, int instance_size,
ElementsKind elements_kind, int inobject_properties);
- // Allocate a block of memory in the given space (filled with a filler).
- // Used as a fall-back for generated code when the space is full.
+ // Allocate a block of memory of the given AllocationType (filled with a
+ // filler). Used as a fall-back for generated code when the space is full.
Handle<HeapObject> NewFillerObject(int size, bool double_align,
- AllocationSpace space);
+ AllocationType allocation);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -529,22 +545,23 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FixedArray> CopyFixedArrayAndGrow(
Handle<FixedArray> array, int grow_by,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
Handle<WeakFixedArray> CopyWeakFixedArrayAndGrow(
Handle<WeakFixedArray> array, int grow_by,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
Handle<WeakArrayList> CopyWeakArrayListAndGrow(
Handle<WeakArrayList> array, int grow_by,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
Handle<PropertyArray> CopyPropertyArrayAndGrow(
Handle<PropertyArray> array, int grow_by,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
- Handle<FixedArray> CopyFixedArrayUpTo(Handle<FixedArray> array, int new_len,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<FixedArray> CopyFixedArrayUpTo(
+ Handle<FixedArray> array, int new_len,
+ AllocationType allocation = AllocationType::kYoung);
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
@@ -558,37 +575,39 @@ class V8_EXPORT_PRIVATE Factory {
// Numbers (e.g. literals) are pretenured by the parser.
// The return value may be a smi or a heap number.
- Handle<Object> NewNumber(double value, PretenureFlag pretenure = NOT_TENURED);
+ Handle<Object> NewNumber(double value,
+ AllocationType allocation = AllocationType::kYoung);
- Handle<Object> NewNumberFromInt(int32_t value,
- PretenureFlag pretenure = NOT_TENURED);
- Handle<Object> NewNumberFromUint(uint32_t value,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<Object> NewNumberFromInt(
+ int32_t value, AllocationType allocation = AllocationType::kYoung);
+ Handle<Object> NewNumberFromUint(
+ uint32_t value, AllocationType allocation = AllocationType::kYoung);
inline Handle<Object> NewNumberFromSize(
- size_t value, PretenureFlag pretenure = NOT_TENURED);
+ size_t value, AllocationType allocation = AllocationType::kYoung);
inline Handle<Object> NewNumberFromInt64(
- int64_t value, PretenureFlag pretenure = NOT_TENURED);
+ int64_t value, AllocationType allocation = AllocationType::kYoung);
inline Handle<HeapNumber> NewHeapNumber(
- double value, PretenureFlag pretenure = NOT_TENURED);
+ double value, AllocationType allocation = AllocationType::kYoung);
inline Handle<HeapNumber> NewHeapNumberFromBits(
- uint64_t bits, PretenureFlag pretenure = NOT_TENURED);
+ uint64_t bits, AllocationType allocation = AllocationType::kYoung);
// Creates a heap number object with a value field that is not yet set.
- Handle<HeapNumber> NewHeapNumber(PretenureFlag pretenure = NOT_TENURED);
+ Handle<HeapNumber> NewHeapNumber(
+ AllocationType allocation = AllocationType::kYoung);
Handle<MutableHeapNumber> NewMutableHeapNumber(
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
inline Handle<MutableHeapNumber> NewMutableHeapNumber(
- double value, PretenureFlag pretenure = NOT_TENURED);
+ double value, AllocationType allocation = AllocationType::kYoung);
inline Handle<MutableHeapNumber> NewMutableHeapNumberFromBits(
- uint64_t bits, PretenureFlag pretenure = NOT_TENURED);
+ uint64_t bits, AllocationType allocation = AllocationType::kYoung);
inline Handle<MutableHeapNumber> NewMutableHeapNumberWithHoleNaN(
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
// Allocates a new BigInt with {length} digits. Only to be used by
// MutableBigInt::New*.
Handle<FreshlyAllocatedBigInt> NewBigInt(
- int length, PretenureFlag pretenure = NOT_TENURED);
+ int length, AllocationType allocation = AllocationType::kYoung);
Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
@@ -596,11 +615,12 @@ class V8_EXPORT_PRIVATE Factory {
// constructor.
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
- Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSObject> NewJSObject(
+ Handle<JSFunction> constructor,
+ AllocationType allocation = AllocationType::kYoung);
// JSObject without a prototype.
Handle<JSObject> NewJSObjectWithNullProto(
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
// Global objects are pretenured and initialized based on a constructor.
Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor);
@@ -611,12 +631,12 @@ class V8_EXPORT_PRIVATE Factory {
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
Handle<JSObject> NewJSObjectFromMap(
- Handle<Map> map, PretenureFlag pretenure = NOT_TENURED,
+ Handle<Map> map, AllocationType allocation = AllocationType::kYoung,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
Handle<JSObject> NewSlowJSObjectFromMap(
Handle<Map> map,
int number_of_slow_properties = NameDictionary::kInitialCapacity,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
// Allocates and initializes a new JavaScript object with the given
// {prototype} and {properties}. The newly created object will be
// in dictionary properties mode. The {elements} can either be the
@@ -624,8 +644,9 @@ class V8_EXPORT_PRIVATE Factory {
// fast elements, or a NumberDictionary, in which case the resulting
// object will have dictionary elements.
Handle<JSObject> NewSlowJSObjectWithPropertiesAndElements(
- Handle<Object> prototype, Handle<NameDictionary> properties,
- Handle<FixedArrayBase> elements, PretenureFlag pretenure = NOT_TENURED);
+ Handle<HeapObject> prototype, Handle<NameDictionary> properties,
+ Handle<FixedArrayBase> elements,
+ AllocationType allocation = AllocationType::kYoung);
// JS arrays are pretenured when allocated by the parser.
@@ -634,27 +655,27 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSArray> NewJSArray(
ElementsKind elements_kind, int length, int capacity,
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
Handle<JSArray> NewJSArray(
int capacity, ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
- PretenureFlag pretenure = NOT_TENURED) {
+ AllocationType allocation = AllocationType::kYoung) {
if (capacity != 0) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
return NewJSArray(elements_kind, 0, capacity,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, pretenure);
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE, allocation);
}
// Create a JSArray with the given elements.
- Handle<JSArray> NewJSArrayWithElements(Handle<FixedArrayBase> elements,
- ElementsKind elements_kind, int length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArrayWithElements(
+ Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
+ AllocationType allocation = AllocationType::kYoung);
inline Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
- PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
void NewJSArrayStorage(
Handle<JSArray> array, int length, int capacity,
@@ -668,29 +689,30 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Module> NewModule(Handle<SharedFunctionInfo> code);
- Handle<JSArrayBuffer> NewJSArrayBuffer(SharedFlag shared,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArrayBuffer> NewJSArrayBuffer(
+ SharedFlag shared, AllocationType allocation = AllocationType::kYoung);
static void TypeAndSizeForElementsKind(ElementsKind kind,
ExternalArrayType* array_type,
size_t* element_size);
- Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSTypedArray> NewJSTypedArray(
+ ExternalArrayType type,
+ AllocationType allocation = AllocationType::kYoung);
- Handle<JSTypedArray> NewJSTypedArray(ElementsKind elements_kind,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSTypedArray> NewJSTypedArray(
+ ElementsKind elements_kind,
+ AllocationType allocation = AllocationType::kYoung);
// Creates a new JSTypedArray with the specified buffer.
- Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type,
- Handle<JSArrayBuffer> buffer,
- size_t byte_offset, size_t length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSTypedArray> NewJSTypedArray(
+ ExternalArrayType type, Handle<JSArrayBuffer> buffer, size_t byte_offset,
+ size_t length, AllocationType allocation = AllocationType::kYoung);
// Creates a new on-heap JSTypedArray.
- Handle<JSTypedArray> NewJSTypedArray(ElementsKind elements_kind,
- size_t number_of_elements,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSTypedArray> NewJSTypedArray(
+ ElementsKind elements_kind, size_t number_of_elements,
+ AllocationType allocation = AllocationType::kYoung);
Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
size_t byte_offset, size_t byte_length);
@@ -732,26 +754,27 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
Handle<Context> context, Handle<FeedbackCell> feedback_cell,
- PretenureFlag pretenure = TENURED);
+ AllocationType allocation = AllocationType::kOld);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info, Handle<Context> context,
- Handle<FeedbackCell> feedback_cell, PretenureFlag pretenure = TENURED);
+ Handle<FeedbackCell> feedback_cell,
+ AllocationType allocation = AllocationType::kOld);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
- Handle<Context> context, PretenureFlag pretenure = TENURED);
+ Handle<Context> context,
+ AllocationType allocation = AllocationType::kOld);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info, Handle<Context> context,
- PretenureFlag pretenure = TENURED);
+ AllocationType allocation = AllocationType::kOld);
// The choke-point for JSFunction creation. Handles allocation and
// initialization. All other utility methods call into this.
- Handle<JSFunction> NewFunction(Handle<Map> map,
- Handle<SharedFunctionInfo> info,
- Handle<Context> context,
- PretenureFlag pretenure = TENURED);
+ Handle<JSFunction> NewFunction(
+ Handle<Map> map, Handle<SharedFunctionInfo> info, Handle<Context> context,
+ AllocationType allocation = AllocationType::kOld);
// Create a serialized scope info.
Handle<ScopeInfo> NewScopeInfo(int length);
@@ -932,13 +955,14 @@ class V8_EXPORT_PRIVATE Factory {
Handle<String> ToPrimitiveHintString(ToPrimitiveHint hint);
Handle<JSPromise> NewJSPromiseWithoutHook(
- PretenureFlag pretenure = NOT_TENURED);
- Handle<JSPromise> NewJSPromise(PretenureFlag pretenure = NOT_TENURED);
+ AllocationType allocation = AllocationType::kYoung);
+ Handle<JSPromise> NewJSPromise(
+ AllocationType allocation = AllocationType::kYoung);
Handle<CallHandlerInfo> NewCallHandlerInfo(bool has_no_side_effect = false);
- HeapObject NewForTest(Handle<Map> map, PretenureFlag pretenure) {
- return New(map, pretenure);
+ HeapObject NewForTest(Handle<Map> map, AllocationType allocation) {
+ return New(map, allocation);
}
private:
@@ -951,39 +975,40 @@ class V8_EXPORT_PRIVATE Factory {
}
HeapObject AllocateRawWithImmortalMap(
- int size, PretenureFlag pretenure, Map map,
+ int size, AllocationType allocation, Map map,
AllocationAlignment alignment = kWordAligned);
HeapObject AllocateRawWithAllocationSite(
- Handle<Map> map, PretenureFlag pretenure,
+ Handle<Map> map, AllocationType allocation,
Handle<AllocationSite> allocation_site);
// Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
- HeapObject AllocateRawArray(int size, PretenureFlag pretenure);
- HeapObject AllocateRawFixedArray(int length, PretenureFlag pretenure);
- HeapObject AllocateRawWeakArrayList(int length, PretenureFlag pretenure);
+ HeapObject AllocateRawArray(int size, AllocationType allocation);
+ HeapObject AllocateRawFixedArray(int length, AllocationType allocation);
+ HeapObject AllocateRawWeakArrayList(int length, AllocationType allocation);
Handle<FixedArray> NewFixedArrayWithFiller(RootIndex map_root_index,
int length, Object filler,
- PretenureFlag pretenure);
+ AllocationType allocation);
// Allocates new context with given map, sets length and initializes the
// after-header part with uninitialized values and leaves the context header
// uninitialized.
Handle<Context> NewContext(RootIndex map_root_index, int size,
- int variadic_part_length, PretenureFlag pretenure);
+ int variadic_part_length,
+ AllocationType allocation);
template <typename T>
Handle<T> AllocateSmallOrderedHashTable(Handle<Map> map, int capacity,
- PretenureFlag pretenure);
+ AllocationType allocation);
// Creates a heap object based on the map. The fields of the heap object are
// not initialized; it is the responsibility of the caller to do that.
- HeapObject New(Handle<Map> map, PretenureFlag pretenure);
+ HeapObject New(Handle<Map> map, AllocationType allocation);
template <typename T>
Handle<T> CopyArrayWithMap(Handle<T> src, Handle<Map> map);
template <typename T>
Handle<T> CopyArrayAndGrow(Handle<T> src, int grow_by,
- PretenureFlag pretenure);
+ AllocationType allocation);
template <bool is_one_byte, typename T>
Handle<String> AllocateInternalizedStringImpl(T t, int chars,
@@ -996,7 +1021,7 @@ class V8_EXPORT_PRIVATE Factory {
uint32_t hash_field);
MaybeHandle<String> NewStringFromTwoByte(const uc16* string, int length,
- PretenureFlag pretenure);
+ AllocationType allocation);
// Attempt to find the number in a small cache. If we find it, return
// the string representation of the number. Otherwise return undefined.
@@ -1007,8 +1032,9 @@ class V8_EXPORT_PRIVATE Factory {
const char* string, bool check_cache);
// Create a JSArray with no elements and no length.
- Handle<JSArray> NewJSArray(ElementsKind elements_kind,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArray(
+ ElementsKind elements_kind,
+ AllocationType allocation = AllocationType::kYoung);
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
MaybeHandle<String> name, MaybeHandle<HeapObject> maybe_function_data,
@@ -1031,13 +1057,14 @@ class NewFunctionArgs final {
static NewFunctionArgs ForWasm(
Handle<String> name,
Handle<WasmExportedFunctionData> exported_function_data, Handle<Map> map);
- static NewFunctionArgs ForBuiltin(Handle<String> name, Handle<Map> map,
- int builtin_id);
+ V8_EXPORT_PRIVATE static NewFunctionArgs ForBuiltin(Handle<String> name,
+ Handle<Map> map,
+ int builtin_id);
static NewFunctionArgs ForFunctionWithoutCode(Handle<String> name,
Handle<Map> map,
LanguageMode language_mode);
static NewFunctionArgs ForBuiltinWithPrototype(
- Handle<String> name, Handle<Object> prototype, InstanceType type,
+ Handle<String> name, Handle<HeapObject> prototype, InstanceType type,
int instance_size, int inobject_properties, int builtin_id,
MutableMode prototype_mutability);
static NewFunctionArgs ForBuiltinWithoutPrototype(Handle<String> name,
@@ -1066,7 +1093,7 @@ class NewFunctionArgs final {
int inobject_properties_ = kUninitialized;
bool should_set_prototype_ = false;
- MaybeHandle<Object> maybe_prototype_;
+ MaybeHandle<HeapObject> maybe_prototype_;
bool should_set_language_mode_ = false;
LanguageMode language_mode_;
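
Throughout the factory.h hunks above, every PretenureFlag parameter becomes an AllocationType, with the defaults NOT_TENURED and TENURED mapping to AllocationType::kYoung and AllocationType::kOld respectively. A stand-alone sketch of that mapping, using local stand-in enums rather than V8's real declarations:

// Local stand-ins that mirror the rename performed by this patch.
enum PretenureFlag { NOT_TENURED, TENURED };
enum class AllocationType { kYoung, kOld };

constexpr AllocationType ToAllocationType(PretenureFlag flag) {
  return flag == TENURED ? AllocationType::kOld : AllocationType::kYoung;
}

static_assert(ToAllocationType(NOT_TENURED) == AllocationType::kYoung,
              "default allocations stay in the young generation");
static_assert(ToAllocationType(TENURED) == AllocationType::kOld,
              "pretenured allocations go to the old generation");
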
diff --git a/chromium/v8/src/heap/gc-tracer.cc b/chromium/v8/src/heap/gc-tracer.cc
index 423dc66a2e0..4afc012c62f 100644
--- a/chromium/v8/src/heap/gc-tracer.cc
+++ b/chromium/v8/src/heap/gc-tracer.cc
@@ -38,7 +38,7 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
: tracer_(tracer), scope_(scope) {
start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (V8_LIKELY(!FLAG_runtime_stats)) return;
+ if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
runtime_stats_ = tracer_->heap_->isolate()->counters()->runtime_call_stats();
runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
}
@@ -55,7 +55,7 @@ GCTracer::BackgroundScope::BackgroundScope(GCTracer* tracer, ScopeId scope)
: tracer_(tracer), scope_(scope), runtime_stats_enabled_(false) {
start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (V8_LIKELY(!base::AsAtomic32::Relaxed_Load(&FLAG_runtime_stats))) return;
+ if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
timer_.Start(&counter_, nullptr);
runtime_stats_enabled_ = true;
}
@@ -266,10 +266,6 @@ void GCTracer::Start(GarbageCollector collector,
}
void GCTracer::ResetIncrementalMarkingCounters() {
- if (incremental_marking_duration_ > 0) {
- heap_->isolate()->counters()->incremental_marking_sum()->AddSample(
- static_cast<int>(incremental_marking_duration_));
- }
incremental_marking_bytes_ = 0;
incremental_marking_duration_ = 0;
for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
@@ -1083,7 +1079,7 @@ void GCTracer::FetchBackgroundCounters(int first_global_scope,
background_counter_[first_background_scope + i].total_duration_ms;
background_counter_[first_background_scope + i].total_duration_ms = 0;
}
- if (V8_LIKELY(!FLAG_runtime_stats)) return;
+ if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
RuntimeCallStats* runtime_stats =
heap_->isolate()->counters()->runtime_call_stats();
if (!runtime_stats) return;
@@ -1127,6 +1123,36 @@ void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
static_cast<int>(current_.scopes[Scope::MC_PROLOGUE]));
counters->gc_finalize_sweep()->AddSample(
static_cast<int>(current_.scopes[Scope::MC_SWEEP]));
+ if (incremental_marking_duration_ > 0) {
+ heap_->isolate()->counters()->incremental_marking_sum()->AddSample(
+ static_cast<int>(incremental_marking_duration_));
+ }
+ const double overall_marking_time =
+ incremental_marking_duration_ + current_.scopes[Scope::MC_MARK];
+ heap_->isolate()->counters()->gc_marking_sum()->AddSample(
+ static_cast<int>(overall_marking_time));
+
+ constexpr size_t kMinObjectSizeForReportingThroughput = 1024 * 1024;
+ if (base::TimeTicks::IsHighResolution() &&
+ heap_->SizeOfObjects() > kMinObjectSizeForReportingThroughput) {
+ DCHECK_GT(overall_marking_time, 0.0);
+ const double overall_v8_marking_time =
+ overall_marking_time -
+ current_.scopes[Scope::MC_MARK_EMBEDDER_PROLOGUE] -
+ current_.scopes[Scope::MC_MARK_EMBEDDER_TRACING] -
+ current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE] -
+ current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_TRACING];
+ DCHECK_GT(overall_v8_marking_time, 0.0);
+ const int main_thread_marking_throughput_mb_per_s =
+ static_cast<int>(static_cast<double>(heap_->SizeOfObjects()) /
+ overall_v8_marking_time * 1000 / 1024 / 1024);
+ heap_->isolate()
+ ->counters()
+ ->gc_main_thread_marking_throughput()
+ ->AddSample(
+ static_cast<int>(main_thread_marking_throughput_mb_per_s));
+ }
+
DCHECK_EQ(Scope::LAST_TOP_MC_SCOPE, Scope::MC_SWEEP);
} else if (gc_timer == counters->gc_scavenger()) {
counters->gc_scavenger_scavenge_main()->AddSample(
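
The RecordGCPhasesHistograms additions above report a main-thread marking throughput sample computed from SizeOfObjects() and the V8 share of the overall marking time. A stand-alone sketch of the same arithmetic with hypothetical inputs (64 MiB of live objects marked in 80 ms of V8 time), which works out to 800 MB/s:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t size_of_objects = 64u * 1024 * 1024;  // bytes, hypothetical
  const double overall_v8_marking_time = 80.0;  // milliseconds, hypothetical
  const int throughput_mb_per_s =
      static_cast<int>(static_cast<double>(size_of_objects) /
                       overall_v8_marking_time * 1000 / 1024 / 1024);
  std::printf("%d MB/s\n", throughput_mb_per_s);  // prints 800
  return 0;
}
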
diff --git a/chromium/v8/src/heap/gc-tracer.h b/chromium/v8/src/heap/gc-tracer.h
index 0ad1f59b417..b9604bdff0c 100644
--- a/chromium/v8/src/heap/gc-tracer.h
+++ b/chromium/v8/src/heap/gc-tracer.h
@@ -17,7 +17,7 @@
namespace v8 {
namespace internal {
-typedef std::pair<uint64_t, double> BytesAndDuration;
+using BytesAndDuration = std::pair<uint64_t, double>;
inline BytesAndDuration MakeBytesAndDuration(uint64_t bytes, double duration) {
return std::make_pair(bytes, duration);
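
This hunk, like the later conversions of OutOfMemoryCallback, Reservation, and GetExternallyAllocatedMemoryInBytesCallback, only rewrites a typedef as the equivalent using alias; both spellings below name the same type:

#include <cstdint>
#include <type_traits>
#include <utility>

typedef std::pair<uint64_t, double> BytesAndDurationTypedef;
using BytesAndDurationAlias = std::pair<uint64_t, double>;

static_assert(
    std::is_same<BytesAndDurationTypedef, BytesAndDurationAlias>::value,
    "typedef and using declare the same alias");
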
diff --git a/chromium/v8/src/heap/heap-inl.h b/chromium/v8/src/heap/heap-inl.h
index b143a33af5d..d1293462957 100644
--- a/chromium/v8/src/heap/heap-inl.h
+++ b/chromium/v8/src/heap/heap-inl.h
@@ -119,9 +119,9 @@ void Heap::SetMessageListeners(TemplateList value) {
roots_table()[RootIndex::kMessageListeners] = value->ptr();
}
-void Heap::SetPendingOptimizeForTestBytecode(Object bytecode) {
- DCHECK(bytecode->IsBytecodeArray() || bytecode->IsUndefined(isolate()));
- roots_table()[RootIndex::kPendingOptimizeForTestBytecode] = bytecode->ptr();
+void Heap::SetPendingOptimizeForTestBytecode(Object hash_table) {
+ DCHECK(hash_table->IsObjectHashTable() || hash_table->IsUndefined(isolate()));
+ roots_table()[RootIndex::kPendingOptimizeForTestBytecode] = hash_table->ptr();
}
PagedSpace* Heap::paged_space(int idx) {
@@ -300,15 +300,7 @@ void Heap::FinalizeExternalString(String string) {
ExternalBackingStoreType::kExternalString,
ext_string->ExternalPayloadSize());
- v8::String::ExternalStringResourceBase** resource_addr =
- reinterpret_cast<v8::String::ExternalStringResourceBase**>(
- string->address() + ExternalString::kResourceOffset);
-
- // Dispose of the C++ object if it has not already been disposed.
- if (*resource_addr != nullptr) {
- (*resource_addr)->Dispose();
- *resource_addr = nullptr;
- }
+ ext_string->DisposeResource();
}
Address Heap::NewSpaceTop() { return new_space_->top(); }
@@ -376,10 +368,6 @@ bool Heap::InToPage(HeapObject heap_object) {
bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
-bool Heap::InReadOnlySpace(Object object) {
- return read_only_space_->Contains(object);
-}
-
// static
Heap* Heap::FromWritableHeapObject(const HeapObject obj) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
diff --git a/chromium/v8/src/heap/heap-write-barrier-inl.h b/chromium/v8/src/heap/heap-write-barrier-inl.h
index 63d16ca82dc..b33fd5d4c11 100644
--- a/chromium/v8/src/heap/heap-write-barrier-inl.h
+++ b/chromium/v8/src/heap/heap-write-barrier-inl.h
@@ -15,6 +15,7 @@
// elsewhere.
#include "src/isolate.h"
#include "src/objects/code.h"
+#include "src/objects/compressed-slots-inl.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
#include "src/objects/maybe-object-inl.h"
@@ -114,6 +115,23 @@ inline void GenerationalBarrierInternal(HeapObject object, Address slot,
Heap_GenerationalBarrierSlow(object, slot, value);
}
+inline void GenerationalEphemeronKeyBarrierInternal(EphemeronHashTable table,
+ Address slot,
+ HeapObject value) {
+ DCHECK(Heap::PageFlagsAreConsistent(table));
+ heap_internals::MemoryChunk* value_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(value);
+ heap_internals::MemoryChunk* table_chunk =
+ heap_internals::MemoryChunk::FromHeapObject(table);
+
+ if (!value_chunk->InYoungGeneration() || table_chunk->InYoungGeneration()) {
+ return;
+ }
+
+ Heap* heap = GetHeapFromWritableObject(table);
+ heap->RecordEphemeronKeyWrite(table, slot);
+}
+
inline void MarkingBarrierInternal(HeapObject object, Address slot,
HeapObject value) {
DCHECK(Heap_PageFlagsAreConsistent(object));
@@ -148,6 +166,15 @@ inline void GenerationalBarrier(HeapObject object, ObjectSlot slot,
HeapObject::cast(value));
}
+inline void GenerationalEphemeronKeyBarrier(EphemeronHashTable table,
+ ObjectSlot slot, Object value) {
+ DCHECK(!HasWeakHeapObjectTag(*slot));
+ DCHECK(!HasWeakHeapObjectTag(value));
+ DCHECK(value->IsHeapObject());
+ heap_internals::GenerationalEphemeronKeyBarrierInternal(
+ table, slot.address(), HeapObject::cast(value));
+}
+
inline void GenerationalBarrier(HeapObject object, MaybeObjectSlot slot,
MaybeObject value) {
HeapObject value_heap_object;
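
GenerationalEphemeronKeyBarrierInternal above returns early unless the key being written is in the young generation while the table is not; only that combination has to be recorded in the ephemeron remembered set. A minimal restatement of the predicate with plain bools instead of MemoryChunk flags, purely illustrative:

// True exactly when the write must be recorded: young key, non-young table.
// Every other combination is already covered by the regular barriers.
constexpr bool NeedsEphemeronKeyRecord(bool key_is_young, bool table_is_young) {
  return key_is_young && !table_is_young;
}

static_assert(NeedsEphemeronKeyRecord(true, false), "young key in old table");
static_assert(!NeedsEphemeronKeyRecord(false, false), "old key needs no record");
static_assert(!NeedsEphemeronKeyRecord(true, true), "young table needs no record");
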
diff --git a/chromium/v8/src/heap/heap-write-barrier.h b/chromium/v8/src/heap/heap-write-barrier.h
index 9fcb64b94b3..803f022fcd3 100644
--- a/chromium/v8/src/heap/heap-write-barrier.h
+++ b/chromium/v8/src/heap/heap-write-barrier.h
@@ -18,6 +18,7 @@ class HeapObject;
class MaybeObject;
class Object;
class RelocInfo;
+class EphemeronHashTable;
// Note: In general it is preferred to use the macros defined in
// object-macros.h.
@@ -37,6 +38,8 @@ void WriteBarrierForCode(Code host);
void GenerationalBarrier(HeapObject object, ObjectSlot slot, Object value);
void GenerationalBarrier(HeapObject object, MaybeObjectSlot slot,
MaybeObject value);
+void GenerationalEphemeronKeyBarrier(EphemeronHashTable table, ObjectSlot slot,
+ Object value);
void GenerationalBarrierForElements(Heap* heap, FixedArray array, int offset,
int length);
void GenerationalBarrierForCode(Code host, RelocInfo* rinfo, HeapObject object);
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index e72269d40a3..a6b3f5dd1d9 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -428,7 +428,7 @@ void Heap::PrintShortHeapStatistics() {
", available: %6" PRIuS
" KB"
", committed: %6" PRIuS " KB\n",
- lo_space_->SizeOfObjects() / KB,
+ code_lo_space_->SizeOfObjects() / KB,
code_lo_space_->Available() / KB,
code_lo_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
@@ -652,9 +652,9 @@ size_t Heap::SizeOfObjects() {
return total;
}
-
-const char* Heap::GetSpaceName(int idx) {
- switch (idx) {
+// static
+const char* Heap::GetSpaceName(AllocationSpace space) {
+ switch (space) {
case NEW_SPACE:
return "new_space";
case OLD_SPACE:
@@ -671,10 +671,8 @@ const char* Heap::GetSpaceName(int idx) {
return "code_large_object_space";
case RO_SPACE:
return "read_only_space";
- default:
- UNREACHABLE();
}
- return nullptr;
+ UNREACHABLE();
}
void Heap::MergeAllocationSitePretenuringFeedback(
@@ -843,7 +841,7 @@ void Heap::ProcessPretenuringFeedback() {
if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
trigger_deoptimization = true;
}
- if (site->GetPretenureMode() == TENURED) {
+ if (site->GetAllocationType() == AllocationType::kOld) {
tenure_decisions++;
} else {
dont_tenure_decisions++;
@@ -1499,7 +1497,7 @@ void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
WriteBarrierMode mode) {
if (len == 0) return;
- DCHECK(array->map() != ReadOnlyRoots(this).fixed_cow_array_map());
+ DCHECK_NE(array->map(), ReadOnlyRoots(this).fixed_cow_array_map());
ObjectSlot dst = array->RawFieldOfElementAt(dst_index);
ObjectSlot src = array->RawFieldOfElementAt(src_index);
if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
@@ -1526,6 +1524,41 @@ void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
}
+void Heap::CopyElements(FixedArray dst_array, FixedArray src_array,
+ int dst_index, int src_index, int len,
+ WriteBarrierMode mode) {
+ DCHECK_NE(dst_array, src_array);
+ if (len == 0) return;
+
+ DCHECK_NE(dst_array->map(), ReadOnlyRoots(this).fixed_cow_array_map());
+ ObjectSlot dst = dst_array->RawFieldOfElementAt(dst_index);
+ ObjectSlot src = src_array->RawFieldOfElementAt(src_index);
+ // Ensure ranges do not overlap.
+ DCHECK(dst + len <= src || src + len <= dst);
+ if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
+ if (dst < src) {
+ for (int i = 0; i < len; i++) {
+ dst.Relaxed_Store(src.Relaxed_Load());
+ ++dst;
+ ++src;
+ }
+ } else {
+ // Copy backwards.
+ dst += len - 1;
+ src += len - 1;
+ for (int i = 0; i < len; i++) {
+ dst.Relaxed_Store(src.Relaxed_Load());
+ --dst;
+ --src;
+ }
+ }
+ } else {
+ MemCopy(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
+ }
+ if (mode == SKIP_WRITE_BARRIER) return;
+ FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, dst_array, dst_index, len);
+}
+
#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
@@ -2380,13 +2413,13 @@ void Heap::ForeachAllocationSite(
}
}
-void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
+void Heap::ResetAllAllocationSitesDependentCode(AllocationType allocation) {
DisallowHeapAllocation no_allocation_scope;
bool marked = false;
ForeachAllocationSite(allocation_sites_list(),
- [&marked, flag, this](AllocationSite site) {
- if (site->GetPretenureMode() == flag) {
+ [&marked, allocation, this](AllocationSite site) {
+ if (site->GetAllocationType() == allocation) {
site->ResetPretenureDecision();
site->set_deopt_dependent_code(true);
marked = true;
@@ -2397,7 +2430,6 @@ void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}
-
void Heap::EvaluateOldSpaceLocalPretenuring(
uint64_t size_of_objects_before_gc) {
uint64_t size_of_objects_after_gc = SizeOfObjects();
@@ -2410,7 +2442,7 @@ void Heap::EvaluateOldSpaceLocalPretenuring(
// allocation sites may be the cause for that. We have to deopt all
// dependent code registered in the allocation sites to re-evaluate
// our pretenuring decisions.
- ResetAllAllocationSitesDependentCode(TENURED);
+ ResetAllAllocationSitesDependentCode(AllocationType::kOld);
if (FLAG_trace_pretenuring) {
PrintF(
"Deopt all allocation sites dependent code due to low survival "
@@ -2486,6 +2518,10 @@ int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
return 0;
}
+size_t Heap::GetCodeRangeReservedAreaSize() {
+ return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
+}
+
HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) {
CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
return HeapObject::FromAddress(object->address() + filler_size);
@@ -2722,9 +2758,9 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
// Remove recorded slots for the new map and length offset.
- ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
- ClearRecordedSlot(new_object, HeapObject::RawField(
- new_object, FixedArrayBase::kLengthOffset));
+ ClearRecordedSlot(new_object, new_object.RawField(0));
+ ClearRecordedSlot(new_object,
+ new_object.RawField(FixedArrayBase::kLengthOffset));
// Handle invalidated old-to-old slots.
if (incremental_marking()->IsCompacting() &&
@@ -2738,7 +2774,7 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
// we need pointer granularity writes to avoid race with the concurrent
// marking.
if (filler->Size() > FreeSpace::kSize) {
- MemsetTagged(HeapObject::RawField(filler, FreeSpace::kSize),
+ MemsetTagged(filler.RawField(FreeSpace::kSize),
ReadOnlyRoots(this).undefined_value(),
(filler->Size() - FreeSpace::kSize) / kTaggedSize);
}
@@ -3551,14 +3587,21 @@ const char* Heap::GarbageCollectionReasonToString(
}
bool Heap::Contains(HeapObject value) {
+ // Check RO_SPACE first because IsOutsideAllocatedSpace cannot account for a
+ // shared RO_SPACE.
+ // TODO(goszczycki): Exclude read-only space. Use ReadOnlyHeap::Contains where
+ // appropriate.
+ if (read_only_space_ != nullptr && read_only_space_->Contains(value)) {
+ return true;
+ }
if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
return false;
}
return HasBeenSetUp() &&
(new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
code_space_->Contains(value) || map_space_->Contains(value) ||
- lo_space_->Contains(value) || read_only_space_->Contains(value) ||
- code_lo_space_->Contains(value) || new_lo_space_->Contains(value));
+ lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
+ new_lo_space_->Contains(value));
}
bool Heap::InSpace(HeapObject value, AllocationSpace space) {
@@ -3641,14 +3684,14 @@ class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
void VerifyPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
if (!host.is_null()) {
- CHECK(heap_->InReadOnlySpace(host->map()));
+ CHECK(ReadOnlyHeap::Contains(host->map()));
}
VerifyPointersVisitor::VerifyPointers(host, start, end);
for (MaybeObjectSlot current = start; current < end; ++current) {
HeapObject heap_object;
if ((*current)->GetHeapObject(&heap_object)) {
- CHECK(heap_->InReadOnlySpace(heap_object));
+ CHECK(ReadOnlyHeap::Contains(heap_object));
}
}
}
@@ -3735,6 +3778,11 @@ class SlotVerifyingVisitor : public ObjectVisitor {
}
}
+ protected:
+ bool InUntypedSet(ObjectSlot slot) {
+ return untyped_->count(slot.address()) > 0;
+ }
+
private:
bool InTypedSet(SlotType type, Address slot) {
return typed_->count(std::make_pair(type, slot)) > 0;
@@ -3746,8 +3794,10 @@ class SlotVerifyingVisitor : public ObjectVisitor {
class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
public:
OldToNewSlotVerifyingVisitor(std::set<Address>* untyped,
- std::set<std::pair<SlotType, Address>>* typed)
- : SlotVerifyingVisitor(untyped, typed) {}
+ std::set<std::pair<SlotType, Address>>* typed,
+ EphemeronRememberedSet* ephemeron_remembered_set)
+ : SlotVerifyingVisitor(untyped, typed),
+ ephemeron_remembered_set_(ephemeron_remembered_set) {}
bool ShouldHaveBeenRecorded(HeapObject host, MaybeObject target) override {
DCHECK_IMPLIES(target->IsStrongOrWeak() && Heap::InYoungGeneration(target),
@@ -3755,6 +3805,30 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
return target->IsStrongOrWeak() && Heap::InYoungGeneration(target) &&
!Heap::InYoungGeneration(host);
}
+
+ void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
+ ObjectSlot target) override {
+ VisitPointer(host, target);
+ if (FLAG_minor_mc) {
+ VisitPointer(host, key);
+ } else {
+ // Keys are handled separately and should never appear in this set.
+ CHECK(!InUntypedSet(key));
+ Object k = *key;
+ if (!ObjectInYoungGeneration(host) && ObjectInYoungGeneration(k)) {
+ EphemeronHashTable table = EphemeronHashTable::cast(host);
+ auto it = ephemeron_remembered_set_->find(table);
+ CHECK(it != ephemeron_remembered_set_->end());
+ int slot_index =
+ EphemeronHashTable::SlotToIndex(table.address(), key.address());
+ int entry = EphemeronHashTable::IndexToEntry(slot_index);
+ CHECK(it->second.find(entry) != it->second.end());
+ }
+ }
+ }
+
+ private:
+ EphemeronRememberedSet* ephemeron_remembered_set_;
};
template <RememberedSetType direction>
@@ -3781,7 +3855,7 @@ void CollectSlots(MemoryChunk* chunk, Address start, Address end,
void Heap::VerifyRememberedSetFor(HeapObject object) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
- DCHECK_IMPLIES(chunk->mutex() == nullptr, InReadOnlySpace(object));
+ DCHECK_IMPLIES(chunk->mutex() == nullptr, ReadOnlyHeap::Contains(object));
// In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
chunk->mutex());
@@ -3792,7 +3866,8 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
if (!InYoungGeneration(object)) {
store_buffer()->MoveAllEntriesToRememberedSet();
CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
- OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new);
+ OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new,
+ &this->ephemeron_remembered_set_);
object->IterateBody(&visitor);
}
// TODO(ulan): Add old to old slot set verification once all weak objects
@@ -4425,10 +4500,10 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
return heap_object;
}
-HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType type,
+HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
AllocationAlignment alignment) {
HeapObject result;
- AllocationResult alloc = AllocateRaw(size, type, alignment);
+ AllocationResult alloc = AllocateRaw(size, allocation, alignment);
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -4437,7 +4512,7 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType type,
for (int i = 0; i < 2; i++) {
CollectGarbage(alloc.RetrySpace(),
GarbageCollectionReason::kAllocationFailure);
- alloc = AllocateRaw(size, type, alignment);
+ alloc = AllocateRaw(size, allocation, alignment);
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -4446,17 +4521,17 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType type,
return HeapObject();
}
-HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType type,
+HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType allocation,
AllocationAlignment alignment) {
AllocationResult alloc;
- HeapObject result = AllocateRawWithLightRetry(size, type, alignment);
+ HeapObject result = AllocateRawWithLightRetry(size, allocation, alignment);
if (!result.is_null()) return result;
isolate()->counters()->gc_last_resort_from_handles()->Increment();
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
{
AlwaysAllocateScope scope(isolate());
- alloc = AllocateRaw(size, type, alignment);
+ alloc = AllocateRaw(size, allocation, alignment);
}
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
@@ -4501,7 +4576,7 @@ HeapObject Heap::AllocateRawCodeInLargeObjectSpace(int size) {
return HeapObject();
}
-void Heap::SetUp(ReadOnlyHeap* ro_heap) {
+void Heap::SetUp() {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout();
#endif
@@ -4514,9 +4589,6 @@ void Heap::SetUp(ReadOnlyHeap* ro_heap) {
// and old_generation_size_ otherwise.
if (!configured_) ConfigureHeapDefault();
- DCHECK_NOT_NULL(ro_heap);
- read_only_heap_ = ro_heap;
-
mmap_region_base_ =
reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
~kMmapRegionMask;
@@ -4551,8 +4623,18 @@ void Heap::SetUp(ReadOnlyHeap* ro_heap) {
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
space_[i] = nullptr;
}
+}
+void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
+ DCHECK_NOT_NULL(ro_heap);
+ DCHECK_IMPLIES(read_only_space_ != nullptr,
+ read_only_space_ == ro_heap->read_only_space());
+ read_only_heap_ = ro_heap;
space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
+}
+
+void Heap::SetUpSpaces() {
+ // Ensure SetUpFromReadOnlyHeap has been run.
DCHECK_NOT_NULL(read_only_space_);
space_[NEW_SPACE] = new_space_ =
new NewSpace(this, memory_allocator_->data_page_allocator(),
@@ -4579,7 +4661,7 @@ void Heap::SetUp(ReadOnlyHeap* ro_heap) {
array_buffer_collector_.reset(new ArrayBufferCollector(this));
gc_idle_time_handler_.reset(new GCIdleTimeHandler());
memory_reducer_.reset(new MemoryReducer(this));
- if (V8_UNLIKELY(FLAG_gc_stats)) {
+ if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
live_object_stats_.reset(new ObjectStats(this));
dead_object_stats_.reset(new ObjectStats(this));
}
@@ -4697,7 +4779,6 @@ void Heap::NotifyDeserializationComplete() {
#endif // DEBUG
}
- read_only_space()->MarkAsReadOnly();
deserialization_complete_ = true;
}
@@ -4901,7 +4982,7 @@ void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
namespace {
Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
Handle<WeakArrayList> array,
- PretenureFlag pretenure) {
+ AllocationType allocation) {
if (array->length() == 0) {
return array;
}
@@ -4913,7 +4994,7 @@ Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
heap->isolate(),
handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
- new_length, pretenure);
+ new_length, allocation);
// Allocation might have caused GC and turned some of the elements into
// cleared weak heap objects. Count the number of live references again and
// fill in the new array.
@@ -4929,7 +5010,7 @@ Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
} // anonymous namespace
-void Heap::CompactWeakArrayLists(PretenureFlag pretenure) {
+void Heap::CompactWeakArrayLists(AllocationType allocation) {
// Find known PrototypeUsers and compact them.
std::vector<Handle<PrototypeInfo>> prototype_infos;
{
@@ -4946,24 +5027,25 @@ void Heap::CompactWeakArrayLists(PretenureFlag pretenure) {
for (auto& prototype_info : prototype_infos) {
Handle<WeakArrayList> array(
WeakArrayList::cast(prototype_info->prototype_users()), isolate());
- DCHECK_IMPLIES(pretenure == TENURED,
+ DCHECK_IMPLIES(allocation == AllocationType::kOld,
InOldSpace(*array) ||
*array == ReadOnlyRoots(this).empty_weak_array_list());
WeakArrayList new_array = PrototypeUsers::Compact(
- array, this, JSObject::PrototypeRegistryCompactionCallback, pretenure);
+ array, this, JSObject::PrototypeRegistryCompactionCallback, allocation);
prototype_info->set_prototype_users(new_array);
}
// Find known WeakArrayLists and compact them.
Handle<WeakArrayList> scripts(script_list(), isolate());
- DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*scripts));
- scripts = CompactWeakArrayList(this, scripts, pretenure);
+ DCHECK_IMPLIES(allocation == AllocationType::kOld, InOldSpace(*scripts));
+ scripts = CompactWeakArrayList(this, scripts, allocation);
set_script_list(*scripts);
Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(),
isolate());
- DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*no_script_list));
- no_script_list = CompactWeakArrayList(this, no_script_list, pretenure);
+ DCHECK_IMPLIES(allocation == AllocationType::kOld,
+ InOldSpace(*no_script_list));
+ no_script_list = CompactWeakArrayList(this, no_script_list, allocation);
set_noscript_shared_function_infos(*no_script_list);
}
@@ -5268,7 +5350,6 @@ HeapIterator::HeapIterator(Heap* heap,
space_iterator_(nullptr),
object_iterator_(nullptr) {
heap_->MakeHeapIterable();
- heap_->heap_iterator_start();
// Start the iteration.
space_iterator_ = new SpaceIterator(heap_);
switch (filtering_) {
@@ -5283,7 +5364,6 @@ HeapIterator::HeapIterator(Heap* heap,
HeapIterator::~HeapIterator() {
- heap_->heap_iterator_end();
#ifdef DEBUG
// Assert that in filtering mode we have iterated through all
// objects. Otherwise, heap will be left in an inconsistent state.
@@ -5540,28 +5620,6 @@ size_t Heap::NumberOfDetachedContexts() {
return detached_contexts()->length() / 2;
}
-const char* AllocationSpaceName(AllocationSpace space) {
- switch (space) {
- case NEW_SPACE:
- return "NEW_SPACE";
- case OLD_SPACE:
- return "OLD_SPACE";
- case CODE_SPACE:
- return "CODE_SPACE";
- case MAP_SPACE:
- return "MAP_SPACE";
- case LO_SPACE:
- return "LO_SPACE";
- case NEW_LO_SPACE:
- return "NEW_LO_SPACE";
- case RO_SPACE:
- return "RO_SPACE";
- default:
- UNREACHABLE();
- }
- return nullptr;
-}
-
void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) {
VerifyPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
@@ -5661,7 +5719,7 @@ bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
}
void Heap::CreateObjectStats() {
- if (V8_LIKELY(FLAG_gc_stats == 0)) return;
+ if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
if (!live_object_stats_) {
live_object_stats_.reset(new ObjectStats(this));
}
@@ -5764,6 +5822,30 @@ void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
heap->store_buffer()->InsertEntry(slot);
}
+void Heap::RecordEphemeronKeyWrite(EphemeronHashTable table, Address slot) {
+ DCHECK(ObjectInYoungGeneration(HeapObjectSlot(slot).ToHeapObject()));
+ int slot_index = EphemeronHashTable::SlotToIndex(table.address(), slot);
+ int entry = EphemeronHashTable::IndexToEntry(slot_index);
+ auto it =
+ ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
+ it.first->second.insert(entry);
+}
+
+void Heap::EphemeronKeyWriteBarrierFromCode(Address raw_object,
+ Address key_slot_address,
+ Isolate* isolate) {
+ EphemeronHashTable table = EphemeronHashTable::cast(Object(raw_object));
+ MaybeObjectSlot key_slot(key_slot_address);
+ MaybeObject maybe_key = *key_slot;
+ HeapObject key;
+ if (!maybe_key.GetHeapObject(&key)) return;
+ if (!ObjectInYoungGeneration(table) && ObjectInYoungGeneration(key)) {
+ isolate->heap()->RecordEphemeronKeyWrite(table, key_slot_address);
+ }
+ isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(table, key_slot,
+ maybe_key);
+}
+
void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
int offset, int length) {
for (int i = 0; i < length; i++) {
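
Heap::RecordEphemeronKeyWrite above keys the new ephemeron_remembered_set_ by table and collects the entry indices whose keys live in the young generation. A stand-alone sketch of that container shape and insertion pattern, using standard containers and a placeholder table id rather than V8's EphemeronHashTable:

#include <cstdint>
#include <unordered_map>
#include <unordered_set>

using TableId = uintptr_t;  // placeholder for EphemeronHashTable
using EphemeronRememberedSet =
    std::unordered_map<TableId, std::unordered_set<int>>;

// Mirrors the insert pattern of RecordEphemeronKeyWrite: insert() is a no-op
// when the table already has a bucket; the entry index is then added to it.
void RecordKeyWrite(EphemeronRememberedSet* set, TableId table, int entry) {
  auto it = set->insert({table, std::unordered_set<int>()});
  it.first->second.insert(entry);
}
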
diff --git a/chromium/v8/src/heap/heap.h b/chromium/v8/src/heap/heap.h
index 7f687e8fdf3..1725a9ad870 100644
--- a/chromium/v8/src/heap/heap.h
+++ b/chromium/v8/src/heap/heap.h
@@ -29,11 +29,12 @@
#include "src/objects/string-table.h"
#include "src/roots.h"
#include "src/visitors.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
namespace v8 {
namespace debug {
-typedef void (*OutOfMemoryCallback)(void* data);
+using OutOfMemoryCallback = void (*)(void* data);
} // namespace debug
namespace internal {
@@ -198,8 +199,16 @@ struct CommentStatistic {
};
#endif
+using EphemeronRememberedSet =
+ std::unordered_map<EphemeronHashTable, std::unordered_set<int>,
+ Object::Hasher>;
+
class Heap {
public:
+ // Stores ephemeron entries where the EphemeronHashTable is in old-space,
+ // and the key of the entry is in new-space. Such keys do not appear in the
+ // usual OLD_TO_NEW remembered set.
+ EphemeronRememberedSet ephemeron_remembered_set_;
enum FindMementoMode { kForRuntime, kForGC };
enum HeapState {
@@ -224,7 +233,7 @@ class Heap {
Address start;
Address end;
};
- typedef std::vector<Chunk> Reservation;
+ using Reservation = std::vector<Chunk>;
static const int kInitalOldGenerationLimitFactor = 2;
@@ -270,10 +279,16 @@ class Heap {
// Calculates the maximum amount of filler that could be required by the
// given alignment.
- static int GetMaximumFillToAlign(AllocationAlignment alignment);
+ V8_EXPORT_PRIVATE static int GetMaximumFillToAlign(
+ AllocationAlignment alignment);
// Calculates the actual amount of filler required for a given address at the
// given alignment.
- static int GetFillToAlign(Address address, AllocationAlignment alignment);
+ V8_EXPORT_PRIVATE static int GetFillToAlign(Address address,
+ AllocationAlignment alignment);
+
+ // Returns the size of the initial area of a code-range, which is marked
+ // writable and reserved to contain unwind information.
+ static size_t GetCodeRangeReservedAreaSize();
void FatalProcessOutOfMemory(const char* location);
@@ -293,6 +308,17 @@ class Heap {
#endif
}
+ // Helper function to get the bytecode flushing mode based on the flags. This
+ // is required because it is not safe to access flags in the concurrent marker.
+ static inline BytecodeFlushMode GetBytecodeFlushMode() {
+ if (FLAG_stress_flush_bytecode) {
+ return BytecodeFlushMode::kStressFlushBytecode;
+ } else if (FLAG_flush_bytecode) {
+ return BytecodeFlushMode::kFlushBytecode;
+ }
+ return BytecodeFlushMode::kDoNotFlushBytecode;
+ }
+
static uintptr_t ZapValue() {
return FLAG_clear_free_memory ? kClearedFreeMemoryValue : kZapValue;
}
@@ -329,6 +355,10 @@ class Heap {
V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
Address slot,
HeapObject value);
+ V8_EXPORT_PRIVATE void RecordEphemeronKeyWrite(EphemeronHashTable table,
+ Address key_slot);
+ V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
+ Address raw_object, Address address, Isolate* isolate);
V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
Heap* heap, FixedArray array, int offset, int length);
V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
@@ -364,6 +394,10 @@ class Heap {
void MoveElements(FixedArray array, int dst_index, int src_index, int len,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ // Copy len elements from src_index of src array to dst_index of dst array.
+ void CopyElements(FixedArray dst, FixedArray src, int dst_index,
+ int src_index, int len, WriteBarrierMode mode);
+
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If slots could have been recorded in
// the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
@@ -386,17 +420,19 @@ class Heap {
// Trim the given array from the left. Note that this relocates the object
// start and hence is only valid if there is only a single reference to it.
- FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj, int elements_to_trim);
+ V8_EXPORT_PRIVATE FixedArrayBase LeftTrimFixedArray(FixedArrayBase obj,
+ int elements_to_trim);
// Trim the given array from the right.
- void RightTrimFixedArray(FixedArrayBase obj, int elements_to_trim);
+ V8_EXPORT_PRIVATE void RightTrimFixedArray(FixedArrayBase obj,
+ int elements_to_trim);
void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim);
// Converts the given boolean condition to JavaScript boolean value.
inline Oddball ToBoolean(bool condition);
// Notify the heap that a context has been disposed.
- int NotifyContextDisposed(bool dependant_context);
+ V8_EXPORT_PRIVATE int NotifyContextDisposed(bool dependant_context);
void set_native_contexts_list(Object object) {
native_contexts_list_ = object;
@@ -448,7 +484,7 @@ class Heap {
}
void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
- void UnprotectAndRegisterMemoryChunk(HeapObject object);
+ V8_EXPORT_PRIVATE void UnprotectAndRegisterMemoryChunk(HeapObject object);
void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();
@@ -488,16 +524,18 @@ class Heap {
bool IdleNotification(double deadline_in_seconds);
bool IdleNotification(int idle_time_in_ms);
- void MemoryPressureNotification(MemoryPressureLevel level,
- bool is_isolate_locked);
+ V8_EXPORT_PRIVATE void MemoryPressureNotification(MemoryPressureLevel level,
+ bool is_isolate_locked);
void CheckMemoryPressure();
- void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
- void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
- size_t heap_limit);
- void AutomaticallyRestoreInitialHeapLimit(double threshold_percent);
+ V8_EXPORT_PRIVATE void AddNearHeapLimitCallback(v8::NearHeapLimitCallback,
+ void* data);
+ V8_EXPORT_PRIVATE void RemoveNearHeapLimitCallback(
+ v8::NearHeapLimitCallback callback, size_t heap_limit);
+ V8_EXPORT_PRIVATE void AutomaticallyRestoreInitialHeapLimit(
+ double threshold_percent);
- double MonotonicallyIncreasingTimeInMs();
+ V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs();
void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -531,9 +569,9 @@ class Heap {
size_t backing_store_bytes() const { return backing_store_bytes_; }
- void CompactWeakArrayLists(PretenureFlag pretenure);
+ void CompactWeakArrayLists(AllocationType allocation);
- void AddRetainedMap(Handle<Map> map);
+ V8_EXPORT_PRIVATE void AddRetainedMap(Handle<Map> map);
// This event is triggered after successful allocation of a new object made
// by runtime. Allocations of target space for object evacuation do not
@@ -553,7 +591,7 @@ class Heap {
void ActivateMemoryReducerIfNeeded();
- bool ShouldOptimizeForMemoryUsage();
+ V8_EXPORT_PRIVATE bool ShouldOptimizeForMemoryUsage();
bool HighMemoryPressure() {
return memory_pressure_level_ != MemoryPressureLevel::kNone;
@@ -579,9 +617,14 @@ class Heap {
size_t code_range_size_in_mb);
void ConfigureHeapDefault();
- // Prepares the heap, setting up memory areas that are needed in the isolate
- // without actually creating any objects.
- void SetUp(ReadOnlyHeap* ro_heap);
+ // Prepares the heap, setting up for deserialization.
+ void SetUp();
+
+ // Sets read-only heap and space.
+ void SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap);
+
+ // Sets up the heap memory without creating any objects.
+ void SetUpSpaces();
// (Re-)Initialize hash seed from flag or RNG.
void InitializeHashSeed();
@@ -621,7 +664,7 @@ class Heap {
inline Space* space(int idx);
// Returns name of the space.
- const char* GetSpaceName(int idx);
+ V8_EXPORT_PRIVATE static const char* GetSpaceName(AllocationSpace space);
// ===========================================================================
// Getters to other components. ==============================================
@@ -694,7 +737,7 @@ class Heap {
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot);
- void AddKeepDuringJobTarget(Handle<JSReceiver> target);
+ V8_EXPORT_PRIVATE void AddKeepDuringJobTarget(Handle<JSReceiver> target);
void ClearKeepDuringJobSet();
// ===========================================================================
@@ -705,8 +748,8 @@ class Heap {
bool inline_allocation_disabled() { return inline_allocation_disabled_; }
// Switch whether inline bump-pointer allocation should be used.
- void EnableInlineAllocation();
- void DisableInlineAllocation();
+ V8_EXPORT_PRIVATE void EnableInlineAllocation();
+ V8_EXPORT_PRIVATE void DisableInlineAllocation();
// ===========================================================================
// Methods triggering GCs. ===================================================
@@ -725,12 +768,13 @@ class Heap {
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
// Last hope GC, should try to squeeze as much as possible.
- void CollectAllAvailableGarbage(GarbageCollectionReason gc_reason);
+ V8_EXPORT_PRIVATE void CollectAllAvailableGarbage(
+ GarbageCollectionReason gc_reason);
// Precise garbage collection that potentially finalizes already running
// incremental marking before performing an atomic garbage collection.
// Only use if absolutely necessary or in tests to avoid floating garbage!
- void PreciseCollectAllGarbage(
+ V8_EXPORT_PRIVATE void PreciseCollectAllGarbage(
int flags, GarbageCollectionReason gc_reason,
const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
@@ -738,8 +782,8 @@ class Heap {
// completes incremental marking in order to free external resources.
void ReportExternalMemoryPressure();
- typedef v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback
- GetExternallyAllocatedMemoryInBytesCallback;
+ using GetExternallyAllocatedMemoryInBytesCallback =
+ v8::Isolate::GetExternallyAllocatedMemoryInBytesCallback;
void SetGetExternallyAllocatedMemoryInBytesCallback(
GetExternallyAllocatedMemoryInBytesCallback callback) {
@@ -753,7 +797,7 @@ class Heap {
// Builtins. =================================================================
// ===========================================================================
- Code builtin(int index);
+ V8_EXPORT_PRIVATE Code builtin(int index);
Address builtin_address(int index);
void set_builtin(int index, Code builtin);
@@ -813,13 +857,13 @@ class Heap {
// Start incremental marking and ensure that idle time handler can perform
// incremental steps.
- void StartIdleIncrementalMarking(
+ V8_EXPORT_PRIVATE void StartIdleIncrementalMarking(
GarbageCollectionReason gc_reason,
GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
// Starts incremental marking assuming incremental marking is currently
// stopped.
- void StartIncrementalMarking(
+ V8_EXPORT_PRIVATE void StartIncrementalMarking(
int gc_flags, GarbageCollectionReason gc_reason,
GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
@@ -855,7 +899,8 @@ class Heap {
// This function checks that either
// - the map transition is safe,
// - or it was communicated to GC using NotifyObjectLayoutChange.
- void VerifyObjectLayoutChange(HeapObject object, Map new_map);
+ V8_EXPORT_PRIVATE void VerifyObjectLayoutChange(HeapObject object,
+ Map new_map);
#endif
// ===========================================================================
@@ -900,8 +945,8 @@ class Heap {
// Called when a string's resource is changed. The size of the payload is sent
  // as an argument to the method.
- void UpdateExternalString(String string, size_t old_payload,
- size_t new_payload);
+ V8_EXPORT_PRIVATE void UpdateExternalString(String string, size_t old_payload,
+ size_t new_payload);
// Finalizes an external string by deleting the associated external
// data and clearing the resource pointer.
@@ -928,16 +973,13 @@ class Heap {
// Returns whether the object resides in old space.
inline bool InOldSpace(Object object);
- // Returns whether the object resides in read-only space.
- inline bool InReadOnlySpace(Object object);
-
  // Checks whether an address/object is in the heap (including auxiliary
// area and unused area).
- bool Contains(HeapObject value);
+ V8_EXPORT_PRIVATE bool Contains(HeapObject value);
  // Checks whether an address/object is in a space.
// Currently used by tests, serialization and heap verification only.
- bool InSpace(HeapObject value, AllocationSpace space);
+ V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space);
// Slow methods that can be used for verification as they can also be used
// with off-heap Addresses.
@@ -982,7 +1024,7 @@ class Heap {
// ===========================================================================
// Returns the maximum amount of memory reserved for the heap.
- size_t MaxReserved();
+ V8_EXPORT_PRIVATE size_t MaxReserved();
size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
@@ -1010,7 +1052,7 @@ class Heap {
size_t Capacity();
// Returns the capacity of the old generation.
- size_t OldGenerationCapacity();
+ V8_EXPORT_PRIVATE size_t OldGenerationCapacity();
// Returns the amount of memory currently held alive by the unmapper.
size_t CommittedMemoryOfUnmapper();
@@ -1040,7 +1082,7 @@ class Heap {
size_t Available();
  // Returns the size of all objects residing in the heap.
- size_t SizeOfObjects();
+ V8_EXPORT_PRIVATE size_t SizeOfObjects();
void UpdateSurvivalStatistics(int start_new_space_size);
@@ -1124,7 +1166,7 @@ class Heap {
// Returns the size of objects residing in non-new spaces.
// Excludes external memory held by those objects.
- size_t OldGenerationSizeOfObjects();
+ V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects();
// ===========================================================================
// Prologue/epilogue callback methods.========================================
@@ -1148,8 +1190,8 @@ class Heap {
// ===========================================================================
// Creates a filler object and returns a heap object immediately after it.
- V8_WARN_UNUSED_RESULT HeapObject PrecedeWithFiller(HeapObject object,
- int filler_size);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT HeapObject
+ PrecedeWithFiller(HeapObject object, int filler_size);
// Creates a filler object if needed for alignment and returns a heap object
// immediately after it. If any space is left after the returned object,
@@ -1236,7 +1278,7 @@ class Heap {
// =============================================================================
#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
- void Verify();
+ V8_EXPORT_PRIVATE void Verify();
void VerifyRememberedSetFor(HeapObject object);
#endif
@@ -1283,8 +1325,8 @@ class Heap {
private:
class SkipStoreBufferScope;
- typedef String (*ExternalStringTableUpdaterCallback)(Heap* heap,
- FullObjectSlot pointer);
+ using ExternalStringTableUpdaterCallback = String (*)(Heap* heap,
+ FullObjectSlot pointer);
// External strings table is a place where all external strings are
// registered. We need to keep track of such strings to properly
@@ -1382,48 +1424,9 @@ class Heap {
Heap();
~Heap();
- // Selects the proper allocation space based on the pretenuring decision.
- static AllocationSpace SelectSpace(PretenureFlag pretenure) {
- switch (pretenure) {
- case TENURED_READ_ONLY:
- return RO_SPACE;
- case TENURED:
- return OLD_SPACE;
- case NOT_TENURED:
- return NEW_SPACE;
- default:
- UNREACHABLE();
- }
- }
-
- // TODO(hpayer): Remove this translation function as soon as all code is
- // converted to use AllocationType. Also remove PretenureFlag and use
- // Allocation Type instead.
- static AllocationType SelectType(AllocationSpace space) {
- switch (space) {
- case NEW_SPACE:
- return AllocationType::kYoung;
- case NEW_LO_SPACE:
- return AllocationType::kYoung;
- case OLD_SPACE:
- return AllocationType::kOld;
- case LO_SPACE:
- return AllocationType::kOld;
- case CODE_SPACE:
- return AllocationType::kCode;
- case CODE_LO_SPACE:
- return AllocationType::kCode;
- case MAP_SPACE:
- return AllocationType::kMap;
- case RO_SPACE:
- return AllocationType::kReadOnly;
- default:
- UNREACHABLE();
- }
- }
-
- static bool IsRegularObjectAllocation(AllocationType type) {
- return AllocationType::kYoung == type || AllocationType::kOld == type;
+ static bool IsRegularObjectAllocation(AllocationType allocation) {
+ return AllocationType::kYoung == allocation ||
+ AllocationType::kOld == allocation;
}
static size_t DefaultGetExternallyAllocatedMemoryInBytesCallback() {
@@ -1476,17 +1479,18 @@ class Heap {
void EnsureFromSpaceIsCommitted();
// Uncommit unused semi space.
- bool UncommitFromSpace();
+ V8_EXPORT_PRIVATE bool UncommitFromSpace();
// Fill in bogus values in from space
void ZapFromSpace();
// Zaps the memory of a code object.
- void ZapCodeObject(Address start_address, int size_in_bytes);
+ V8_EXPORT_PRIVATE void ZapCodeObject(Address start_address,
+ int size_in_bytes);
  // Deopts all code that contains allocation instructions which are tenured or
  // not tenured. Moreover, it clears the pretenuring allocation site statistics.
- void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
+ void ResetAllAllocationSitesDependentCode(AllocationType allocation);
// Evaluates local pretenuring for the old space and calls
// ResetAllTenuredAllocationSitesDependentCode if too many objects died in
@@ -1672,7 +1676,7 @@ class Heap {
bool always_allocate() { return always_allocate_scope_count_ != 0; }
- bool CanExpandOldGeneration(size_t size);
+ V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
bool ShouldExpandOldGenerationOnSlowAllocation();
@@ -1691,16 +1695,6 @@ class Heap {
void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
// ===========================================================================
- // HeapIterator helpers. =====================================================
- // ===========================================================================
-
- void heap_iterator_start() { heap_iterator_depth_++; }
-
- void heap_iterator_end() { heap_iterator_depth_--; }
-
- bool in_heap_iterator() { return heap_iterator_depth_ > 0; }
-
- // ===========================================================================
// Allocation methods. =======================================================
// ===========================================================================
@@ -1715,7 +1709,7 @@ class Heap {
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
- int size_in_bytes, AllocationType type,
+ int size_in_bytes, AllocationType allocation,
AllocationAlignment aligment = kWordAligned);
// This method will try to perform an allocation of a given size of a given
@@ -1724,7 +1718,7 @@ class Heap {
// times. If after that retry procedure the allocation still fails nullptr is
// returned.
HeapObject AllocateRawWithLightRetry(
- int size, AllocationType type,
+ int size, AllocationType allocation,
AllocationAlignment alignment = kWordAligned);
// This method will try to perform an allocation of a given size of a given
@@ -1734,13 +1728,13 @@ class Heap {
// garbage collection is triggered which tries to significantly reduce memory.
// If the allocation still fails after that a fatal error is thrown.
HeapObject AllocateRawWithRetryOrFail(
- int size, AllocationType type,
+ int size, AllocationType allocation,
AllocationAlignment alignment = kWordAligned);
HeapObject AllocateRawCodeInLargeObjectSpace(int size);
// Allocates a heap object based on the map.
V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
- AllocationSpace space);
+ AllocationType allocation);
// Takes a code object and checks if it is on memory which is not subject to
// compaction. This method will return a new code object on an immovable
@@ -1772,7 +1766,7 @@ class Heap {
void PrintRetainingPath(HeapObject object, RetainingPathOption option);
#ifdef DEBUG
- void IncrementObjectCounters();
+ V8_EXPORT_PRIVATE void IncrementObjectCounters();
#endif // DEBUG
// The amount of memory that has been freed concurrently.
@@ -1847,7 +1841,7 @@ class Heap {
int gc_post_processing_depth_ = 0;
// Returns the amount of external memory registered since last global gc.
- uint64_t PromotedExternalMemorySize();
+ V8_EXPORT_PRIVATE uint64_t PromotedExternalMemorySize();
// How many "runtime allocations" happened.
uint32_t allocations_count_ = 0;
@@ -2007,9 +2001,6 @@ class Heap {
bool deserialization_complete_ = false;
- // The depth of HeapIterator nestings.
- int heap_iterator_depth_ = 0;
-
bool fast_promotion_mode_ = false;
// Used for testing purposes.
@@ -2086,7 +2077,6 @@ class Heap {
DISALLOW_COPY_AND_ASSIGN(Heap);
};
-
class HeapStats {
public:
static const int kStartMarker = 0xDECADE00;
@@ -2256,7 +2246,7 @@ class SpaceIterator : public Malloced {
// nodes filtering uses GC marks, it can't be used during MS/MC GC
// phases. Also, it is forbidden to interrupt iteration in this mode,
// as this will leave heap objects marked (and thus, unusable).
-class HeapIterator {
+class V8_EXPORT_PRIVATE HeapIterator {
public:
enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
@@ -2335,8 +2325,6 @@ class AllocationObserver {
DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
-V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
-
// -----------------------------------------------------------------------------
// Allows observation of heap object allocations.
class HeapObjectAllocationTracker {
@@ -2347,6 +2335,20 @@ class HeapObjectAllocationTracker {
virtual ~HeapObjectAllocationTracker() = default;
};
+template <typename T>
+T ForwardingAddress(T heap_obj) {
+ MapWord map_word = heap_obj->map_word();
+
+ if (map_word.IsForwardingAddress()) {
+ return T::cast(map_word.ToForwardingAddress());
+ } else if (Heap::InFromPage(heap_obj)) {
+ return T();
+ } else {
+ // TODO(ulan): Support minor mark-compactor here.
+ return heap_obj;
+ }
+}
+
} // namespace internal
} // namespace v8
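
The ForwardingAddress() helper moved into heap.h above captures the standard evacuation lookup: once an object has been copied, its map word holds a forwarding pointer to the new location. The following standalone sketch models only that lookup with toy types (none of these are V8's real HeapObject/MapWord classes, and the dead-in-from-page case is omitted):

// Toy model of the forwarding-address lookup used after evacuation.
#include <cassert>

struct ToyObject {
  ToyObject* forwarding = nullptr;  // set once the object has been copied
  int payload = 0;
};

// Follow the forwarding pointer if present, otherwise the object itself is
// still the canonical location.
ToyObject* Forwarded(ToyObject* obj) {
  return obj->forwarding != nullptr ? obj->forwarding : obj;
}

int main() {
  ToyObject old_location;
  ToyObject new_location;
  new_location.payload = 42;
  old_location.forwarding = &new_location;  // evacuation copied the object
  assert(Forwarded(&old_location) == &new_location);
  assert(Forwarded(&new_location) == &new_location);
  return 0;
}
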
diff --git a/chromium/v8/src/heap/incremental-marking.cc b/chromium/v8/src/heap/incremental-marking.cc
index 2a665394d34..9005fc3e575 100644
--- a/chromium/v8/src/heap/incremental-marking.cc
+++ b/chromium/v8/src/heap/incremental-marking.cc
@@ -609,22 +609,6 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
UpdateWeakReferencesAfterScavenge();
}
-namespace {
-template <typename T>
-T ForwardingAddress(T heap_obj) {
- MapWord map_word = heap_obj->map_word();
-
- if (map_word.IsForwardingAddress()) {
- return T::cast(map_word.ToForwardingAddress());
- } else if (Heap::InFromPage(heap_obj)) {
- return T();
- } else {
- // TODO(ulan): Support minor mark-compactor here.
- return heap_obj;
- }
-}
-} // namespace
-
void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
weak_objects_->weak_references.Update(
[](std::pair<HeapObject, HeapObjectSlot> slot_in,
@@ -780,10 +764,19 @@ intptr_t IncrementalMarking::ProcessMarkingWorklist(
while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
HeapObject obj = marking_worklist()->Pop();
if (obj.is_null()) break;
- // Left trimming may result in white, grey, or black filler objects on the
- // marking deque. Ignore these objects.
+ // Left trimming may result in grey or black filler objects on the marking
+ // worklist. Ignore these objects.
if (obj->IsFiller()) {
- DCHECK(!marking_state()->IsImpossible(obj));
+ // Due to copying mark bits and the fact that grey and black have their
+ // first bit set, one word fillers are always black.
+ DCHECK_IMPLIES(
+ obj->map() == ReadOnlyRoots(heap()).one_pointer_filler_map(),
+ marking_state()->IsBlack(obj));
+ // Other fillers may be black or grey depending on the color of the object
+ // that was trimmed.
+ DCHECK_IMPLIES(
+ obj->map() != ReadOnlyRoots(heap()).one_pointer_filler_map(),
+ marking_state()->IsBlackOrGrey(obj));
continue;
}
bytes_processed += VisitObject(obj->map(), obj);
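
The DCHECKs added above lean on the mark-bit color scheme in which grey and black both have their first bit set and black additionally has its second bit set. A minimal, illustrative model of those predicates follows; the bit values are chosen for the example and are not taken from marking.h:

// Simplified model of the white/grey/black predicates used by the DCHECKs.
#include <cassert>
#include <cstdint>

enum class Color : uint8_t { kWhite = 0b00, kGrey = 0b01, kBlack = 0b11 };

bool IsBlackOrGrey(Color c) {
  return (static_cast<uint8_t>(c) & 0b01) != 0;  // first bit set
}
bool IsBlack(Color c) {
  return static_cast<uint8_t>(c) == 0b11;  // both bits set
}

int main() {
  assert(IsBlackOrGrey(Color::kGrey) && IsBlackOrGrey(Color::kBlack));
  assert(!IsBlackOrGrey(Color::kWhite));
  assert(IsBlack(Color::kBlack) && !IsBlack(Color::kGrey));
  return 0;
}
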
diff --git a/chromium/v8/src/heap/invalidated-slots.h b/chromium/v8/src/heap/invalidated-slots.h
index 364bb227815..0480086a3a2 100644
--- a/chromium/v8/src/heap/invalidated-slots.h
+++ b/chromium/v8/src/heap/invalidated-slots.h
@@ -28,7 +28,7 @@ using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
// implementation with complexity O(m*log(m) + n), where
// m is the number of invalidated objects in the memory chunk.
// n is the number of IsValid queries.
-class InvalidatedSlotsFilter {
+class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
public:
explicit InvalidatedSlotsFilter(MemoryChunk* chunk);
inline bool IsValid(Address slot);
diff --git a/chromium/v8/src/heap/mark-compact-inl.h b/chromium/v8/src/heap/mark-compact-inl.h
index 925e98c1708..0b12330b5bc 100644
--- a/chromium/v8/src/heap/mark-compact-inl.h
+++ b/chromium/v8/src/heap/mark-compact-inl.h
@@ -86,12 +86,11 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
- if (shared_info->ShouldFlushBytecode()) {
+ if (shared_info->ShouldFlushBytecode(Heap::GetBytecodeFlushMode())) {
collector_->AddBytecodeFlushingCandidate(shared_info);
} else {
VisitPointer(shared_info,
- HeapObject::RawField(shared_info,
- SharedFunctionInfo::kFunctionDataOffset));
+ shared_info.RawField(SharedFunctionInfo::kFunctionDataOffset));
}
return size;
}
@@ -249,8 +248,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
if (marking_state()->IsBlackOrGrey(target)) {
// Record the slot inside the JSWeakRef, since the IterateBody below
// won't visit it.
- ObjectSlot slot =
- HeapObject::RawField(weak_ref, JSWeakRef::kTargetOffset);
+ ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
collector_->RecordSlot(weak_ref, slot, target);
} else {
// JSWeakRef points to a potentially dead object. We have to process
@@ -272,8 +270,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
if (marking_state()->IsBlackOrGrey(target)) {
// Record the slot inside the WeakCell, since the IterateBody below
// won't visit it.
- ObjectSlot slot =
- HeapObject::RawField(weak_cell, WeakCell::kTargetOffset);
+ ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
collector_->RecordSlot(weak_cell, slot, target);
} else {
// WeakCell points to a potentially dead object. We have to process
@@ -413,8 +410,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
int start = static_cast<int>(current_progress_bar);
int end = Min(size, start + kProgressBarScanningChunk);
if (start < end) {
- VisitPointers(object, HeapObject::RawField(object, start),
- HeapObject::RawField(object, end));
+ VisitPointers(object, object.RawField(start), object.RawField(end));
// Setting the progress bar can fail if the object that is currently
// scanned is also revisited. In this case, there may be two tasks racing
      // on the progress counter. The loser can bail out because the progress
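
The progress-bar scanning above processes a large array in fixed-size chunks and lets concurrent visitors race on a shared progress counter, with the loser of the race bailing out. A simplified sketch of that idea using std::atomic; the chunk size and names are made up for the example:

// Chunked scanning with a shared progress counter; the visitor that fails
// the compare-exchange stops, because the winner continues from there.
#include <algorithm>
#include <atomic>
#include <cstdio>
#include <vector>

constexpr int kChunk = 4;

void ScanChunked(const std::vector<int>& array, std::atomic<int>& progress) {
  int start = progress.load(std::memory_order_relaxed);
  while (start < static_cast<int>(array.size())) {
    int end = std::min<int>(static_cast<int>(array.size()), start + kChunk);
    for (int i = start; i < end; ++i) {
      // visit array[i] ...
    }
    // Try to publish the new position; if another visitor already advanced
    // it, bail out and let that visitor carry on (the "loser" case).
    if (!progress.compare_exchange_strong(start, end)) return;
    start = end;
  }
}

int main() {
  std::vector<int> array(10, 1);
  std::atomic<int> progress{0};
  ScanChunked(array, progress);
  std::printf("scanned up to %d\n", progress.load());
  return 0;
}
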
diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc
index 30bbd353b82..2c119d95932 100644
--- a/chromium/v8/src/heap/mark-compact.cc
+++ b/chromium/v8/src/heap/mark-compact.cc
@@ -506,9 +506,7 @@ void MarkCompactCollector::CollectGarbage() {
RecordObjectStats();
StartSweepSpaces();
-
Evacuate();
-
Finish();
}
@@ -631,7 +629,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
size_t area_size = space->AreaSize();
// Pairs of (live_bytes_in_page, page).
- typedef std::pair<size_t, Page*> LiveBytesPagePair;
+ using LiveBytesPagePair = std::pair<size_t, Page*>;
std::vector<LiveBytesPagePair> pages;
pages.reserve(number_of_pages);
@@ -1086,8 +1084,11 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
class RecordMigratedSlotVisitor : public ObjectVisitor {
public:
- explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
+ explicit RecordMigratedSlotVisitor(
+ MarkCompactCollector* collector,
+ EphemeronRememberedSet* ephemeron_remembered_set)
+ : collector_(collector),
+ ephemeron_remembered_set_(ephemeron_remembered_set) {}
inline void VisitPointer(HeapObject host, ObjectSlot p) final {
DCHECK(!HasWeakHeapObjectTag(*p));
@@ -1114,6 +1115,23 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
}
}
+ inline void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
+ ObjectSlot value) override {
+ DCHECK(host->IsEphemeronHashTable());
+ DCHECK(!Heap::InYoungGeneration(host));
+
+ VisitPointer(host, value);
+
+ if (ephemeron_remembered_set_ && Heap::InYoungGeneration(*key)) {
+ auto table = EphemeronHashTable::unchecked_cast(host);
+ auto insert_result =
+ ephemeron_remembered_set_->insert({table, std::unordered_set<int>()});
+ insert_result.first->second.insert(index);
+ } else {
+ VisitPointer(host, key);
+ }
+ }
+
inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
@@ -1157,6 +1175,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
}
MarkCompactCollector* collector_;
+ EphemeronRememberedSet* ephemeron_remembered_set_;
};
class MigrationObserver {
@@ -1201,9 +1220,9 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
protected:
enum MigrationMode { kFast, kObserved };
- typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject dst,
- HeapObject src, int size,
- AllocationSpace dest);
+ using MigrateFunction = void (*)(EvacuateVisitorBase* base, HeapObject dst,
+ HeapObject src, int size,
+ AllocationSpace dest);
template <MigrationMode mode>
static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
@@ -1455,7 +1474,8 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
inline bool Visit(HeapObject object, int size) override {
- RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+ RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
+ &heap_->ephemeron_remembered_set_);
object->IterateBodyFast(&visitor);
return true;
}
@@ -1547,7 +1567,7 @@ bool MarkCompactCollector::ProcessEphemerons() {
// Drain current_ephemerons and push ephemerons where key and value are still
// unreachable into next_ephemerons.
while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
- if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
+ if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
}
}
@@ -1560,7 +1580,7 @@ bool MarkCompactCollector::ProcessEphemerons() {
// before) and push ephemerons where key and value are still unreachable into
// next_ephemerons.
while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
- if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
+ if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
ephemeron_marked = true;
}
}
@@ -1583,7 +1603,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
- VisitEphemeron(ephemeron.key, ephemeron.value);
+ ProcessEphemeron(ephemeron.key, ephemeron.value);
if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
@@ -1610,7 +1630,7 @@ void MarkCompactCollector::ProcessEphemeronsLinear() {
}
while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
- VisitEphemeron(ephemeron.key, ephemeron.value);
+ ProcessEphemeron(ephemeron.key, ephemeron.value);
if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
@@ -1682,7 +1702,21 @@ void MarkCompactCollector::ProcessMarkingWorklistInternal() {
HeapObject object;
MarkCompactMarkingVisitor visitor(this, marking_state());
while (!(object = marking_worklist()->Pop()).is_null()) {
- DCHECK(!object->IsFiller());
+ // Left trimming may result in grey or black filler objects on the marking
+ // worklist. Ignore these objects.
+ if (object->IsFiller()) {
+ // Due to copying mark bits and the fact that grey and black have their
+ // first bit set, one word fillers are always black.
+ DCHECK_IMPLIES(
+ object->map() == ReadOnlyRoots(heap()).one_pointer_filler_map(),
+ marking_state()->IsBlack(object));
+ // Other fillers may be black or grey depending on the color of the object
+ // that was trimmed.
+ DCHECK_IMPLIES(
+ object->map() != ReadOnlyRoots(heap()).one_pointer_filler_map(),
+ marking_state()->IsBlackOrGrey(object));
+ continue;
+ }
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
DCHECK(!(marking_state()->IsWhite(object)));
@@ -1697,7 +1731,7 @@ void MarkCompactCollector::ProcessMarkingWorklistInternal() {
}
}
-bool MarkCompactCollector::VisitEphemeron(HeapObject key, HeapObject value) {
+bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
if (marking_state()->IsBlackOrGrey(key)) {
if (marking_state()->WhiteToGrey(value)) {
marking_worklist()->Push(value);
@@ -1741,12 +1775,12 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
}
void MarkCompactCollector::RecordObjectStats() {
- if (V8_UNLIKELY(FLAG_gc_stats)) {
+ if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
heap()->CreateObjectStats();
ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
heap()->dead_object_stats_.get());
collector.Collect();
- if (V8_UNLIKELY(FLAG_gc_stats &
+ if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
std::stringstream live, dead;
heap()->live_object_stats_->Dump(live);
@@ -2076,8 +2110,8 @@ void MarkCompactCollector::ClearOldBytecodeCandidates() {
// Now record the slot, which has either been updated to an uncompiled data,
// or is the BytecodeArray which is still alive.
- ObjectSlot slot = HeapObject::RawField(
- flushing_candidate, SharedFunctionInfo::kFunctionDataOffset);
+ ObjectSlot slot =
+ flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
}
}
@@ -2254,6 +2288,14 @@ void MarkCompactCollector::ClearWeakCollections() {
}
}
}
+ for (auto it = heap_->ephemeron_remembered_set_.begin();
+ it != heap_->ephemeron_remembered_set_.end();) {
+ if (!non_atomic_marking_state()->IsBlackOrGrey(it->first)) {
+ it = heap_->ephemeron_remembered_set_.erase(it);
+ } else {
+ ++it;
+ }
+ }
}
void MarkCompactCollector::ClearWeakReferences() {
@@ -2293,8 +2335,7 @@ void MarkCompactCollector::ClearJSWeakRefs() {
weak_ref->set_target(ReadOnlyRoots(isolate()).undefined_value());
} else {
// The value of the JSWeakRef is alive.
- ObjectSlot slot =
- HeapObject::RawField(weak_ref, JSWeakRef::kTargetOffset);
+ ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
RecordSlot(weak_ref, slot, target);
}
}
@@ -2328,8 +2369,7 @@ void MarkCompactCollector::ClearJSWeakRefs() {
DCHECK(finalization_group->scheduled_for_cleanup());
} else {
// The value of the WeakCell is alive.
- ObjectSlot slot =
- HeapObject::RawField(weak_cell, WeakCell::kTargetOffset);
+ ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
}
}
@@ -2764,16 +2804,36 @@ void Evacuator::Finalize() {
class FullEvacuator : public Evacuator {
public:
- FullEvacuator(MarkCompactCollector* collector,
- RecordMigratedSlotVisitor* record_visitor)
- : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
+ explicit FullEvacuator(MarkCompactCollector* collector)
+ : Evacuator(collector->heap(), &record_visitor_),
+ record_visitor_(collector, &ephemeron_remembered_set_),
+ collector_(collector) {}
GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
}
+ inline void Finalize() {
+ Evacuator::Finalize();
+
+ for (auto it = ephemeron_remembered_set_.begin();
+ it != ephemeron_remembered_set_.end(); ++it) {
+ auto insert_result =
+ heap()->ephemeron_remembered_set_.insert({it->first, it->second});
+ if (!insert_result.second) {
+ // Insertion didn't happen, there was already an item.
+        auto& set = insert_result.first->second;
+ for (int entry : it->second) {
+ set.insert(entry);
+ }
+ }
+ }
+ }
+
protected:
void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
+ EphemeronRememberedSet ephemeron_remembered_set_;
+ RecordMigratedSlotVisitor record_visitor_;
MarkCompactCollector* collector_;
};
@@ -2860,7 +2920,6 @@ class PageEvacuationTask : public ItemParallelJob::Task {
template <class Evacuator, class Collector>
void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
Collector* collector, ItemParallelJob* job,
- RecordMigratedSlotVisitor* record_visitor,
MigrationObserver* migration_observer, const intptr_t live_bytes) {
// Used for trace summary.
double compaction_speed = 0;
@@ -2875,7 +2934,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
NumberOfParallelCompactionTasks(job->NumberOfItems());
Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
for (int i = 0; i < wanted_num_tasks; i++) {
- evacuators[i] = new Evacuator(collector, record_visitor);
+ evacuators[i] = new Evacuator(collector);
if (profiling) evacuators[i]->AddObserver(&profiling_observer);
if (migration_observer != nullptr)
evacuators[i]->AddObserver(migration_observer);
@@ -2957,9 +3016,8 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
if (evacuation_job.NumberOfItems() == 0) return;
- RecordMigratedSlotVisitor record_visitor(this);
- CreateAndExecuteEvacuationTasks<FullEvacuator>(
- this, &evacuation_job, &record_visitor, nullptr, live_bytes);
+ CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &evacuation_job, nullptr,
+ live_bytes);
PostProcessEvacuationCandidates();
}
@@ -3503,6 +3561,57 @@ int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
return pages;
}
+class EphemeronTableUpdatingItem : public UpdatingItem {
+ public:
+ enum EvacuationState { kRegular, kAborted };
+
+ explicit EphemeronTableUpdatingItem(Heap* heap) : heap_(heap) {}
+ ~EphemeronTableUpdatingItem() override = default;
+
+ void Process() override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "EphemeronTableUpdatingItem::Process");
+
+ for (auto it = heap_->ephemeron_remembered_set_.begin();
+ it != heap_->ephemeron_remembered_set_.end();) {
+ EphemeronHashTable table = it->first;
+ auto& indices = it->second;
+ if (table.map_word().IsForwardingAddress()) {
+ // The table has moved, and RecordMigratedSlotVisitor::VisitEphemeron
+ // inserts entries for the moved table into ephemeron_remembered_set_.
+ it = heap_->ephemeron_remembered_set_.erase(it);
+ continue;
+ }
+ DCHECK(table.map().IsMap());
+ DCHECK(table.Object::IsEphemeronHashTable());
+ for (auto iti = indices.begin(); iti != indices.end();) {
+ // EphemeronHashTable keys must be heap objects.
+ HeapObjectSlot key_slot(
+ table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(*iti)));
+ HeapObject key = key_slot.ToHeapObject();
+ MapWord map_word = key->map_word();
+ if (map_word.IsForwardingAddress()) {
+ key = map_word.ToForwardingAddress();
+ key_slot.StoreHeapObject(key);
+ }
+ if (!heap_->InYoungGeneration(key)) {
+ iti = indices.erase(iti);
+ } else {
+ ++iti;
+ }
+ }
+ if (indices.size() == 0) {
+ it = heap_->ephemeron_remembered_set_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ }
+
+ private:
+ Heap* const heap_;
+};
+
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
@@ -3535,12 +3644,16 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
: NumberOfParallelPointerUpdateTasks(remembered_set_pages,
old_to_new_slots_);
const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
- const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
+ const int num_ephemeron_table_updating_tasks = 1;
+ const int num_tasks =
+ Max(to_space_tasks,
+ remembered_set_tasks + num_ephemeron_table_updating_tasks);
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpdatingTask(
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
+ updating_job.AddItem(new EphemeronTableUpdatingItem(heap()));
updating_job.Run();
}
@@ -3672,20 +3785,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
continue;
}
- if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
- // We need to sweep the page to get it into an iterable state again. Note
- // that this adds unusable memory into the free list that is later on
- // (in the free list) dropped again. Since we only use the flag for
- // testing this is fine.
- p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
- sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
- Heap::ShouldZapGarbage()
- ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
- : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
- space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
- continue;
- }
-
// One unused page is kept, all further are released before sweeping them.
if (non_atomic_marking_state()->live_bytes(p) == 0) {
if (unused_page_present) {
@@ -4028,7 +4127,7 @@ class YoungGenerationRecordMigratedSlotVisitor final
public:
explicit YoungGenerationRecordMigratedSlotVisitor(
MarkCompactCollector* collector)
- : RecordMigratedSlotVisitor(collector) {}
+ : RecordMigratedSlotVisitor(collector, nullptr) {}
void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
@@ -4652,9 +4751,10 @@ namespace {
class YoungGenerationEvacuator : public Evacuator {
public:
- YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
- RecordMigratedSlotVisitor* record_visitor)
- : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
+ explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
+ : Evacuator(collector->heap(), &record_visitor_),
+ record_visitor_(collector->heap()->mark_compact_collector()),
+ collector_(collector) {}
GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
@@ -4663,6 +4763,7 @@ class YoungGenerationEvacuator : public Evacuator {
protected:
void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
+ YoungGenerationRecordMigratedSlotVisitor record_visitor_;
MinorMarkCompactCollector* collector_;
};
@@ -4768,10 +4869,8 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
YoungGenerationMigrationObserver observer(heap(),
heap()->mark_compact_collector());
- YoungGenerationRecordMigratedSlotVisitor record_visitor(
- heap()->mark_compact_collector());
CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
- this, &evacuation_job, &record_visitor, &observer, live_bytes);
+ this, &evacuation_job, &observer, live_bytes);
}
int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
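
FullEvacuator above keeps a per-evacuator ephemeron remembered set and merges it into the heap-global set in Finalize(). The sketch below models that merge with plain standard containers; TableId is a stand-in for EphemeronHashTable, and the container shapes only mirror EphemeronRememberedSet (a map from table to the entry indices whose keys are young):

// Merge a local ephemeron remembered set into the global one.
#include <cstdio>
#include <unordered_map>
#include <unordered_set>

using TableId = int;
using EphemeronSet = std::unordered_map<TableId, std::unordered_set<int>>;

void Merge(const EphemeronSet& local, EphemeronSet& global) {
  for (const auto& [table, indices] : local) {
    auto insert_result = global.insert({table, indices});
    if (!insert_result.second) {
      // The table was already present: union the entry indices into the
      // existing set (take a reference, not a copy, so the union sticks).
      auto& set = insert_result.first->second;
      set.insert(indices.begin(), indices.end());
    }
  }
}

int main() {
  EphemeronSet global{{7, {1}}};
  EphemeronSet local{{7, {2, 3}}, {9, {0}}};
  Merge(local, global);
  std::printf("table 7 has %zu entries\n", global[7].size());  // prints 3
  return 0;
}
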
diff --git a/chromium/v8/src/heap/mark-compact.h b/chromium/v8/src/heap/mark-compact.h
index 8d93830f358..566a7a53c4d 100644
--- a/chromium/v8/src/heap/mark-compact.h
+++ b/chromium/v8/src/heap/mark-compact.h
@@ -273,10 +273,10 @@ class MarkCompactCollectorBase {
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;
template <class Evacuator, class Collector>
- void CreateAndExecuteEvacuationTasks(
- Collector* collector, ItemParallelJob* job,
- RecordMigratedSlotVisitor* record_visitor,
- MigrationObserver* migration_observer, const intptr_t live_bytes);
+ void CreateAndExecuteEvacuationTasks(Collector* collector,
+ ItemParallelJob* job,
+ MigrationObserver* migration_observer,
+ const intptr_t live_bytes);
// Returns whether this page should be moved according to heuristics.
bool ShouldMovePage(Page* p, intptr_t live_bytes);
@@ -414,7 +414,7 @@ struct Ephemeron {
HeapObject value;
};
-typedef Worklist<Ephemeron, 64> EphemeronWorklist;
+using EphemeronWorklist = Worklist<Ephemeron, 64>;
// Weak objects encountered during marking.
struct WeakObjects {
@@ -629,7 +629,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Ensures that sweeping is finished.
//
// Note: Can only be called safely from main thread.
- void EnsureSweepingCompleted();
+ V8_EXPORT_PRIVATE void EnsureSweepingCompleted();
// Checks if sweeping is in progress right now on any space.
bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }
@@ -765,7 +765,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Implements ephemeron semantics: Marks value if key is already reachable.
// Returns true if value was actually marked.
- bool VisitEphemeron(HeapObject key, HeapObject value);
+ bool ProcessEphemeron(HeapObject key, HeapObject value);
// Marks ephemerons and drains marking worklist iteratively
// until a fixpoint is reached.
@@ -923,9 +923,8 @@ class MarkingVisitor final
int,
MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>> {
public:
- typedef HeapVisitor<
- int, MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>>
- Parent;
+ using Parent = HeapVisitor<
+ int, MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>>;
V8_INLINE MarkingVisitor(MarkCompactCollector* collector,
MarkingState* marking_state);
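
ProcessEphemeron() (renamed from VisitEphemeron above) implements the usual ephemeron rule: a value is marked only once its key is marked, and unresolved pairs are retried until no further progress is made. A toy model of that fixpoint, with integers standing in for heap objects:

// Iterate ephemeron pairs to a fixpoint: mark value when key is marked.
#include <cstdio>
#include <set>
#include <utility>
#include <vector>

using Obj = int;

void MarkEphemerons(std::set<Obj>& marked,
                    std::vector<std::pair<Obj, Obj>> ephemerons) {
  bool progress = true;
  while (progress) {
    progress = false;
    std::vector<std::pair<Obj, Obj>> next;
    for (const auto& [key, value] : ephemerons) {
      if (marked.count(key) != 0) {
        // Key is reachable, so the value becomes reachable too.
        progress |= marked.insert(value).second;
      } else {
        next.push_back({key, value});  // retry in the next round
      }
    }
    ephemerons = std::move(next);
  }
}

int main() {
  std::set<Obj> marked = {1};
  // 1 -> 2 and 2 -> 3 form a chain: both values become reachable.
  MarkEphemerons(marked, {{2, 3}, {1, 2}});
  std::printf("marked %zu objects\n", marked.size());  // prints 3
  return 0;
}
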
diff --git a/chromium/v8/src/heap/marking.h b/chromium/v8/src/heap/marking.h
index ec5b06cde14..612cc78601d 100644
--- a/chromium/v8/src/heap/marking.h
+++ b/chromium/v8/src/heap/marking.h
@@ -13,7 +13,7 @@ namespace internal {
class MarkBit {
public:
- typedef uint32_t CellType;
+ using CellType = uint32_t;
STATIC_ASSERT(sizeof(CellType) == sizeof(base::Atomic32));
inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}
diff --git a/chromium/v8/src/heap/object-stats.cc b/chromium/v8/src/heap/object-stats.cc
index affd574ba2a..d920b25bd36 100644
--- a/chromium/v8/src/heap/object-stats.cc
+++ b/chromium/v8/src/heap/object-stats.cc
@@ -18,6 +18,7 @@
#include "src/memcopy.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/objects/heap-object.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/slots.h"
@@ -310,11 +311,14 @@ int ObjectStats::HistogramIndexFromSize(size_t size) {
kLastValueBucketIndex);
}
-void ObjectStats::RecordObjectStats(InstanceType type, size_t size) {
+void ObjectStats::RecordObjectStats(InstanceType type, size_t size,
+ size_t over_allocated) {
DCHECK_LE(type, LAST_TYPE);
object_counts_[type]++;
object_sizes_[type] += size;
size_histogram_[type][HistogramIndexFromSize(size)]++;
+ over_allocated_[type] += over_allocated;
+ over_allocated_histogram_[type][HistogramIndexFromSize(size)]++;
}
void ObjectStats::RecordVirtualObjectStats(VirtualInstanceType type,
@@ -365,8 +369,9 @@ class ObjectStatsCollectorImpl {
bool RecordSimpleVirtualObjectStats(HeapObject parent, HeapObject obj,
ObjectStats::VirtualInstanceType type);
   // For HashTable it is possible to compute over-allocated memory.
+ template <typename Derived, typename Shape>
void RecordHashTableVirtualObjectStats(HeapObject parent,
- FixedArray hash_table,
+ HashTable<Derived, Shape> hash_table,
ObjectStats::VirtualInstanceType type);
bool SameLiveness(HeapObject obj1, HeapObject obj2);
@@ -378,7 +383,9 @@ class ObjectStatsCollectorImpl {
// objects dispatch to the low level ObjectStats::RecordObjectStats manually.
bool ShouldRecordObject(HeapObject object, CowMode check_cow_array);
- void RecordObjectStats(HeapObject obj, InstanceType type, size_t size);
+ void RecordObjectStats(
+ HeapObject obj, InstanceType type, size_t size,
+ size_t over_allocated = ObjectStats::kNoOverAllocation);
// Specific recursion into constant pool or embedded code objects. Records
// FixedArrays and Tuple2.
@@ -395,13 +402,11 @@ class ObjectStatsCollectorImpl {
void RecordVirtualFixedArrayDetails(FixedArray array);
void RecordVirtualFunctionTemplateInfoDetails(FunctionTemplateInfo fti);
void RecordVirtualJSGlobalObjectDetails(JSGlobalObject object);
- void RecordVirtualJSCollectionDetails(JSObject object);
void RecordVirtualJSObjectDetails(JSObject object);
void RecordVirtualMapDetails(Map map);
void RecordVirtualScriptDetails(Script script);
void RecordVirtualExternalStringDetails(ExternalString script);
void RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo info);
- void RecordVirtualJSFunctionDetails(JSFunction function);
void RecordVirtualArrayBoilerplateDescription(
ArrayBoilerplateDescription description);
@@ -434,13 +439,16 @@ bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject obj,
return true;
}
+template <typename Derived, typename Shape>
void ObjectStatsCollectorImpl::RecordHashTableVirtualObjectStats(
- HeapObject parent, FixedArray hash_table,
+ HeapObject parent, HashTable<Derived, Shape> hash_table,
ObjectStats::VirtualInstanceType type) {
- CHECK(hash_table->IsHashTable());
- // TODO(mlippautz): Implement over allocation for hash tables.
+ size_t over_allocated =
+ (hash_table->Capacity() - (hash_table->NumberOfElements() +
+ hash_table->NumberOfDeletedElements())) *
+ HashTable<Derived, Shape>::kEntrySize * kTaggedSize;
RecordVirtualObjectStats(parent, hash_table, type, hash_table->Size(),
- ObjectStats::kNoOverAllocation);
+ over_allocated);
}
bool ObjectStatsCollectorImpl::RecordSimpleVirtualObjectStats(
@@ -452,6 +460,7 @@ bool ObjectStatsCollectorImpl::RecordSimpleVirtualObjectStats(
bool ObjectStatsCollectorImpl::RecordVirtualObjectStats(
HeapObject parent, HeapObject obj, ObjectStats::VirtualInstanceType type,
size_t size, size_t over_allocated, CowMode check_cow_array) {
+ CHECK_LT(over_allocated, size);
if (!SameLiveness(parent, obj) || !ShouldRecordObject(obj, check_cow_array)) {
return false;
}
@@ -529,36 +538,66 @@ void ObjectStatsCollectorImpl::RecordVirtualJSGlobalObjectDetails(
ObjectStats::GLOBAL_ELEMENTS_TYPE);
}
-void ObjectStatsCollectorImpl::RecordVirtualJSCollectionDetails(
- JSObject object) {
- if (object->IsJSMap()) {
- RecordSimpleVirtualObjectStats(
- object, FixedArray::cast(JSMap::cast(object)->table()),
- ObjectStats::JS_COLLECTION_TABLE_TYPE);
- }
- if (object->IsJSSet()) {
- RecordSimpleVirtualObjectStats(
- object, FixedArray::cast(JSSet::cast(object)->table()),
- ObjectStats::JS_COLLECTION_TABLE_TYPE);
- }
-}
-
void ObjectStatsCollectorImpl::RecordVirtualJSObjectDetails(JSObject object) {
// JSGlobalObject is recorded separately.
if (object->IsJSGlobalObject()) return;
+ // Uncompiled JSFunction has a separate type.
+ if (object->IsJSFunction() && !JSFunction::cast(object)->is_compiled()) {
+ RecordSimpleVirtualObjectStats(HeapObject(), object,
+ ObjectStats::JS_UNCOMPILED_FUNCTION_TYPE);
+ }
+
// Properties.
if (object->HasFastProperties()) {
PropertyArray properties = object->property_array();
- CHECK_EQ(PROPERTY_ARRAY_TYPE, properties->map()->instance_type());
+ if (properties != ReadOnlyRoots(heap_).empty_property_array()) {
+ size_t over_allocated =
+ object->map()->UnusedPropertyFields() * kTaggedSize;
+ RecordVirtualObjectStats(object, properties,
+ object->map()->is_prototype_map()
+ ? ObjectStats::PROTOTYPE_PROPERTY_ARRAY_TYPE
+ : ObjectStats::OBJECT_PROPERTY_ARRAY_TYPE,
+ properties->Size(), over_allocated);
+ }
} else {
NameDictionary properties = object->property_dictionary();
RecordHashTableVirtualObjectStats(
- object, properties, ObjectStats::OBJECT_PROPERTY_DICTIONARY_TYPE);
+ object, properties,
+ object->map()->is_prototype_map()
+ ? ObjectStats::PROTOTYPE_PROPERTY_DICTIONARY_TYPE
+ : ObjectStats::OBJECT_PROPERTY_DICTIONARY_TYPE);
}
+
// Elements.
FixedArrayBase elements = object->elements();
- RecordSimpleVirtualObjectStats(object, elements, ObjectStats::ELEMENTS_TYPE);
+ if (object->HasDictionaryElements()) {
+ RecordHashTableVirtualObjectStats(
+ object, NumberDictionary::cast(elements),
+ object->IsJSArray() ? ObjectStats::ARRAY_DICTIONARY_ELEMENTS_TYPE
+ : ObjectStats::OBJECT_DICTIONARY_ELEMENTS_TYPE);
+ } else if (object->IsJSArray()) {
+ if (elements != ReadOnlyRoots(heap_).empty_fixed_array()) {
+ size_t element_size =
+ (elements->Size() - FixedArrayBase::kHeaderSize) / elements->length();
+ uint32_t length = JSArray::cast(object)->length()->Number();
+ size_t over_allocated = (elements->length() - length) * element_size;
+ RecordVirtualObjectStats(object, elements,
+ ObjectStats::ARRAY_ELEMENTS_TYPE,
+ elements->Size(), over_allocated);
+ }
+ } else {
+ RecordSimpleVirtualObjectStats(object, elements,
+ ObjectStats::OBJECT_ELEMENTS_TYPE);
+ }
+
+ // JSCollections.
+ if (object->IsJSCollection()) {
+ // TODO(bmeurer): Properly compute over-allocation here.
+ RecordSimpleVirtualObjectStats(
+ object, FixedArray::cast(JSCollection::cast(object)->table()),
+ ObjectStats::JS_COLLECTION_TABLE_TYPE);
+ }
}
static ObjectStats::VirtualInstanceType GetFeedbackSlotType(
@@ -676,16 +715,12 @@ void ObjectStatsCollectorImpl::CollectStatistics(
} else if (obj->IsFunctionTemplateInfo()) {
RecordVirtualFunctionTemplateInfoDetails(
FunctionTemplateInfo::cast(obj));
- } else if (obj->IsJSFunction()) {
- RecordVirtualJSFunctionDetails(JSFunction::cast(obj));
} else if (obj->IsJSGlobalObject()) {
RecordVirtualJSGlobalObjectDetails(JSGlobalObject::cast(obj));
} else if (obj->IsJSObject()) {
// This phase needs to come after RecordVirtualAllocationSiteDetails
// to properly split among boilerplates.
RecordVirtualJSObjectDetails(JSObject::cast(obj));
- } else if (obj->IsJSCollection()) {
- RecordVirtualJSCollectionDetails(JSObject::cast(obj));
} else if (obj->IsSharedFunctionInfo()) {
RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo::cast(obj));
} else if (obj->IsContext()) {
@@ -706,7 +741,11 @@ void ObjectStatsCollectorImpl::CollectStatistics(
// sources. We still want to run RecordObjectStats after though.
RecordVirtualExternalStringDetails(ExternalString::cast(obj));
}
- RecordObjectStats(obj, map->instance_type(), obj->Size());
+ size_t over_allocated = ObjectStats::kNoOverAllocation;
+ if (obj->IsJSObject()) {
+ over_allocated = map->instance_size() - map->UsedInstanceSize();
+ }
+ RecordObjectStats(obj, map->instance_type(), obj->Size(), over_allocated);
if (collect_field_stats == CollectFieldStats::kYes) {
field_stats_collector_.RecordStats(obj);
}
@@ -749,10 +788,10 @@ void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
}
void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject obj,
- InstanceType type,
- size_t size) {
+ InstanceType type, size_t size,
+ size_t over_allocated) {
if (virtual_objects_.find(obj) == virtual_objects_.end()) {
- stats_->RecordObjectStats(type, size);
+ stats_->RecordObjectStats(type, size, over_allocated);
}
}
@@ -776,13 +815,52 @@ bool ObjectStatsCollectorImpl::SameLiveness(HeapObject obj1, HeapObject obj2) {
void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
// TODO(mlippautz): map->dependent_code(): DEPENDENT_CODE_TYPE.
+ // For Map we want to distinguish between various different states
+ // to get a better picture of what's going on in MapSpace. This
+ // method computes the virtual instance type to use for a given map,
+ // using MAP_TYPE for regular maps that aren't special in any way.
+ if (map->is_prototype_map()) {
+ if (map->is_dictionary_map()) {
+ RecordSimpleVirtualObjectStats(
+ HeapObject(), map, ObjectStats::MAP_PROTOTYPE_DICTIONARY_TYPE);
+ } else if (map->is_abandoned_prototype_map()) {
+ RecordSimpleVirtualObjectStats(HeapObject(), map,
+ ObjectStats::MAP_ABANDONED_PROTOTYPE_TYPE);
+ } else {
+ RecordSimpleVirtualObjectStats(HeapObject(), map,
+ ObjectStats::MAP_PROTOTYPE_TYPE);
+ }
+ } else if (map->is_deprecated()) {
+ RecordSimpleVirtualObjectStats(HeapObject(), map,
+ ObjectStats::MAP_DEPRECATED_TYPE);
+ } else if (map->is_dictionary_map()) {
+ RecordSimpleVirtualObjectStats(HeapObject(), map,
+ ObjectStats::MAP_DICTIONARY_TYPE);
+ } else if (map->is_stable()) {
+ RecordSimpleVirtualObjectStats(HeapObject(), map,
+ ObjectStats::MAP_STABLE_TYPE);
+ } else {
+ // This will be logged as MAP_TYPE in Phase2.
+ }
+
DescriptorArray array = map->instance_descriptors();
if (map->owns_descriptors() &&
array != ReadOnlyRoots(heap_).empty_descriptor_array()) {
- // DescriptorArray has its own instance type.
+ // Generally DescriptorArrays have their own instance type already
+ // (DESCRIPTOR_ARRAY_TYPE), but we'd like to be able to tell which
+ // of those are for (abandoned) prototypes, and which of those are
+ // owned by deprecated maps.
+ if (map->is_prototype_map()) {
+ RecordSimpleVirtualObjectStats(
+ map, array, ObjectStats::PROTOTYPE_DESCRIPTOR_ARRAY_TYPE);
+ } else if (map->is_deprecated()) {
+ RecordSimpleVirtualObjectStats(
+ map, array, ObjectStats::DEPRECATED_DESCRIPTOR_ARRAY_TYPE);
+ }
+
EnumCache enum_cache = array->enum_cache();
RecordSimpleVirtualObjectStats(array, enum_cache->keys(),
- ObjectStats::ENUM_CACHE_TYPE);
+ ObjectStats::ENUM_KEYS_CACHE_TYPE);
RecordSimpleVirtualObjectStats(array, enum_cache->indices(),
ObjectStats::ENUM_INDICES_CACHE_TYPE);
}
@@ -852,14 +930,6 @@ void ObjectStatsCollectorImpl::RecordVirtualSharedFunctionInfoDetails(
}
}
-void ObjectStatsCollectorImpl::RecordVirtualJSFunctionDetails(
- JSFunction function) {
- // Uncompiled JSFunctions get their own category.
- if (!function->is_compiled()) {
- RecordSimpleVirtualObjectStats(HeapObject(), function,
- ObjectStats::UNCOMPILED_JS_FUNCTION_TYPE);
- }
-}
void ObjectStatsCollectorImpl::RecordVirtualArrayBoilerplateDescription(
ArrayBoilerplateDescription description) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
@@ -902,8 +972,10 @@ void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
RecordSimpleVirtualObjectStats(
bytecode, bytecode->handler_table(),
ObjectStats::BYTECODE_ARRAY_HANDLER_TABLE_TYPE);
- RecordSimpleVirtualObjectStats(bytecode, bytecode->SourcePositionTable(),
- ObjectStats::SOURCE_POSITION_TABLE_TYPE);
+ if (bytecode->HasSourcePositionTable()) {
+ RecordSimpleVirtualObjectStats(bytecode, bytecode->SourcePositionTable(),
+ ObjectStats::SOURCE_POSITION_TABLE_TYPE);
+ }
}
namespace {
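
The hash-table statistics above compute over-allocation as (Capacity() - (NumberOfElements() + NumberOfDeletedElements())) * kEntrySize * kTaggedSize. A worked example with assumed constants (an entry size of two slots and 8-byte tagged values; the real values depend on the table shape and pointer compression):

// Worked example of the hash-table over-allocation formula: unused
// capacity, in entries, converted to bytes. Constants are illustrative.
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t capacity = 16;     // entries the table can hold
  const std::size_t elements = 5;      // live entries
  const std::size_t deleted = 2;       // tombstones still occupying slots
  const std::size_t entry_size = 2;    // slots per entry (assumed)
  const std::size_t tagged_size = 8;   // bytes per slot (assumed)
  const std::size_t over_allocated =
      (capacity - (elements + deleted)) * entry_size * tagged_size;
  std::printf("over-allocated bytes: %zu\n", over_allocated);  // prints 144
  return 0;
}
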
diff --git a/chromium/v8/src/heap/object-stats.h b/chromium/v8/src/heap/object-stats.h
index b7f3aefd7f2..72865a47f73 100644
--- a/chromium/v8/src/heap/object-stats.h
+++ b/chromium/v8/src/heap/object-stats.h
@@ -17,6 +17,8 @@
#define VIRTUAL_INSTANCE_TYPE_LIST(V) \
CODE_KIND_LIST(V) \
V(ARRAY_BOILERPLATE_DESCRIPTION_ELEMENTS_TYPE) \
+ V(ARRAY_DICTIONARY_ELEMENTS_TYPE) \
+ V(ARRAY_ELEMENTS_TYPE) \
V(BOILERPLATE_ELEMENTS_TYPE) \
V(BOILERPLATE_PROPERTY_ARRAY_TYPE) \
V(BOILERPLATE_PROPERTY_DICTIONARY_TYPE) \
@@ -25,9 +27,9 @@
V(COW_ARRAY_TYPE) \
V(DEOPTIMIZATION_DATA_TYPE) \
V(DEPENDENT_CODE_TYPE) \
- V(ELEMENTS_TYPE) \
+ V(DEPRECATED_DESCRIPTOR_ARRAY_TYPE) \
V(EMBEDDED_OBJECT_TYPE) \
- V(ENUM_CACHE_TYPE) \
+ V(ENUM_KEYS_CACHE_TYPE) \
V(ENUM_INDICES_CACHE_TYPE) \
V(FEEDBACK_VECTOR_ENTRY_TYPE) \
V(FEEDBACK_VECTOR_HEADER_TYPE) \
@@ -45,12 +47,25 @@
V(JS_ARRAY_BOILERPLATE_TYPE) \
V(JS_COLLECTION_TABLE_TYPE) \
V(JS_OBJECT_BOILERPLATE_TYPE) \
+ V(JS_UNCOMPILED_FUNCTION_TYPE) \
+ V(MAP_ABANDONED_PROTOTYPE_TYPE) \
+ V(MAP_DEPRECATED_TYPE) \
+ V(MAP_DICTIONARY_TYPE) \
+ V(MAP_PROTOTYPE_DICTIONARY_TYPE) \
+ V(MAP_PROTOTYPE_TYPE) \
+ V(MAP_STABLE_TYPE) \
V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
V(NUMBER_STRING_CACHE_TYPE) \
+ V(OBJECT_DICTIONARY_ELEMENTS_TYPE) \
+ V(OBJECT_ELEMENTS_TYPE) \
+ V(OBJECT_PROPERTY_ARRAY_TYPE) \
V(OBJECT_PROPERTY_DICTIONARY_TYPE) \
V(OBJECT_TO_CODE_TYPE) \
V(OPTIMIZED_CODE_LITERALS_TYPE) \
V(OTHER_CONTEXT_TYPE) \
+ V(PROTOTYPE_DESCRIPTOR_ARRAY_TYPE) \
+ V(PROTOTYPE_PROPERTY_ARRAY_TYPE) \
+ V(PROTOTYPE_PROPERTY_DICTIONARY_TYPE) \
V(PROTOTYPE_USERS_TYPE) \
V(REGEXP_MULTIPLE_CACHE_TYPE) \
V(RELOC_INFO_TYPE) \
@@ -67,7 +82,6 @@
V(STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE) \
V(STRING_EXTERNAL_RESOURCE_TWO_BYTE_TYPE) \
V(SOURCE_POSITION_TABLE_TYPE) \
- V(UNCOMPILED_JS_FUNCTION_TYPE) \
V(UNCOMPILED_SHARED_FUNCTION_INFO_TYPE) \
V(WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE)
@@ -105,7 +119,8 @@ class ObjectStats {
void Dump(std::stringstream& stream);
void CheckpointObjectStats();
- void RecordObjectStats(InstanceType type, size_t size);
+ void RecordObjectStats(InstanceType type, size_t size,
+ size_t over_allocated = kNoOverAllocation);
void RecordVirtualObjectStats(VirtualInstanceType type, size_t size,
size_t over_allocated);
diff --git a/chromium/v8/src/heap/objects-visiting-inl.h b/chromium/v8/src/heap/objects-visiting-inl.h
index aea5920dff5..132bd5b6dce 100644
--- a/chromium/v8/src/heap/objects-visiting-inl.h
+++ b/chromium/v8/src/heap/objects-visiting-inl.h
@@ -15,6 +15,7 @@
#include "src/objects/free-space-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/oddball.h"
+#include "src/objects/ordered-hash-table.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
diff --git a/chromium/v8/src/heap/objects-visiting.cc b/chromium/v8/src/heap/objects-visiting.cc
index 063dae512fe..d56dd91da20 100644
--- a/chromium/v8/src/heap/objects-visiting.cc
+++ b/chromium/v8/src/heap/objects-visiting.cc
@@ -51,7 +51,7 @@ Object VisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer) {
if (record_slots) {
HeapObject slot_holder = WeakListVisitor<T>::WeakNextHolder(tail);
int slot_offset = WeakListVisitor<T>::WeakNextOffset();
- ObjectSlot slot = HeapObject::RawField(slot_holder, slot_offset);
+ ObjectSlot slot = slot_holder.RawField(slot_offset);
MarkCompactCollector::RecordSlot(slot_holder, slot,
HeapObject::cast(retained));
}
diff --git a/chromium/v8/src/heap/read-only-heap.cc b/chromium/v8/src/heap/read-only-heap.cc
index a2c086fc0ad..1a5345de9bc 100644
--- a/chromium/v8/src/heap/read-only-heap.cc
+++ b/chromium/v8/src/heap/read-only-heap.cc
@@ -4,24 +4,77 @@
#include "src/heap/read-only-heap.h"
+#include <cstring>
+
+#include "src/base/once.h"
+#include "src/heap/heap-inl.h"
#include "src/heap/spaces.h"
+#include "src/snapshot/read-only-deserializer.h"
namespace v8 {
namespace internal {
+#ifdef V8_SHARED_RO_HEAP
+V8_DECLARE_ONCE(setup_ro_heap_once);
+ReadOnlyHeap* shared_ro_heap = nullptr;
+#endif
+
// static
-ReadOnlyHeap* ReadOnlyHeap::GetOrCreateReadOnlyHeap(Heap* heap) {
- return new ReadOnlyHeap(new ReadOnlySpace(heap));
+void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
+#ifdef V8_SHARED_RO_HEAP
+ void* isolate_ro_roots = reinterpret_cast<void*>(
+ isolate->roots_table().read_only_roots_begin().address());
+ base::CallOnce(&setup_ro_heap_once, [isolate, des, isolate_ro_roots]() {
+ shared_ro_heap = Init(isolate, des);
+ if (des != nullptr) {
+ std::memcpy(shared_ro_heap->read_only_roots_, isolate_ro_roots,
+ kEntriesCount * sizeof(Address));
+ }
+ });
+
+ isolate->heap()->SetUpFromReadOnlyHeap(shared_ro_heap);
+ if (des != nullptr) {
+ std::memcpy(isolate_ro_roots, shared_ro_heap->read_only_roots_,
+ kEntriesCount * sizeof(Address));
+ }
+#else
+ Init(isolate, des);
+#endif // V8_SHARED_RO_HEAP
}
-void ReadOnlyHeap::MaybeDeserialize(Isolate* isolate,
- ReadOnlyDeserializer* des) {
- des->DeserializeInto(isolate);
+void ReadOnlyHeap::OnCreateHeapObjectsComplete() {
+ DCHECK(!deserializing_);
+#ifdef V8_SHARED_RO_HEAP
+ read_only_space_->Forget();
+#endif
+ read_only_space_->MarkAsReadOnly();
+}
+
+// static
+ReadOnlyHeap* ReadOnlyHeap::Init(Isolate* isolate, ReadOnlyDeserializer* des) {
+ auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
+ isolate->heap()->SetUpFromReadOnlyHeap(ro_heap);
+ if (des != nullptr) {
+ des->DeserializeInto(isolate);
+ ro_heap->deserializing_ = true;
+#ifdef V8_SHARED_RO_HEAP
+ ro_heap->read_only_space_->Forget();
+#endif
+ ro_heap->read_only_space_->MarkAsReadOnly();
+ }
+ return ro_heap;
}
void ReadOnlyHeap::OnHeapTearDown() {
+#ifndef V8_SHARED_RO_HEAP
delete read_only_space_;
delete this;
+#endif
+}
+
+// static
+bool ReadOnlyHeap::Contains(HeapObject object) {
+ return Page::FromAddress(object.ptr())->owner()->identity() == RO_SPACE;
}
} // namespace internal
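
With V8_SHARED_RO_HEAP, ReadOnlyHeap::SetUp() above initializes the shared read-only heap exactly once and then copies the read-only roots into each isolate's root table. A minimal sketch of that initialize-once-and-copy pattern, using std::call_once in place of base::CallOnce; the roots array and values are toys:

// Initialize shared state once, then copy it into every caller's table.
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <mutex>

constexpr std::size_t kEntriesCount = 4;
std::once_flag setup_once;
std::array<std::uintptr_t, kEntriesCount> shared_roots;

void SetUpIsolateRoots(std::array<std::uintptr_t, kEntriesCount>& isolate_roots) {
  std::call_once(setup_once, [] {
    // First isolate: "deserialize" the shared read-only roots.
    for (std::size_t i = 0; i < kEntriesCount; ++i) shared_roots[i] = 0x1000 + i;
  });
  // Every isolate: copy the shared roots into its own root table.
  isolate_roots = shared_roots;
}

int main() {
  std::array<std::uintptr_t, kEntriesCount> roots_a{}, roots_b{};
  SetUpIsolateRoots(roots_a);
  SetUpIsolateRoots(roots_b);
  std::printf("%d\n", roots_a == roots_b);  // prints 1
  return 0;
}
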
diff --git a/chromium/v8/src/heap/read-only-heap.h b/chromium/v8/src/heap/read-only-heap.h
index d2b0db012dc..9e7a50e4f90 100644
--- a/chromium/v8/src/heap/read-only-heap.h
+++ b/chromium/v8/src/heap/read-only-heap.h
@@ -7,37 +7,53 @@
#include "src/base/macros.h"
#include "src/heap/heap.h"
+#include "src/objects.h"
#include "src/roots.h"
-#include "src/snapshot/read-only-deserializer.h"
namespace v8 {
namespace internal {
class ReadOnlySpace;
+class ReadOnlyDeserializer;
// This class transparently manages read-only space, roots and cache creation
-// and destruction. Eventually this will allow sharing these artifacts between
-// isolates.
-class ReadOnlyHeap {
+// and destruction.
+class ReadOnlyHeap final {
public:
- static ReadOnlyHeap* GetOrCreateReadOnlyHeap(Heap* heap);
- // If necessary, deserialize read-only objects and set up read-only object
- // cache.
- void MaybeDeserialize(Isolate* isolate, ReadOnlyDeserializer* des);
- // Frees ReadOnlySpace and itself when sharing is disabled. No-op otherwise.
- // Read-only data should not be used within the current isolate after this is
- // called.
+ static constexpr size_t kEntriesCount =
+ static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
+
+  // If necessary, create the read-only heap and initialize its artifacts
+  // (if the deserializer is provided).
+ // TODO(goszczycki): Ideally we'd create this without needing a heap.
+ static void SetUp(Isolate* isolate, ReadOnlyDeserializer* des);
+ // Indicate that all read-only space objects have been created and will not
+ // be written to. This is not thread safe, and should really only be used as
+ // part of mksnapshot or when read-only heap sharing is disabled.
+ void OnCreateHeapObjectsComplete();
+  // Indicate that the current isolate no longer requires the read-only heap
+  // and that it may be safely disposed of.
void OnHeapTearDown();
+ // Returns whether the object resides in the read-only space.
+ V8_EXPORT_PRIVATE static bool Contains(HeapObject object);
+
std::vector<Object>* read_only_object_cache() {
return &read_only_object_cache_;
}
ReadOnlySpace* read_only_space() const { return read_only_space_; }
private:
+ static ReadOnlyHeap* Init(Isolate* isolate, ReadOnlyDeserializer* des);
+
+ bool deserializing_ = false;
ReadOnlySpace* read_only_space_ = nullptr;
std::vector<Object> read_only_object_cache_;
+#ifdef V8_SHARED_RO_HEAP
+ Address read_only_roots_[kEntriesCount];
+#endif
+
explicit ReadOnlyHeap(ReadOnlySpace* ro_space) : read_only_space_(ro_space) {}
DISALLOW_COPY_AND_ASSIGN(ReadOnlyHeap);
};
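ReadOnlyHeap::Contains, declared above and defined in read-only-heap.cc, is an O(1) check: it masks the object's address down to its page and compares the page owner's space identity with RO_SPACE. The following is a hedged sketch of that address-masking idea with stand-in types; kPageSize, PageHeader and the space enum below are illustrative and do not reflect V8's actual page layout.

    // Sketch only: assumes page headers sit at page-aligned addresses.
    #include <cstdint>

    constexpr std::uintptr_t kPageSize = std::uintptr_t{1} << 18;  // illustrative
    enum SpaceIdentity { RO_SPACE, OLD_SPACE, NEW_SPACE };

    struct PageHeader {
      SpaceIdentity owner_identity;
    };

    inline PageHeader* PageFromAddress(std::uintptr_t addr) {
      // Pages are aligned, so clearing the low bits yields the page header.
      return reinterpret_cast<PageHeader*>(addr & ~(kPageSize - 1));
    }

    inline bool InReadOnlySpace(std::uintptr_t object_addr) {
      return PageFromAddress(object_addr)->owner_identity == RO_SPACE;
    }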
diff --git a/chromium/v8/src/heap/scavenger-inl.h b/chromium/v8/src/heap/scavenger-inl.h
index 1ac96b7362f..4736519099f 100644
--- a/chromium/v8/src/heap/scavenger-inl.h
+++ b/chromium/v8/src/heap/scavenger-inl.h
@@ -480,6 +480,21 @@ void ScavengeVisitor::VisitPointersImpl(HeapObject host, TSlot start,
}
}
+int ScavengeVisitor::VisitEphemeronHashTable(Map map,
+ EphemeronHashTable table) {
+  // Register table with the scavenger, so it can take care of the weak keys
+  // later. This allows us to iterate only the table's values, which are
+  // treated as strong independently of whether the key is live.
+ scavenger_->AddEphemeronHashTable(table);
+ for (int i = 0; i < table->Capacity(); i++) {
+ ObjectSlot value_slot =
+ table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
+ VisitPointer(table, value_slot);
+ }
+
+ return table->SizeFromMap(map);
+}
+
} // namespace internal
} // namespace v8
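VisitEphemeronHashTable above visits only the value slots during the copying phase and registers the table with the scavenger, so key liveness can be resolved in a later weak pass. Below is a small standalone sketch of that "values strong now, keys weak later" split, using toy types rather than V8's EphemeronHashTable and slot machinery.

    // Sketch only: ToyEphemeronTable and ToyScavenger are illustrative types.
    #include <functional>
    #include <vector>

    struct ToyEphemeronTable {
      struct Entry { void* key; void* value; };
      std::vector<Entry> entries;
    };

    struct ToyScavenger {
      // Tables whose keys still need a liveness check after copying is done.
      std::vector<ToyEphemeronTable*> ephemeron_tables;

      void VisitEphemeronHashTable(
          ToyEphemeronTable* table,
          const std::function<void(void*&)>& visit_strong) {
        ephemeron_tables.push_back(table);  // keys handled in a later pass
        // Only the values are treated as strong right now, regardless of
        // whether the corresponding key turns out to be live.
        for (auto& entry : table->entries) visit_strong(entry.value);
      }
    };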
diff --git a/chromium/v8/src/heap/scavenger.cc b/chromium/v8/src/heap/scavenger.cc
index df0ed8886e2..217affa84b3 100644
--- a/chromium/v8/src/heap/scavenger.cc
+++ b/chromium/v8/src/heap/scavenger.cc
@@ -98,6 +98,20 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
HandleSlot(host, FullHeapObjectSlot(&heap_object), heap_object);
}
+ inline void VisitEphemeron(HeapObject obj, int entry, ObjectSlot key,
+ ObjectSlot value) override {
+ DCHECK(Heap::IsLargeObject(obj) || obj->IsEphemeronHashTable());
+ VisitPointer(obj, value);
+
+ if (ObjectInYoungGeneration(*key)) {
+ // We cannot check the map here, as it might be a large object.
+ scavenger_->RememberPromotedEphemeron(
+ EphemeronHashTable::unchecked_cast(obj), entry);
+ } else {
+ VisitPointer(obj, key);
+ }
+ }
+
private:
template <typename TSlot>
V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
@@ -151,11 +165,25 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
const bool record_slots_;
};
-static bool IsUnscavengedHeapObject(Heap* heap, FullObjectSlot p) {
- return Heap::InFromPage(*p) &&
- !HeapObject::cast(*p)->map_word().IsForwardingAddress();
+namespace {
+
+V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, Object object) {
+ return Heap::InFromPage(object) &&
+ !HeapObject::cast(object)->map_word().IsForwardingAddress();
+}
+
+// Same as IsUnscavengedHeapObject() above but specialized for HeapObjects.
+V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, HeapObject heap_object) {
+ return Heap::InFromPage(heap_object) &&
+ !heap_object->map_word().IsForwardingAddress();
+}
+
+bool IsUnscavengedHeapObjectSlot(Heap* heap, FullObjectSlot p) {
+ return IsUnscavengedHeapObject(heap, *p);
}
+} // namespace
+
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
public:
Object RetainAs(Object object) override {
@@ -185,9 +213,10 @@ void ScavengerCollector::CollectGarbage() {
OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
Scavenger::CopiedList copied_list(num_scavenge_tasks);
Scavenger::PromotionList promotion_list(num_scavenge_tasks);
+ EphemeronTableList ephemeron_table_list(num_scavenge_tasks);
for (int i = 0; i < num_scavenge_tasks; i++) {
scavengers[i] = new Scavenger(this, heap_, is_logging, &copied_list,
- &promotion_list, i);
+ &promotion_list, &ephemeron_table_list, i);
job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
}
@@ -235,7 +264,7 @@ void ScavengerCollector::CollectGarbage() {
TRACE_GC(heap_->tracer(),
GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
isolate_->global_handles()->MarkYoungWeakUnmodifiedObjectsPending(
- &IsUnscavengedHeapObject);
+ &IsUnscavengedHeapObjectSlot);
isolate_->global_handles()->IterateYoungWeakUnmodifiedRootsForFinalizers(
&root_scavenge_visitor);
scavengers[kMainThreadId]->Process();
@@ -244,7 +273,7 @@ void ScavengerCollector::CollectGarbage() {
DCHECK(promotion_list.IsEmpty());
isolate_->global_handles()
->IterateYoungWeakUnmodifiedRootsForPhantomHandles(
- &root_scavenge_visitor, &IsUnscavengedHeapObject);
+ &root_scavenge_visitor, &IsUnscavengedHeapObjectSlot);
}
{
@@ -280,8 +309,7 @@ void ScavengerCollector::CollectGarbage() {
}
}
- ScavengeWeakObjectRetainer weak_object_retainer;
- heap_->ProcessYoungWeakReferences(&weak_object_retainer);
+ ProcessWeakReferences(&ephemeron_table_list);
// Set age mark.
heap_->new_space_->set_age_mark(heap_->new_space()->top());
@@ -349,11 +377,12 @@ int ScavengerCollector::NumberOfScavengeTasks() {
Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
CopiedList* copied_list, PromotionList* promotion_list,
- int task_id)
+ EphemeronTableList* ephemeron_table_list, int task_id)
: collector_(collector),
heap_(heap),
promotion_list_(promotion_list, task_id),
copied_list_(copied_list, task_id),
+ ephemeron_table_list_(ephemeron_table_list, task_id),
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
copied_size_(0),
promoted_size_(0),
@@ -377,6 +406,12 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
target->IterateBodyFast(map, size, &visitor);
}
+void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
+ auto indices =
+ ephemeron_remembered_set_.insert({table, std::unordered_set<int>()});
+ indices.first->second.insert(entry);
+}
+
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
AllocationSpace space = page->owner()->identity();
if ((space == OLD_SPACE) && !page->SweepingDone()) {
@@ -440,12 +475,88 @@ void Scavenger::Process(OneshotBarrier* barrier) {
} while (!done);
}
+void ScavengerCollector::ProcessWeakReferences(
+ EphemeronTableList* ephemeron_table_list) {
+ ScavengeWeakObjectRetainer weak_object_retainer;
+ heap_->ProcessYoungWeakReferences(&weak_object_retainer);
+ ClearYoungEphemerons(ephemeron_table_list);
+ ClearOldEphemerons();
+}
+
+// Clear ephemeron entries from EphemeronHashTables in new-space whenever the
+// entry has a dead new-space key.
+void ScavengerCollector::ClearYoungEphemerons(
+ EphemeronTableList* ephemeron_table_list) {
+ ephemeron_table_list->Iterate([this](EphemeronHashTable table) {
+ for (int i = 0; i < table->Capacity(); i++) {
+ // Keys in EphemeronHashTables must be heap objects.
+ HeapObjectSlot key_slot(
+ table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)));
+ HeapObject key = key_slot.ToHeapObject();
+ if (IsUnscavengedHeapObject(heap_, key)) {
+ table->RemoveEntry(i);
+ } else {
+ HeapObject forwarded = ForwardingAddress(key);
+ key_slot.StoreHeapObject(forwarded);
+ }
+ }
+ });
+ ephemeron_table_list->Clear();
+}
+
+// Clear ephemeron entries from EphemeronHashTables in old-space whenever the
+// entry has a dead new-space key.
+void ScavengerCollector::ClearOldEphemerons() {
+ for (auto it = heap_->ephemeron_remembered_set_.begin();
+ it != heap_->ephemeron_remembered_set_.end();) {
+ EphemeronHashTable table = it->first;
+ auto& indices = it->second;
+ for (auto iti = indices.begin(); iti != indices.end();) {
+ // Keys in EphemeronHashTables must be heap objects.
+ HeapObjectSlot key_slot(
+ table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(*iti)));
+ HeapObject key = key_slot.ToHeapObject();
+ if (IsUnscavengedHeapObject(heap_, key)) {
+ table->RemoveEntry(*iti);
+ iti = indices.erase(iti);
+ } else {
+ HeapObject forwarded = ForwardingAddress(key);
+ key_slot.StoreHeapObject(forwarded);
+ if (!Heap::InYoungGeneration(forwarded)) {
+ iti = indices.erase(iti);
+ } else {
+ ++iti;
+ }
+ }
+ }
+
+ if (indices.size() == 0) {
+ it = heap_->ephemeron_remembered_set_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
void Scavenger::Finalize() {
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
heap()->IncrementPromotedObjectsSize(promoted_size_);
collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
allocator_.Finalize();
+ ephemeron_table_list_.FlushToGlobal();
+ for (auto it = ephemeron_remembered_set_.begin();
+ it != ephemeron_remembered_set_.end(); ++it) {
+ auto insert_result = heap()->ephemeron_remembered_set_.insert(
+ {it->first, std::unordered_set<int>()});
+ for (int entry : it->second) {
+ insert_result.first->second.insert(entry);
+ }
+ }
+}
+
+void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
+ ephemeron_table_list_.Push(table);
}
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
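ClearYoungEphemerons and ClearOldEphemerons above walk the registered tables after copying has finished: an entry whose key was not scavenged is removed, otherwise the key slot is rewritten with the key's forwarding address. The sketch below shows that clear-or-forward loop in isolation; a forwarding map stands in for from-space map words, and "removal" just nulls the entry, so none of this is V8's actual slot handling.

    // Sketch only: surviving young objects appear in `forwarded`; anything
    // absent from it is considered dead after the scavenge.
    #include <unordered_map>
    #include <vector>

    using ForwardingMap = std::unordered_map<void*, void*>;

    struct Entry { void* key; void* value; };

    void ClearYoungEphemerons(std::vector<Entry>& table,
                              const ForwardingMap& forwarded) {
      for (Entry& entry : table) {
        auto it = forwarded.find(entry.key);
        if (it == forwarded.end()) {
          // Key did not survive: clear the entry so the value is no longer
          // kept alive through this table.
          entry.key = nullptr;
          entry.value = nullptr;
        } else {
          // Key survived and may have moved; update the slot to its new home.
          entry.key = it->second;
        }
      }
    }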
diff --git a/chromium/v8/src/heap/scavenger.h b/chromium/v8/src/heap/scavenger.h
index e122ab8cdf8..52df4cf340c 100644
--- a/chromium/v8/src/heap/scavenger.h
+++ b/chromium/v8/src/heap/scavenger.h
@@ -27,6 +27,10 @@ using SurvivingNewLargeObjectsMap =
std::unordered_map<HeapObject, Map, Object::Hasher>;
using SurvivingNewLargeObjectMapEntry = std::pair<HeapObject, Map>;
+constexpr int kEphemeronTableListSegmentSize = 128;
+using EphemeronTableList =
+ Worklist<EphemeronHashTable, kEphemeronTableListSegmentSize>;
+
class ScavengerCollector {
public:
static const int kMaxScavengerTasks = 8;
@@ -42,6 +46,9 @@ class ScavengerCollector {
int NumberOfScavengeTasks();
+ void ProcessWeakReferences(EphemeronTableList* ephemeron_table_list);
+ void ClearYoungEphemerons(EphemeronTableList* ephemeron_table_list);
+ void ClearOldEphemerons();
void HandleSurvivingNewLargeObjects();
Isolate* const isolate_;
@@ -109,10 +116,9 @@ class Scavenger {
static const int kCopiedListSegmentSize = 256;
using CopiedList = Worklist<ObjectAndSize, kCopiedListSegmentSize>;
-
Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
CopiedList* copied_list, PromotionList* promotion_list,
- int task_id);
+ EphemeronTableList* ephemeron_table_list, int task_id);
// Entry point for scavenging an old generation page. For scavenging single
// objects see RootScavengingVisitor and ScavengeVisitor below.
@@ -125,6 +131,8 @@ class Scavenger {
// Finalize the Scavenger. Needs to be called from the main thread.
void Finalize();
+ void AddEphemeronHashTable(EphemeronHashTable table);
+
size_t bytes_copied() const { return copied_size_; }
size_t bytes_promoted() const { return promoted_size_; }
@@ -194,16 +202,20 @@ class Scavenger {
int object_size);
void IterateAndScavengePromotedObject(HeapObject target, Map map, int size);
+ void RememberPromotedEphemeron(EphemeronHashTable table, int index);
ScavengerCollector* const collector_;
Heap* const heap_;
PromotionList::View promotion_list_;
CopiedList::View copied_list_;
+ EphemeronTableList::View ephemeron_table_list_;
Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
size_t copied_size_;
size_t promoted_size_;
LocalAllocator allocator_;
SurvivingNewLargeObjectsMap surviving_new_large_objects_;
+
+ EphemeronRememberedSet ephemeron_remembered_set_;
const bool is_logging_;
const bool is_incremental_marking_;
const bool is_compacting_;
@@ -242,6 +254,7 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;
V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
+ V8_INLINE int VisitEphemeronHashTable(Map map, EphemeronHashTable object);
private:
template <typename TSlot>
diff --git a/chromium/v8/src/heap/setup-heap-internal.cc b/chromium/v8/src/heap/setup-heap-internal.cc
index a3d690ece6b..ac00b77d71d 100644
--- a/chromium/v8/src/heap/setup-heap-internal.cc
+++ b/chromium/v8/src/heap/setup-heap-internal.cc
@@ -32,6 +32,7 @@
#include "src/objects/microtask.h"
#include "src/objects/module.h"
#include "src/objects/oddball-inl.h"
+#include "src/objects/ordered-hash-table.h"
#include "src/objects/promise.h"
#include "src/objects/script.h"
#include "src/objects/shared-function-info.h"
@@ -170,15 +171,16 @@ void Heap::FinalizePartialMap(Map map) {
map->set_constructor_or_backpointer(roots.null_value());
}
-AllocationResult Heap::Allocate(Map map, AllocationSpace space) {
+AllocationResult Heap::Allocate(Map map, AllocationType allocation_type) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
HeapObject result;
- AllocationResult allocation = AllocateRaw(size, Heap::SelectType(space));
+ AllocationResult allocation = AllocateRaw(size, allocation_type);
if (!allocation.To(&result)) return allocation;
// New space objects are allocated white.
WriteBarrierMode write_barrier_mode =
- space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ allocation_type == AllocationType::kYoung ? SKIP_WRITE_BARRIER
+ : UPDATE_WRITE_BARRIER;
result->set_map_after_allocation(map, write_barrier_mode);
return result;
}
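The rewritten Heap::Allocate keys the write-barrier mode off the new AllocationType instead of an AllocationSpace: objects allocated young start out white, so the map store can skip the barrier. A tiny sketch of that selection follows, with stand-in enums rather than the actual V8 declarations.

    // Sketch only: enums are illustrative stand-ins for the V8 types.
    enum class AllocationType { kYoung, kOld, kReadOnly };
    enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

    WriteBarrierMode BarrierModeFor(AllocationType type) {
      // Young-generation objects are allocated white, so the initial map
      // write needs no barrier; everything else takes the safe default.
      return type == AllocationType::kYoung ? SKIP_WRITE_BARRIER
                                            : UPDATE_WRITE_BARRIER;
    }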
@@ -275,21 +277,24 @@ bool Heap::CreateInitialMaps() {
set_empty_weak_array_list(WeakArrayList::cast(obj));
{
- AllocationResult allocation = Allocate(roots.null_map(), RO_SPACE);
+ AllocationResult allocation =
+ Allocate(roots.null_map(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_null_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kNull);
{
- AllocationResult allocation = Allocate(roots.undefined_map(), RO_SPACE);
+ AllocationResult allocation =
+ Allocate(roots.undefined_map(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_undefined_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kUndefined);
DCHECK(!InYoungGeneration(roots.undefined_value()));
{
- AllocationResult allocation = Allocate(roots.the_hole_map(), RO_SPACE);
+ AllocationResult allocation =
+ Allocate(roots.the_hole_map(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_the_hole_value(Oddball::cast(obj));
@@ -308,7 +313,8 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty enum cache.
{
- AllocationResult allocation = Allocate(roots.tuple2_map(), RO_SPACE);
+ AllocationResult allocation =
+ Allocate(roots.enum_cache_map(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_empty_enum_cache(EnumCache::cast(obj));
@@ -364,6 +370,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(SCOPE_INFO_TYPE, scope_info)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
+ ALLOCATE_VARSIZE_MAP(CLOSURE_FEEDBACK_CELL_ARRAY_TYPE,
+ closure_feedback_cell_array)
ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
Context::NUMBER_FUNCTION_INDEX)
@@ -449,8 +457,6 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, one_closure_cell)
roots.one_closure_cell_map()->mark_unstable();
ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, many_closures_cell)
- ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, no_feedback_cell)
- roots.no_feedback_cell_map()->mark_unstable();
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
@@ -548,8 +554,8 @@ bool Heap::CreateInitialMaps() {
{
// Empty array boilerplate description
- AllocationResult alloc =
- Allocate(roots.array_boilerplate_description_map(), RO_SPACE);
+ AllocationResult alloc = Allocate(roots.array_boilerplate_description_map(),
+ AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
ArrayBoilerplateDescription::cast(obj)->set_constant_elements(
@@ -561,14 +567,16 @@ bool Heap::CreateInitialMaps() {
ArrayBoilerplateDescription::cast(obj));
{
- AllocationResult allocation = Allocate(roots.boolean_map(), RO_SPACE);
+ AllocationResult allocation =
+ Allocate(roots.boolean_map(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_true_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kTrue);
{
- AllocationResult allocation = Allocate(roots.boolean_map(), RO_SPACE);
+ AllocationResult allocation =
+ Allocate(roots.boolean_map(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_false_value(Oddball::cast(obj));
@@ -594,6 +602,17 @@ bool Heap::CreateInitialMaps() {
set_empty_property_array(PropertyArray::cast(obj));
}
+ {
+ if (!AllocateRaw(FixedArray::SizeFor(0), AllocationType::kReadOnly)
+ .To(&obj)) {
+ return false;
+ }
+ obj->set_map_after_allocation(roots.closure_feedback_cell_array_map(),
+ SKIP_WRITE_BARRIER);
+ FixedArray::cast(obj)->set_length(0);
+ set_empty_closure_feedback_cell_array(ClosureFeedbackCellArray::cast(obj));
+ }
+
#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
{ \
FixedTypedArrayBase obj; \
@@ -620,8 +639,9 @@ void Heap::CreateApiObjects() {
set_message_listeners(*TemplateList::New(isolate, 2));
- Handle<InterceptorInfo> info = Handle<InterceptorInfo>::cast(
- isolate->factory()->NewStruct(INTERCEPTOR_INFO_TYPE, TENURED_READ_ONLY));
+ Handle<InterceptorInfo> info =
+ Handle<InterceptorInfo>::cast(isolate->factory()->NewStruct(
+ INTERCEPTOR_INFO_TYPE, AllocationType::kReadOnly));
info->set_flags(0);
set_noop_interceptor_info(*info);
}
@@ -632,18 +652,20 @@ void Heap::CreateInitialObjects() {
ReadOnlyRoots roots(this);
// The -0 value must be set before NewNumber works.
- set_minus_zero_value(*factory->NewHeapNumber(-0.0, TENURED_READ_ONLY));
+ set_minus_zero_value(
+ *factory->NewHeapNumber(-0.0, AllocationType::kReadOnly));
DCHECK(std::signbit(roots.minus_zero_value()->Number()));
set_nan_value(*factory->NewHeapNumber(
- std::numeric_limits<double>::quiet_NaN(), TENURED_READ_ONLY));
- set_hole_nan_value(
- *factory->NewHeapNumberFromBits(kHoleNanInt64, TENURED_READ_ONLY));
- set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, TENURED_READ_ONLY));
+ std::numeric_limits<double>::quiet_NaN(), AllocationType::kReadOnly));
+ set_hole_nan_value(*factory->NewHeapNumberFromBits(
+ kHoleNanInt64, AllocationType::kReadOnly));
+ set_infinity_value(
+ *factory->NewHeapNumber(V8_INFINITY, AllocationType::kReadOnly));
set_minus_infinity_value(
- *factory->NewHeapNumber(-V8_INFINITY, TENURED_READ_ONLY));
+ *factory->NewHeapNumber(-V8_INFINITY, AllocationType::kReadOnly));
- set_hash_seed(*factory->NewByteArray(kInt64Size, TENURED_READ_ONLY));
+ set_hash_seed(*factory->NewByteArray(kInt64Size, AllocationType::kReadOnly));
InitializeHashSeed();
// There's no "current microtask" in the beginning.
@@ -653,8 +675,8 @@ void Heap::CreateInitialObjects() {
set_weak_refs_keep_during_job(roots.undefined_value());
// Allocate cache for single character one byte strings.
- set_single_character_string_cache(
- *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
+ set_single_character_string_cache(*factory->NewFixedArray(
+ String::kMaxOneByteCharCode + 1, AllocationType::kOld));
// Allocate initial string table.
set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
@@ -720,17 +742,17 @@ void Heap::CreateInitialObjects() {
// Initialize the self-reference marker.
set_self_reference_marker(
- *factory->NewSelfReferenceMarker(TENURED_READ_ONLY));
+ *factory->NewSelfReferenceMarker(AllocationType::kReadOnly));
set_interpreter_entry_trampoline_for_profiling(roots.undefined_value());
{
HandleScope scope(isolate());
-#define SYMBOL_INIT(_, name) \
- { \
- Handle<Symbol> symbol( \
- isolate()->factory()->NewPrivateSymbol(TENURED_READ_ONLY)); \
- roots_table()[RootIndex::k##name] = symbol->ptr(); \
+#define SYMBOL_INIT(_, name) \
+ { \
+ Handle<Symbol> symbol( \
+ isolate()->factory()->NewPrivateSymbol(AllocationType::kReadOnly)); \
+ roots_table()[RootIndex::k##name] = symbol->ptr(); \
}
PRIVATE_SYMBOL_LIST_GENERATOR(SYMBOL_INIT, /* not used */)
#undef SYMBOL_INIT
@@ -739,7 +761,7 @@ void Heap::CreateInitialObjects() {
{
HandleScope scope(isolate());
#define SYMBOL_INIT(_, name, description) \
- Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
+ Handle<Symbol> name = factory->NewSymbol(AllocationType::kReadOnly); \
Handle<String> name##d = factory->InternalizeUtf8String(#description); \
name->set_name(*name##d); \
roots_table()[RootIndex::k##name] = name->ptr();
@@ -747,7 +769,7 @@ void Heap::CreateInitialObjects() {
#undef SYMBOL_INIT
#define SYMBOL_INIT(_, name, description) \
- Handle<Symbol> name = factory->NewSymbol(TENURED_READ_ONLY); \
+ Handle<Symbol> name = factory->NewSymbol(AllocationType::kReadOnly); \
Handle<String> name##d = factory->InternalizeUtf8String(#description); \
name->set_is_well_known_symbol(true); \
name->set_name(*name##d); \
@@ -760,7 +782,7 @@ void Heap::CreateInitialObjects() {
}
Handle<NameDictionary> empty_property_dictionary = NameDictionary::New(
- isolate(), 1, TENURED_READ_ONLY, USE_CUSTOM_MINIMUM_CAPACITY);
+ isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
DCHECK(!empty_property_dictionary->HasSufficientCapacityToAdd(1));
set_empty_property_dictionary(*empty_property_dictionary);
@@ -768,27 +790,23 @@ void Heap::CreateInitialObjects() {
set_api_symbol_table(*empty_property_dictionary);
set_api_private_symbol_table(*empty_property_dictionary);
- set_number_string_cache(
- *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
+ set_number_string_cache(*factory->NewFixedArray(
+ kInitialNumberStringCacheSize * 2, AllocationType::kOld));
// Allocate cache for string split and regexp-multiple.
set_string_split_cache(*factory->NewFixedArray(
- RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
+ RegExpResultsCache::kRegExpResultsCacheSize, AllocationType::kOld));
set_regexp_multiple_cache(*factory->NewFixedArray(
- RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
+ RegExpResultsCache::kRegExpResultsCacheSize, AllocationType::kOld));
// Allocate FeedbackCell for builtins.
Handle<FeedbackCell> many_closures_cell =
factory->NewManyClosuresCell(factory->undefined_value());
set_many_closures_cell(*many_closures_cell);
- // Allocate FeedbackCell for cases where we don't collect feedback.
- Handle<FeedbackCell> no_feedback_cell = factory->NewNoFeedbackCell();
- set_no_feedback_cell(*no_feedback_cell);
-
{
Handle<FixedArray> empty_sloppy_arguments_elements =
- factory->NewFixedArray(2, TENURED_READ_ONLY);
+ factory->NewFixedArray(2, AllocationType::kReadOnly);
empty_sloppy_arguments_elements->set_map_after_allocation(
roots.sloppy_arguments_elements_map(), SKIP_WRITE_BARRIER);
set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
@@ -804,12 +822,12 @@ void Heap::CreateInitialObjects() {
set_script_list(roots.empty_weak_array_list());
Handle<NumberDictionary> slow_element_dictionary = NumberDictionary::New(
- isolate(), 1, TENURED_READ_ONLY, USE_CUSTOM_MINIMUM_CAPACITY);
+ isolate(), 1, AllocationType::kReadOnly, USE_CUSTOM_MINIMUM_CAPACITY);
DCHECK(!slow_element_dictionary->HasSufficientCapacityToAdd(1));
slow_element_dictionary->set_requires_slow_elements();
set_empty_slow_element_dictionary(*slow_element_dictionary);
- set_materialized_objects(*factory->NewFixedArray(0, TENURED));
+ set_materialized_objects(*factory->NewFixedArray(0, AllocationType::kOld));
// Handling of script id generation is in Heap::NextScriptId().
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
@@ -818,7 +836,7 @@ void Heap::CreateInitialObjects() {
// Allocate the empty OrderedHashMap.
Handle<FixedArray> empty_ordered_hash_map = factory->NewFixedArray(
- OrderedHashMap::HashTableStartIndex(), TENURED_READ_ONLY);
+ OrderedHashMap::HashTableStartIndex(), AllocationType::kReadOnly);
empty_ordered_hash_map->set_map_no_write_barrier(
*factory->ordered_hash_map_map());
for (int i = 0; i < empty_ordered_hash_map->length(); ++i) {
@@ -828,7 +846,7 @@ void Heap::CreateInitialObjects() {
// Allocate the empty OrderedHashSet.
Handle<FixedArray> empty_ordered_hash_set = factory->NewFixedArray(
- OrderedHashSet::HashTableStartIndex(), TENURED_READ_ONLY);
+ OrderedHashSet::HashTableStartIndex(), AllocationType::kReadOnly);
empty_ordered_hash_set->set_map_no_write_barrier(
*factory->ordered_hash_set_map());
for (int i = 0; i < empty_ordered_hash_set->length(); ++i) {
@@ -838,7 +856,7 @@ void Heap::CreateInitialObjects() {
// Allocate the empty FeedbackMetadata.
Handle<FeedbackMetadata> empty_feedback_metadata =
- factory->NewFeedbackMetadata(0, TENURED_READ_ONLY);
+ factory->NewFeedbackMetadata(0, 0, AllocationType::kReadOnly);
set_empty_feedback_metadata(*empty_feedback_metadata);
// Allocate the empty script.
@@ -857,7 +875,8 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_no_elements_protector(*cell);
- cell = factory->NewPropertyCell(factory->empty_string(), TENURED_READ_ONLY);
+ cell = factory->NewPropertyCell(factory->empty_string(),
+ AllocationType::kReadOnly);
cell->set_value(roots.the_hole_value());
set_empty_property_cell(*cell);
diff --git a/chromium/v8/src/heap/slot-set.h b/chromium/v8/src/heap/slot-set.h
index 2d9fb327bed..894563bacd9 100644
--- a/chromium/v8/src/heap/slot-set.h
+++ b/chromium/v8/src/heap/slot-set.h
@@ -11,13 +11,10 @@
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
+#include "src/objects/compressed-slots.h"
#include "src/objects/slots.h"
#include "src/utils.h"
-#ifdef V8_COMPRESS_POINTERS
-#include "src/ptr-compr.h"
-#endif
-
namespace v8 {
namespace internal {
@@ -268,7 +265,7 @@ class SlotSet : public Malloced {
}
private:
- typedef uint32_t* Bucket;
+ using Bucket = uint32_t*;
static const int kMaxSlots = (1 << kPageSizeBits) / kTaggedSize;
static const int kCellsPerBucket = 32;
static const int kCellsPerBucketLog2 = 5;
diff --git a/chromium/v8/src/heap/spaces-inl.h b/chromium/v8/src/heap/spaces-inl.h
index 86312d82af1..091ab6503f7 100644
--- a/chromium/v8/src/heap/spaces-inl.h
+++ b/chromium/v8/src/heap/spaces-inl.h
@@ -186,8 +186,10 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
added += category->available();
category->Relink();
});
- DCHECK_EQ(page->AvailableInFreeList(),
- page->AvailableInFreeListFromAllocatedBytes());
+
+ DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
+ page->AvailableInFreeList() ==
+ page->AvailableInFreeListFromAllocatedBytes());
return added;
}
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index c2e6ffb54e6..ada97772154 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -1327,8 +1327,8 @@ static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
return slot_set;
}
-template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
-template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
+template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
+template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
@@ -1543,6 +1543,12 @@ void PagedSpace::RefillFreeList() {
{
Page* p = nullptr;
while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
+ // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
+ // entries here to make them unavailable for allocations.
+ if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+ p->ForAllFreeListCategories(
+ [](FreeListCategory* category) { category->Reset(); });
+ }
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
// during compaction.
@@ -1583,8 +1589,9 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Relinking requires the category to be unlinked.
other->RemovePage(p);
AddPage(p);
- DCHECK_EQ(p->AvailableInFreeList(),
- p->AvailableInFreeListFromAllocatedBytes());
+ DCHECK_IMPLIES(
+ !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
+ p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
}
DCHECK_EQ(0u, other->Size());
DCHECK_EQ(0u, other->Capacity());
@@ -2896,7 +2903,6 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
void FreeListCategory::Free(Address start, size_t size_in_bytes,
FreeMode mode) {
- DCHECK(page()->CanAllocate());
FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
free_space->set_next(top());
set_top(free_space);
@@ -3255,9 +3261,11 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
static_cast<size_t>(size_in_bytes)))
return true;
}
- } else if (is_local()) {
- // Sweeping not in progress and we are on a {CompactionSpace}. This can
- // only happen when we are evacuating for the young generation.
+ }
+
+ if (is_local()) {
+ // The main thread may have acquired all swept pages. Try to steal from
+ // it. This can only happen during young generation evacuation.
PagedSpace* main_space = heap()->paged_space(identity());
Page* page = main_space->RemovePageSafe(size_in_bytes);
if (page != nullptr) {
@@ -3303,6 +3311,12 @@ void ReadOnlyPage::MakeHeaderRelocatable() {
}
}
+void ReadOnlySpace::Forget() {
+ for (Page* p : *this) {
+ heap()->memory_allocator()->PreFreeMemory(p);
+ }
+}
+
void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
MemoryAllocator* memory_allocator = heap()->memory_allocator();
for (Page* p : *this) {
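The RefillFreeList change above keeps sweeping NEVER_ALLOCATE_ON_PAGE pages but drops their free-list entries so the freed memory can never satisfy an allocation. A rough sketch of that filtering step with toy types; Page and FreeListCategory below are simplified stand-ins, not V8's classes.

    // Sketch only: toy Page/FreeListCategory types.
    #include <vector>

    struct FreeListCategory {
      int available_bytes = 0;
      void Reset() { available_bytes = 0; }
    };

    struct Page {
      bool never_allocate_on_page = false;
      std::vector<FreeListCategory> categories;
    };

    void RefillFromSweptPage(Page* page) {
      if (page->never_allocate_on_page) {
        // The page stays swept and iterable, but its free memory is made
        // unavailable for future allocations.
        for (FreeListCategory& category : page->categories) category.Reset();
      }
      // ... surviving categories would then be relinked into the space's
      // free list, as in PagedSpace::RefillFreeList ...
    }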
diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h
index 8fe33574304..ce286ef3909 100644
--- a/chromium/v8/src/heap/spaces.h
+++ b/chromium/v8/src/heap/spaces.h
@@ -233,7 +233,7 @@ class FreeListCategory {
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
-class MemoryChunkLayout {
+class V8_EXPORT_PRIVATE MemoryChunkLayout {
public:
static size_t CodePageGuardStartOffset();
static size_t CodePageGuardSize();
@@ -241,7 +241,7 @@ class MemoryChunkLayout {
static intptr_t ObjectEndOffsetInCodePage();
static size_t AllocatableMemoryInCodePage();
static intptr_t ObjectStartOffsetInDataPage();
- V8_EXPORT_PRIVATE static size_t AllocatableMemoryInDataPage();
+ static size_t AllocatableMemoryInDataPage();
static size_t ObjectStartOffsetInMemoryChunk(AllocationSpace space);
static size_t AllocatableMemoryInMemoryChunk(AllocationSpace space);
};
@@ -407,8 +407,7 @@ class MemoryChunk {
static const int kPageSize = 1 << kPageSizeBits;
// Maximum number of nested code memory modification scopes.
- // TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
- static const int kMaxWriteUnprotectCounter = 4;
+ static const int kMaxWriteUnprotectCounter = 3;
static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
@@ -505,7 +504,7 @@ class MemoryChunk {
}
template <RememberedSetType type>
- SlotSet* AllocateSlotSet();
+ V8_EXPORT_PRIVATE SlotSet* AllocateSlotSet();
// Not safe to be called concurrently.
template <RememberedSetType type>
void ReleaseSlotSet();
@@ -517,7 +516,8 @@ class MemoryChunk {
InvalidatedSlots* AllocateInvalidatedSlots();
void ReleaseInvalidatedSlots();
- void RegisterObjectWithInvalidatedSlots(HeapObject object, int size);
+ V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
+ int size);
// Updates invalidated_slots after array left-trimming.
void MoveObjectWithInvalidatedSlots(HeapObject old_start,
HeapObject new_start);
@@ -537,7 +537,7 @@ class MemoryChunk {
size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
// Approximate amount of physical memory committed for this chunk.
- size_t CommittedPhysicalMemory();
+ V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
Address HighWaterMark() { return address() + high_water_mark_; }
@@ -647,8 +647,7 @@ class MemoryChunk {
return InYoungGeneration() && IsLargePage();
}
bool InOldSpace() const;
- bool InLargeObjectSpace() const;
-
+ V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
Space* owner() const { return owner_; }
@@ -660,9 +659,9 @@ class MemoryChunk {
// MemoryChunk::synchronized_heap() to simulate the barrier.
void InitializationMemoryFence();
- void SetReadable();
- void SetReadAndExecutable();
- void SetReadAndWritable();
+ V8_EXPORT_PRIVATE void SetReadable();
+ V8_EXPORT_PRIVATE void SetReadAndExecutable();
+ V8_EXPORT_PRIVATE void SetReadAndWritable();
void SetDefaultCodePermissions() {
if (FLAG_jitless) {
@@ -975,7 +974,7 @@ class LargePage : public MemoryChunk {
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
-class Space : public Malloced {
+class V8_EXPORT_PRIVATE Space : public Malloced {
public:
Space(Heap* heap, AllocationSpace id)
: allocation_observers_paused_(false),
@@ -1006,19 +1005,17 @@ class Space : public Malloced {
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
- const char* name() { return AllocationSpaceName(id_); }
+ const char* name() { return Heap::GetSpaceName(id_); }
- V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
- AllocationObserver* observer);
+ virtual void AddAllocationObserver(AllocationObserver* observer);
- V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
- AllocationObserver* observer);
+ virtual void RemoveAllocationObserver(AllocationObserver* observer);
- V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
+ virtual void PauseAllocationObservers();
- V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();
+ virtual void ResumeAllocationObservers();
- V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}
+ virtual void StartNextInlineAllocationStep() {}
void AllocationStep(int bytes_since_last, Address soon_object, int size);
@@ -1076,7 +1073,7 @@ class Space : public Malloced {
return external_backing_store_bytes_[type];
}
- V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
+ void* GetRandomMmapAddr();
MemoryChunk* first_page() { return memory_chunk_list_.front(); }
MemoryChunk* last_page() { return memory_chunk_list_.back(); }
@@ -1115,7 +1112,6 @@ class Space : public Malloced {
DISALLOW_COPY_AND_ASSIGN(Space);
};
-
class MemoryChunkValidator {
// Computed offsets should match the compiler generated ones.
STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
@@ -1205,7 +1201,7 @@ class SkipList {
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
-class V8_EXPORT_PRIVATE MemoryAllocator {
+class MemoryAllocator {
public:
// Unmapper takes care of concurrently unmapping and uncommitting memory
// chunks.
@@ -1251,10 +1247,10 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
V8_EXPORT_PRIVATE void FreeQueuedChunks();
void CancelAndWaitForPendingTasks();
void PrepareForGC();
- void EnsureUnmappingCompleted();
+ V8_EXPORT_PRIVATE void EnsureUnmappingCompleted();
V8_EXPORT_PRIVATE void TearDown();
size_t NumberOfCommittedChunks();
- int NumberOfChunks();
+ V8_EXPORT_PRIVATE int NumberOfChunks();
size_t CommittedBufferedMemory();
private:
@@ -1320,18 +1316,18 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
kPooledAndQueue,
};
- static intptr_t GetCommitPageSize();
+ V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();
// Computes the memory area of discardable memory within a given memory area
// [addr, addr+size) and returns the result as base::AddressRegion. If the
// memory is not discardable, the returned base::AddressRegion is empty.
- static base::AddressRegion ComputeDiscardMemoryArea(Address addr,
- size_t size);
+ V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
+ Address addr, size_t size);
- MemoryAllocator(Isolate* isolate, size_t max_capacity,
- size_t code_range_size);
+ V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
+ size_t code_range_size);
- void TearDown();
+ V8_EXPORT_PRIVATE void TearDown();
// Allocates a Page from the allocator. AllocationMode is used to indicate
// whether pooled allocation, which only works for MemoryChunk::kPageSize,
@@ -1370,8 +1366,10 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// Returns a MemoryChunk in which the memory region from commit_area_size to
// reserve_area_size of the chunk area is reserved but not committed; it
// could be committed later by calling MemoryChunk::CommitArea.
- MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
- Executability executable, Space* space);
+ V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
+ size_t commit_area_size,
+ Executability executable,
+ Space* space);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
size_t alignment, Executability executable,
@@ -1553,23 +1551,32 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
-extern template Page*
-MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- size_t size, PagedSpace* owner, Executability executable);
-extern template Page*
-MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
-extern template Page*
-MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- size_t size, SemiSpace* owner, Executability executable);
+extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+ size_t size, PagedSpace* owner, Executability executable);
+extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
+extern template EXPORT_TEMPLATE_DECLARE(
+ V8_EXPORT_PRIVATE) void MemoryAllocator::
+ Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
-//
-// NOTE: The space specific object iterators also implements the own next()
-// method which is used to avoid using virtual functions
-// iterating a specific space.
class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
public:
@@ -1597,12 +1604,12 @@ class PageIteratorImpl
PAGE_TYPE* p_;
};
-typedef PageIteratorImpl<Page> PageIterator;
-typedef PageIteratorImpl<LargePage> LargePageIterator;
+using PageIterator = PageIteratorImpl<Page>;
+using LargePageIterator = PageIteratorImpl<LargePage>;
class PageRange {
public:
- typedef PageIterator iterator;
+ using iterator = PageIterator;
PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
inline PageRange(Address start, Address limit);
@@ -1803,7 +1810,7 @@ class AllocationStats {
// words in size.
// At least 16384 words (huge): This list is for objects of 2048 words or
// larger. Empty pages are also added to this list.
-class V8_EXPORT_PRIVATE FreeList {
+class FreeList {
public:
// This method returns how much memory can be allocated after freeing
// maximum_freed memory.
@@ -1885,7 +1892,7 @@ class V8_EXPORT_PRIVATE FreeList {
// Used after booting the VM.
void RepairLists(Heap* heap);
- size_t EvictFreeListItems(Page* page);
+ V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
bool ContainsPageFreeListItems(Page* page);
size_t wasted_bytes() { return wasted_bytes_; }
@@ -1908,7 +1915,7 @@ class V8_EXPORT_PRIVATE FreeList {
}
bool AddCategory(FreeListCategory* category);
- void RemoveCategory(FreeListCategory* category);
+ V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
void PrintCategories(FreeListCategoryType type);
// Returns a page containing an entry for a given type, or nullptr otherwise.
@@ -1949,7 +1956,7 @@ class V8_EXPORT_PRIVATE FreeList {
static const size_t kTinyListMax = 0x1f * kTaggedSize;
static const size_t kSmallListMax = 0xff * kTaggedSize;
static const size_t kMediumListMax = 0x7ff * kTaggedSize;
- static const size_t kLargeListMax = 0x2fff * kTaggedSize;
+ static const size_t kLargeListMax = 0x1fff * kTaggedSize;
static const size_t kTinyAllocationMax = kTiniestListMax;
static const size_t kSmallAllocationMax = kTinyListMax;
static const size_t kMediumAllocationMax = kSmallListMax;
@@ -2044,11 +2051,11 @@ class LocalAllocationBuffer {
inline bool TryFreeLast(HeapObject object, int object_size);
// Close a LAB, effectively invalidating it. Returns the unused area.
- LinearAllocationArea Close();
+ V8_EXPORT_PRIVATE LinearAllocationArea Close();
private:
- LocalAllocationBuffer(Heap* heap,
- LinearAllocationArea allocation_info) V8_NOEXCEPT;
+ V8_EXPORT_PRIVATE LocalAllocationBuffer(
+ Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT;
Heap* heap_;
LinearAllocationArea allocation_info_;
@@ -2113,7 +2120,7 @@ class SpaceWithLinearArea : public Space {
class V8_EXPORT_PRIVATE PagedSpace
: NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
- typedef PageIterator iterator;
+ using iterator = PageIterator;
static const size_t kCompactionMemoryWanted = 500 * KB;
@@ -2424,7 +2431,7 @@ enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
// space as a marking stack when tracing live objects.
class SemiSpace : public Space {
public:
- typedef PageIterator iterator;
+ using iterator = PageIterator;
static void Swap(SemiSpace* from, SemiSpace* to);
@@ -2540,7 +2547,7 @@ class SemiSpace : public Space {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
#ifdef DEBUG
- void Print() override;
+ V8_EXPORT_PRIVATE void Print() override;
// Validate a range of addresses in a SemiSpace.
// The "from" address must be on a page prior to the "to" address,
// in the linked page order, or it must be earlier on the same page.
@@ -2616,9 +2623,10 @@ class SemiSpaceIterator : public ObjectIterator {
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.
-class NewSpace : public SpaceWithLinearArea {
+class V8_EXPORT_PRIVATE NewSpace
+ : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
public:
- typedef PageIterator iterator;
+ using iterator = PageIterator;
NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
size_t initial_semispace_capacity, size_t max_semispace_capacity);
@@ -2856,7 +2864,7 @@ class NewSpace : public SpaceWithLinearArea {
friend class SemiSpaceIterator;
};
-class PauseAllocationObserversScope {
+class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
public:
explicit PauseAllocationObserversScope(Heap* heap);
~PauseAllocationObserversScope();
@@ -2887,7 +2895,6 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
int size_in_bytes) override;
};
-
// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
public:
@@ -2991,8 +2998,11 @@ class ReadOnlySpace : public PagedSpace {
bool writable() const { return !is_marked_read_only_; }
- void ClearStringPaddingIfNeeded();
+ V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
void MarkAsReadOnly();
+ // Make the heap forget the space for memory bookkeeping purposes
+  // (e.g. prevent the space's memory from registering as leaked).
+ void Forget();
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
@@ -3017,7 +3027,7 @@ class ReadOnlySpace : public PagedSpace {
class LargeObjectSpace : public Space {
public:
- typedef LargePageIterator iterator;
+ using iterator = LargePageIterator;
explicit LargeObjectSpace(Heap* heap);
LargeObjectSpace(Heap* heap, AllocationSpace id);
@@ -3050,7 +3060,7 @@ class LargeObjectSpace : public Space {
void PromoteNewLargeObject(LargePage* page);
// Checks whether a heap object is in this space; O(1).
- bool Contains(HeapObject obj);
+ V8_EXPORT_PRIVATE bool Contains(HeapObject obj);
// Checks whether an address is in the object area in this space. Iterates
// all objects in the space. May be slow.
bool ContainsSlow(Address addr);
@@ -3098,7 +3108,8 @@ class NewLargeObjectSpace : public LargeObjectSpace {
public:
NewLargeObjectSpace(Heap* heap, size_t capacity);
- V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateRaw(int object_size);
// Available bytes for objects in this space.
size_t Available() override;
diff --git a/chromium/v8/src/heap/sweeper.h b/chromium/v8/src/heap/sweeper.h
index ff806a0af62..cd459323489 100644
--- a/chromium/v8/src/heap/sweeper.h
+++ b/chromium/v8/src/heap/sweeper.h
@@ -23,9 +23,9 @@ enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
class Sweeper {
public:
- typedef std::vector<Page*> IterabilityList;
- typedef std::deque<Page*> SweepingList;
- typedef std::vector<Page*> SweptList;
+ using IterabilityList = std::vector<Page*>;
+ using SweepingList = std::deque<Page*>;
+ using SweptList = std::vector<Page*>;
// Pauses the sweeper tasks or completes sweeping.
class PauseOrCompleteScope final {
@@ -96,7 +96,7 @@ class Sweeper {
// and the main thread can sweep lazily, but the background sweeper tasks
// are not running yet.
void StartSweeping();
- void StartSweeperTasks();
+ V8_EXPORT_PRIVATE void StartSweeperTasks();
void EnsureCompleted();
bool AreSweeperTasksRunning();
diff --git a/chromium/v8/src/heap/worklist.h b/chromium/v8/src/heap/worklist.h
index c086b87e599..82a278a0429 100644
--- a/chromium/v8/src/heap/worklist.h
+++ b/chromium/v8/src/heap/worklist.h
@@ -51,6 +51,8 @@ class Worklist {
return worklist_->LocalPushSegmentSize(task_id_);
}
+ void FlushToGlobal() { worklist_->FlushToGlobal(task_id_); }
+
private:
Worklist<EntryType, SEGMENT_SIZE>* worklist_;
int task_id_;
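The new View::FlushToGlobal hook added above lets a task publish its privately buffered work to the shared worklist; Scavenger::Finalize uses it for the ephemeron table list. Below is a hedged sketch of the shape of such a per-task view, with a toy mutex-guarded worklist standing in for V8's lock-free, segment-based Worklist<>.

    // Sketch only: ToyWorklist is illustrative, not V8's Worklist<>.
    #include <mutex>
    #include <vector>

    template <typename T>
    class ToyWorklist {
     public:
      class View {
       public:
        explicit View(ToyWorklist* list) : list_(list) {}
        // Work is buffered task-locally while tracing...
        void Push(T value) { local_.push_back(value); }
        // ...and published to the shared list when the task finalizes, so
        // other tasks or a later phase can drain it.
        void FlushToGlobal() {
          std::lock_guard<std::mutex> guard(list_->mutex_);
          list_->global_.insert(list_->global_.end(), local_.begin(),
                                local_.end());
          local_.clear();
        }
       private:
        ToyWorklist* list_;
        std::vector<T> local_;
      };
     private:
      std::mutex mutex_;
      std::vector<T> global_;
    };

A scavenger-style user would Push tables into its View while tracing and call FlushToGlobal from its finalize step, mirroring the ephemeron_table_list_ handling in the scavenger diff above.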