author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-01-20 13:40:20 +0100
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-01-22 12:41:23 +0000
commit     7961cea6d1041e3e454dae6a1da660b453efd238 (patch)
tree       c0eeb4a9ff9ba32986289c1653d9608e53ccb444 /chromium/v8/src/heap
parent     b7034d0803538058e5c9d904ef03cf5eab34f6ef (diff)
download   qtwebengine-chromium-7961cea6d1041e3e454dae6a1da660b453efd238.tar.gz
BASELINE: Update Chromium to 78.0.3904.130
Change-Id: If185e0c0061b3437531c97c9c8c78f239352a68b
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/heap')
-rw-r--r--  chromium/v8/src/heap/array-buffer-tracker-inl.h  |   3
-rw-r--r--  chromium/v8/src/heap/array-buffer-tracker.cc     |   3
-rw-r--r--  chromium/v8/src/heap/embedder-tracing.h          |   6
-rw-r--r--  chromium/v8/src/heap/factory-inl.h               |  18
-rw-r--r--  chromium/v8/src/heap/factory.cc                  |  52
-rw-r--r--  chromium/v8/src/heap/factory.h                   |  23
-rw-r--r--  chromium/v8/src/heap/gc-tracer.cc                |  70
-rw-r--r--  chromium/v8/src/heap/gc-tracer.h                 |  27
-rw-r--r--  chromium/v8/src/heap/heap-inl.h                  |  14
-rw-r--r--  chromium/v8/src/heap/heap.cc                     | 182
-rw-r--r--  chromium/v8/src/heap/heap.h                      |  42
-rw-r--r--  chromium/v8/src/heap/invalidated-slots-inl.h     |  42
-rw-r--r--  chromium/v8/src/heap/invalidated-slots.cc        |  59
-rw-r--r--  chromium/v8/src/heap/invalidated-slots.h         |  33
-rw-r--r--  chromium/v8/src/heap/local-allocator-inl.h       |  11
-rw-r--r--  chromium/v8/src/heap/local-allocator.h           |   2
-rw-r--r--  chromium/v8/src/heap/mark-compact.cc             |  52
-rw-r--r--  chromium/v8/src/heap/object-stats.cc             |  63
-rw-r--r--  chromium/v8/src/heap/object-stats.h              |   3
-rw-r--r--  chromium/v8/src/heap/remembered-set.h            |   4
-rw-r--r--  chromium/v8/src/heap/scavenger-inl.h             |   8
-rw-r--r--  chromium/v8/src/heap/scavenger.cc                |  18
-rw-r--r--  chromium/v8/src/heap/setup-heap-internal.cc      |  14
-rw-r--r--  chromium/v8/src/heap/slot-set.h                  |  12
-rw-r--r--  chromium/v8/src/heap/spaces-inl.h                | 121
-rw-r--r--  chromium/v8/src/heap/spaces.cc                   | 601
-rw-r--r--  chromium/v8/src/heap/spaces.h                    | 419
-rw-r--r--  chromium/v8/src/heap/store-buffer-inl.h          |  10
-rw-r--r--  chromium/v8/src/heap/store-buffer.cc             |  35
-rw-r--r--  chromium/v8/src/heap/store-buffer.h              |  30
-rw-r--r--  chromium/v8/src/heap/sweeper.cc                  |  66
-rw-r--r--  chromium/v8/src/heap/sweeper.h                   |  23
32 files changed, 1404 insertions, 662 deletions
diff --git a/chromium/v8/src/heap/array-buffer-tracker-inl.h b/chromium/v8/src/heap/array-buffer-tracker-inl.h
index 65d3f4a732a..763300cffe2 100644
--- a/chromium/v8/src/heap/array-buffer-tracker-inl.h
+++ b/chromium/v8/src/heap/array-buffer-tracker-inl.h
@@ -80,8 +80,7 @@ void LocalArrayBufferTracker::Free(Callback should_free) {
ExternalBackingStoreType::kArrayBuffer, freed_memory);
// TODO(wez): Remove backing-store from external memory accounting.
- page_->heap()->update_external_memory_concurrently_freed(
- static_cast<intptr_t>(freed_memory));
+ page_->heap()->update_external_memory_concurrently_freed(freed_memory);
}
}
diff --git a/chromium/v8/src/heap/array-buffer-tracker.cc b/chromium/v8/src/heap/array-buffer-tracker.cc
index 0c04d7b6ae2..fdca6e8df27 100644
--- a/chromium/v8/src/heap/array-buffer-tracker.cc
+++ b/chromium/v8/src/heap/array-buffer-tracker.cc
@@ -68,8 +68,7 @@ void LocalArrayBufferTracker::Process(Callback callback) {
page_->DecrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, freed_memory);
// TODO(wez): Remove backing-store from external memory accounting.
- page_->heap()->update_external_memory_concurrently_freed(
- static_cast<intptr_t>(freed_memory));
+ page_->heap()->update_external_memory_concurrently_freed(freed_memory);
}
array_buffers_.swap(kept_array_buffers);
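[Editorial note: the two array-buffer-tracker hunks above drop the static_cast<intptr_t>() because update_external_memory_concurrently_freed() now takes an unsigned uintptr_t, matching the heap.h hunk further down where external_memory_concurrently_freed_ becomes std::atomic<uintptr_t>. A minimal standalone sketch of that accounting pattern; all names here are invented for illustration, not the V8 API.]

// concurrently_freed_sketch.cc -- illustrative only.
#include <atomic>
#include <cstdint>
#include <iostream>

class ExternalMemoryAccounting {
 public:
  // Freed byte counts are inherently non-negative, so an unsigned parameter
  // removes the signed cast at every call site.
  void UpdateConcurrentlyFreed(uintptr_t freed_bytes) {
    concurrently_freed_.fetch_add(freed_bytes, std::memory_order_relaxed);
  }

  // Folds the concurrently freed bytes back into the main (signed) total.
  void AccountConcurrentlyFreed(int64_t* external_memory) {
    *external_memory -= static_cast<int64_t>(
        concurrently_freed_.exchange(0, std::memory_order_relaxed));
  }

 private:
  std::atomic<uintptr_t> concurrently_freed_{0};
};

int main() {
  ExternalMemoryAccounting accounting;
  int64_t external_memory = 4096;
  accounting.UpdateConcurrentlyFreed(1024);  // e.g. reported by a sweeper thread
  accounting.AccountConcurrentlyFreed(&external_memory);
  std::cout << external_memory << "\n";  // 3072
}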
diff --git a/chromium/v8/src/heap/embedder-tracing.h b/chromium/v8/src/heap/embedder-tracing.h
index eae29cbf5ce..7c67ccfab71 100644
--- a/chromium/v8/src/heap/embedder-tracing.h
+++ b/chromium/v8/src/heap/embedder-tracing.h
@@ -57,6 +57,12 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) {
return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle);
}
+ void ResetHandleInNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) {
+ // Resetting is only called when IsRootForNonTracingGC returns false which
+ // can only happen when the EmbedderHeapTracer is set on API level.
+ DCHECK(InUse());
+ remote_tracer_->ResetHandleInNonTracingGC(handle);
+ }
void NotifyV8MarkingWorklistWasEmpty() {
num_v8_marking_worklist_was_empty_++;
diff --git a/chromium/v8/src/heap/factory-inl.h b/chromium/v8/src/heap/factory-inl.h
index 9aa705047c8..bcad5d27141 100644
--- a/chromium/v8/src/heap/factory-inl.h
+++ b/chromium/v8/src/heap/factory-inl.h
@@ -71,13 +71,6 @@ Handle<HeapNumber> Factory::NewHeapNumber(double value,
return heap_number;
}
-Handle<MutableHeapNumber> Factory::NewMutableHeapNumber(
- double value, AllocationType allocation) {
- Handle<MutableHeapNumber> number = NewMutableHeapNumber(allocation);
- number->set_value(value);
- return number;
-}
-
Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits,
AllocationType allocation) {
Handle<HeapNumber> heap_number = NewHeapNumber(allocation);
@@ -85,16 +78,9 @@ Handle<HeapNumber> Factory::NewHeapNumberFromBits(uint64_t bits,
return heap_number;
}
-Handle<MutableHeapNumber> Factory::NewMutableHeapNumberFromBits(
- uint64_t bits, AllocationType allocation) {
- Handle<MutableHeapNumber> number = NewMutableHeapNumber(allocation);
- number->set_value_as_bits(bits);
- return number;
-}
-
-Handle<MutableHeapNumber> Factory::NewMutableHeapNumberWithHoleNaN(
+Handle<HeapNumber> Factory::NewHeapNumberWithHoleNaN(
AllocationType allocation) {
- return NewMutableHeapNumberFromBits(kHoleNanInt64, allocation);
+ return NewHeapNumberFromBits(kHoleNanInt64, allocation);
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
diff --git a/chromium/v8/src/heap/factory.cc b/chromium/v8/src/heap/factory.cc
index 19c36656225..9bf46be6e81 100644
--- a/chromium/v8/src/heap/factory.cc
+++ b/chromium/v8/src/heap/factory.cc
@@ -285,11 +285,12 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
}
Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
- AllocationType allocation) {
+ AllocationType allocation,
+ AllocationOrigin origin) {
AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
Heap* heap = isolate()->heap();
HeapObject result =
- heap->AllocateRawWithRetryOrFail(size, allocation, alignment);
+ heap->AllocateRawWithRetryOrFail(size, allocation, origin, alignment);
heap->CreateFillerObjectAt(result.address(), size, ClearRecordedSlots::kNo);
return Handle<HeapObject>(result, isolate());
}
@@ -685,16 +686,19 @@ Handle<SmallOrderedNameDictionary> Factory::NewSmallOrderedNameDictionary(
}
Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
- return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity);
+ return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity)
+ .ToHandleChecked();
}
Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
- return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kMinCapacity);
+ return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kMinCapacity)
+ .ToHandleChecked();
}
Handle<OrderedNameDictionary> Factory::NewOrderedNameDictionary() {
return OrderedNameDictionary::Allocate(isolate(),
- OrderedNameDictionary::kMinCapacity);
+ OrderedNameDictionary::kMinCapacity)
+ .ToHandleChecked();
}
Handle<AccessorPair> Factory::NewAccessorPair() {
@@ -1744,16 +1748,6 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
return microtask;
}
-Handle<FinalizationGroupCleanupJobTask>
-Factory::NewFinalizationGroupCleanupJobTask(
- Handle<JSFinalizationGroup> finalization_group) {
- Handle<FinalizationGroupCleanupJobTask> microtask =
- Handle<FinalizationGroupCleanupJobTask>::cast(
- NewStruct(FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE));
- microtask->set_finalization_group(*finalization_group);
- return microtask;
-}
-
Handle<Foreign> Factory::NewForeign(Address addr, AllocationType allocation) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
@@ -2010,7 +2004,8 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
HeapObject raw_clone = isolate()->heap()->AllocateRawWithRetryOrFail(
adjusted_object_size, AllocationType::kYoung);
- DCHECK(Heap::InYoungGeneration(raw_clone));
+ DCHECK(Heap::InYoungGeneration(raw_clone) || FLAG_single_generation);
+
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
Heap::CopyBlock(raw_clone.address(), source->address(), object_size);
@@ -2234,13 +2229,10 @@ Handle<HeapNumber> Factory::NewHeapNumber(AllocationType allocation) {
return handle(HeapNumber::cast(result), isolate());
}
-Handle<MutableHeapNumber> Factory::NewMutableHeapNumber(
- AllocationType allocation) {
- STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
- Map map = *mutable_heap_number_map();
- HeapObject result = AllocateRawWithImmortalMap(
- MutableHeapNumber::kSize, allocation, map, kDoubleUnaligned);
- return handle(MutableHeapNumber::cast(result), isolate());
+Handle<HeapNumber> Factory::NewHeapNumberForCodeAssembler(double value) {
+ return NewHeapNumber(value, isolate()->heap()->CanAllocateInReadOnlySpace()
+ ? AllocationType::kReadOnly
+ : AllocationType::kOld);
}
Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
@@ -2518,7 +2510,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
NewFunction(initial_map, info, context, allocation);
// Give compiler a chance to pre-initialize.
- Compiler::PostInstantiation(result, allocation);
+ Compiler::PostInstantiation(result);
return result;
}
@@ -2550,14 +2542,15 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
result->set_raw_feedback_cell(*feedback_cell);
// Give compiler a chance to pre-initialize.
- Compiler::PostInstantiation(result, allocation);
+ Compiler::PostInstantiation(result);
return result;
}
-Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
+Handle<ScopeInfo> Factory::NewScopeInfo(int length, AllocationType type) {
+ DCHECK(type == AllocationType::kOld || type == AllocationType::kReadOnly);
return NewFixedArrayWithMap<ScopeInfo>(RootIndex::kScopeInfoMap, length,
- AllocationType::kOld);
+ type);
}
Handle<SourceTextModuleInfo> Factory::NewSourceTextModuleInfo() {
@@ -3716,6 +3709,7 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
Handle<Object> type_name = undefined_value();
Handle<Object> eval_origin = frame->GetEvalOrigin();
Handle<Object> wasm_module_name = frame->GetWasmModuleName();
+ Handle<Object> wasm_instance = frame->GetWasmInstance();
// MethodName and TypeName are expensive to look up, so they are only
// included when they are strictly needed by the stack trace
@@ -3751,6 +3745,7 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
info->set_type_name(*type_name);
info->set_eval_origin(*eval_origin);
info->set_wasm_module_name(*wasm_module_name);
+ info->set_wasm_instance(*wasm_instance);
info->set_is_eval(frame->IsEval());
info->set_is_constructor(is_constructor);
@@ -3904,9 +3899,12 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags));
store->set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
+ store->set(JSRegExp::kIrregexpLatin1BytecodeIndex, uninitialized);
+ store->set(JSRegExp::kIrregexpUC16BytecodeIndex, uninitialized);
store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::kZero);
store->set(JSRegExp::kIrregexpCaptureCountIndex, Smi::FromInt(capture_count));
store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
+ store->set(JSRegExp::kIrregexpTierUpTicksIndex, Smi::kZero);
regexp->set_data(*store);
}
diff --git a/chromium/v8/src/heap/factory.h b/chromium/v8/src/heap/factory.h
index 3ccbe6856f8..1e47926e8e4 100644
--- a/chromium/v8/src/heap/factory.h
+++ b/chromium/v8/src/heap/factory.h
@@ -37,7 +37,6 @@ class ArrayBoilerplateDescription;
class CoverageInfo;
class DebugInfo;
class EnumCache;
-class FinalizationGroupCleanupJobTask;
class FreshlyAllocatedBigInt;
class Isolate;
class JSArrayBufferView;
@@ -478,8 +477,6 @@ class V8_EXPORT_PRIVATE Factory {
Handle<PromiseResolveThenableJobTask> NewPromiseResolveThenableJobTask(
Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> then,
Handle<JSReceiver> thenable, Handle<Context> context);
- Handle<FinalizationGroupCleanupJobTask> NewFinalizationGroupCleanupJobTask(
- Handle<JSFinalizationGroup> finalization_group);
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(
@@ -521,8 +518,9 @@ class V8_EXPORT_PRIVATE Factory {
// Allocate a block of memory of the given AllocationType (filled with a
// filler). Used as a fall-back for generated code when the space is full.
- Handle<HeapObject> NewFillerObject(int size, bool double_align,
- AllocationType allocation);
+ Handle<HeapObject> NewFillerObject(
+ int size, bool double_align, AllocationType allocation,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -589,13 +587,11 @@ class V8_EXPORT_PRIVATE Factory {
Handle<HeapNumber> NewHeapNumber(
AllocationType allocation = AllocationType::kYoung);
- Handle<MutableHeapNumber> NewMutableHeapNumber(
- AllocationType allocation = AllocationType::kYoung);
- inline Handle<MutableHeapNumber> NewMutableHeapNumber(
- double value, AllocationType allocation = AllocationType::kYoung);
- inline Handle<MutableHeapNumber> NewMutableHeapNumberFromBits(
- uint64_t bits, AllocationType allocation = AllocationType::kYoung);
- inline Handle<MutableHeapNumber> NewMutableHeapNumberWithHoleNaN(
+ // Creates a new HeapNumber in read-only space if possible otherwise old
+ // space.
+ Handle<HeapNumber> NewHeapNumberForCodeAssembler(double value);
+
+ inline Handle<HeapNumber> NewHeapNumberWithHoleNaN(
AllocationType allocation = AllocationType::kYoung);
// Allocates a new BigInt with {length} digits. Only to be used by
@@ -771,7 +767,8 @@ class V8_EXPORT_PRIVATE Factory {
AllocationType allocation = AllocationType::kOld);
// Create a serialized scope info.
- Handle<ScopeInfo> NewScopeInfo(int length);
+ Handle<ScopeInfo> NewScopeInfo(int length,
+ AllocationType type = AllocationType::kOld);
Handle<SourceTextModuleInfo> NewSourceTextModuleInfo();
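[Editorial note: the factory.h hunks above (and the heap.h hunks further down) thread a new AllocationOrigin argument through the allocation entry points while defaulting it to kRuntime, so existing call sites compile unchanged and are attributed to the runtime origin. A hedged, self-contained sketch of that default-argument pattern; the types and function below are invented, not the real V8 signatures.]

// allocation_origin_default_sketch.cc -- illustrative only.
#include <cstdio>

enum class AllocationOrigin { kGeneratedCode, kRuntime, kGC };

struct FakeHeap {
  // New trailing parameter with a default: callers that pass only the size
  // keep compiling and default to the runtime origin.
  void* AllocateRaw(int size_in_bytes,
                    AllocationOrigin origin = AllocationOrigin::kRuntime) {
    std::printf("alloc %d bytes, origin=%d\n", size_in_bytes,
                static_cast<int>(origin));
    return nullptr;  // a real allocator would return memory here
  }
};

int main() {
  FakeHeap heap;
  heap.AllocateRaw(32);                                // defaults to kRuntime
  heap.AllocateRaw(64, AllocationOrigin::kGC);         // evacuation/scavenge path
  heap.AllocateRaw(16, AllocationOrigin::kGeneratedCode);
}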
diff --git a/chromium/v8/src/heap/gc-tracer.cc b/chromium/v8/src/heap/gc-tracer.cc
index 77e6b999970..85152c7bfe2 100644
--- a/chromium/v8/src/heap/gc-tracer.cc
+++ b/chromium/v8/src/heap/gc-tracer.cc
@@ -26,6 +26,9 @@ static size_t CountTotalHolesSize(Heap* heap) {
}
return holes_size;
}
+WorkerThreadRuntimeCallStats* GCTracer::worker_thread_runtime_call_stats() {
+ return heap_->isolate()->counters()->worker_thread_runtime_call_stats();
+}
RuntimeCallCounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
STATIC_ASSERT(Scope::FIRST_SCOPE == Scope::MC_INCREMENTAL);
@@ -34,10 +37,20 @@ RuntimeCallCounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
static_cast<int>(id));
}
+RuntimeCallCounterId GCTracer::RCSCounterFromBackgroundScope(
+ BackgroundScope::ScopeId id) {
+ STATIC_ASSERT(Scope::FIRST_BACKGROUND_SCOPE ==
+ Scope::BACKGROUND_ARRAY_BUFFER_FREE);
+ STATIC_ASSERT(
+ 0 == static_cast<int>(BackgroundScope::BACKGROUND_ARRAY_BUFFER_FREE));
+ return static_cast<RuntimeCallCounterId>(
+ static_cast<int>(RCSCounterFromScope(Scope::FIRST_BACKGROUND_SCOPE)) +
+ static_cast<int>(id));
+}
+
GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
: tracer_(tracer), scope_(scope) {
start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
- // TODO(cbruni): remove once we fully moved to a trace-based system.
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
runtime_stats_ = tracer_->heap_->isolate()->counters()->runtime_call_stats();
runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
@@ -46,30 +59,25 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
GCTracer::Scope::~Scope() {
tracer_->AddScopeSample(
scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_);
- // TODO(cbruni): remove once we fully moved to a trace-based system.
if (V8_LIKELY(runtime_stats_ == nullptr)) return;
runtime_stats_->Leave(&timer_);
}
-GCTracer::BackgroundScope::BackgroundScope(GCTracer* tracer, ScopeId scope)
- : tracer_(tracer), scope_(scope), runtime_stats_enabled_(false) {
+GCTracer::BackgroundScope::BackgroundScope(GCTracer* tracer, ScopeId scope,
+ RuntimeCallStats* runtime_stats)
+ : tracer_(tracer), scope_(scope), runtime_stats_(runtime_stats) {
start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
- // TODO(cbruni): remove once we fully moved to a trace-based system.
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
- timer_.Start(&counter_, nullptr);
- runtime_stats_enabled_ = true;
+ runtime_stats_->Enter(&timer_,
+ GCTracer::RCSCounterFromBackgroundScope(scope));
}
GCTracer::BackgroundScope::~BackgroundScope() {
double duration_ms =
tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_;
- // TODO(cbruni): remove once we fully moved to a trace-based system.
- if (V8_LIKELY(!runtime_stats_enabled_)) {
- tracer_->AddBackgroundScopeSample(scope_, duration_ms, nullptr);
- } else {
- timer_.Stop();
- tracer_->AddBackgroundScopeSample(scope_, duration_ms, &counter_);
- }
+ tracer_->AddBackgroundScopeSample(scope_, duration_ms);
+ if (V8_LIKELY(runtime_stats_ == nullptr)) return;
+ runtime_stats_->Leave(&timer_);
}
const char* GCTracer::Scope::Name(ScopeId id) {
@@ -170,7 +178,6 @@ GCTracer::GCTracer(Heap* heap)
current_.end_time = heap_->MonotonicallyIncreasingTimeInMs();
for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
background_counter_[i].total_duration_ms = 0;
- background_counter_[i].runtime_call_counter = RuntimeCallCounter(nullptr);
}
}
@@ -204,7 +211,6 @@ void GCTracer::ResetForTesting() {
base::MutexGuard guard(&background_counter_mutex_);
for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
background_counter_[i].total_duration_ms = 0;
- background_counter_[i].runtime_call_counter.Reset();
}
}
@@ -391,6 +397,12 @@ void GCTracer::NotifySweepingCompleted() {
"FreeLists statistics after sweeping completed:\n");
heap_->PrintFreeListsStats();
}
+ if (FLAG_trace_allocations_origins) {
+ heap_->new_space()->PrintAllocationsOrigins();
+ heap_->old_space()->PrintAllocationsOrigins();
+ heap_->code_space()->PrintAllocationsOrigins();
+ heap_->map_space()->PrintAllocationsOrigins();
+ }
}
void GCTracer::SampleAllocation(double current_ms,
@@ -1138,30 +1150,13 @@ void GCTracer::FetchBackgroundCounters(int first_global_scope,
background_counter_[first_background_scope + i].total_duration_ms;
background_counter_[first_background_scope + i].total_duration_ms = 0;
}
- if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
- RuntimeCallStats* runtime_stats =
- heap_->isolate()->counters()->runtime_call_stats();
- if (!runtime_stats) return;
- for (int i = 0; i < background_mc_scopes; i++) {
- runtime_stats
- ->GetCounter(GCTracer::RCSCounterFromScope(
- static_cast<Scope::ScopeId>(first_global_scope + i)))
- ->Add(&background_counter_[first_background_scope + i]
- .runtime_call_counter);
- background_counter_[first_background_scope + i]
- .runtime_call_counter.Reset();
- }
}
-void GCTracer::AddBackgroundScopeSample(
- BackgroundScope::ScopeId scope, double duration,
- RuntimeCallCounter* runtime_call_counter) {
+void GCTracer::AddBackgroundScopeSample(BackgroundScope::ScopeId scope,
+ double duration) {
base::MutexGuard guard(&background_counter_mutex_);
BackgroundCounter& counter = background_counter_[scope];
counter.total_duration_ms += duration;
- if (runtime_call_counter) {
- counter.runtime_call_counter.Add(runtime_call_counter);
- }
}
void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
@@ -1197,10 +1192,7 @@ void GCTracer::RecordGCPhasesHistograms(TimedHistogram* gc_timer) {
DCHECK_GT(overall_marking_time, 0.0);
const double overall_v8_marking_time =
overall_marking_time -
- current_.scopes[Scope::MC_MARK_EMBEDDER_PROLOGUE] -
- current_.scopes[Scope::MC_MARK_EMBEDDER_TRACING] -
- current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE] -
- current_.scopes[Scope::MC_INCREMENTAL_EMBEDDER_TRACING];
+ current_.scopes[Scope::MC_MARK_EMBEDDER_TRACING];
DCHECK_GT(overall_v8_marking_time, 0.0);
const int main_thread_marking_throughput_mb_per_s =
static_cast<int>(static_cast<double>(heap_->SizeOfObjects()) /
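[Editorial note: RCSCounterFromBackgroundScope, added in the hunks above, maps a background scope id onto the runtime-call counter enum by adding the background scope's zero-based index to the counter id of the first background scope; the two STATIC_ASSERTs pin down that alignment. A hedged sketch of that enum-offset mapping with invented, simplified enums:]

// counter_offset_sketch.cc -- illustrative only.
#include <cstdio>

enum CounterId {  // stand-in for RuntimeCallCounterId
  kSomeOtherCounter,
  kGC_BACKGROUND_ARRAY_BUFFER_FREE,  // first background GC counter
  kGC_BACKGROUND_STORE_BUFFER,
  kGC_BACKGROUND_UNMAPPER,
};

enum BackgroundScopeId {  // stand-in for BackgroundScope::ScopeId
  BACKGROUND_ARRAY_BUFFER_FREE = 0,
  BACKGROUND_STORE_BUFFER = 1,
  BACKGROUND_UNMAPPER = 2,
};

CounterId CounterFromBackgroundScope(BackgroundScopeId id) {
  // The background counters occupy a contiguous run starting at the counter
  // of the first background scope, so a plain offset is enough.
  static_assert(BACKGROUND_ARRAY_BUFFER_FREE == 0, "offset math assumes this");
  return static_cast<CounterId>(
      static_cast<int>(kGC_BACKGROUND_ARRAY_BUFFER_FREE) + static_cast<int>(id));
}

int main() {
  std::printf("%d\n", CounterFromBackgroundScope(BACKGROUND_UNMAPPER));  // 3
}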
diff --git a/chromium/v8/src/heap/gc-tracer.h b/chromium/v8/src/heap/gc-tracer.h
index ec54b6c1ab6..454bb9ff179 100644
--- a/chromium/v8/src/heap/gc-tracer.h
+++ b/chromium/v8/src/heap/gc-tracer.h
@@ -31,9 +31,12 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), \
GCTracer::Scope::Name(gc_tracer_scope_id))
-#define TRACE_BACKGROUND_GC(tracer, scope_id) \
- GCTracer::BackgroundScope background_scope(tracer, scope_id); \
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), \
+#define TRACE_BACKGROUND_GC(tracer, scope_id) \
+ WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope( \
+ tracer->worker_thread_runtime_call_stats()); \
+ GCTracer::BackgroundScope background_scope(tracer, scope_id, \
+ runtime_call_stats_scope.Get()); \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), \
GCTracer::BackgroundScope::Name(scope_id))
// GCTracer collects and prints ONE line after each garbage collector
@@ -82,7 +85,8 @@ class V8_EXPORT_PRIVATE GCTracer {
FIRST_TOP_MC_SCOPE = MC_CLEAR,
LAST_TOP_MC_SCOPE = MC_SWEEP,
FIRST_MINOR_GC_BACKGROUND_SCOPE = MINOR_MC_BACKGROUND_EVACUATE_COPY,
- LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL
+ LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL,
+ FIRST_BACKGROUND_SCOPE = FIRST_GENERAL_BACKGROUND_SCOPE
};
Scope(GCTracer* tracer, ScopeId scope);
@@ -113,7 +117,8 @@ class V8_EXPORT_PRIVATE GCTracer {
FIRST_MINOR_GC_BACKGROUND_SCOPE = MINOR_MC_BACKGROUND_EVACUATE_COPY,
LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL
};
- BackgroundScope(GCTracer* tracer, ScopeId scope);
+ BackgroundScope(GCTracer* tracer, ScopeId scope,
+ RuntimeCallStats* runtime_stats);
~BackgroundScope();
static const char* Name(ScopeId id);
@@ -123,8 +128,7 @@ class V8_EXPORT_PRIVATE GCTracer {
ScopeId scope_;
double start_time_;
RuntimeCallTimer timer_;
- RuntimeCallCounter counter_;
- bool runtime_stats_enabled_;
+ RuntimeCallStats* runtime_stats_;
DISALLOW_COPY_AND_ASSIGN(BackgroundScope);
};
@@ -206,6 +210,8 @@ class V8_EXPORT_PRIVATE GCTracer {
double optional_speed);
static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);
+ static RuntimeCallCounterId RCSCounterFromBackgroundScope(
+ BackgroundScope::ScopeId id);
explicit GCTracer(Heap* heap);
@@ -340,13 +346,15 @@ class V8_EXPORT_PRIVATE GCTracer {
}
}
- void AddBackgroundScopeSample(BackgroundScope::ScopeId scope, double duration,
- RuntimeCallCounter* runtime_call_counter);
+ void AddBackgroundScopeSample(BackgroundScope::ScopeId scope,
+ double duration);
void RecordGCPhasesHistograms(TimedHistogram* gc_timer);
void RecordEmbedderSpeed(size_t bytes, double duration);
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
+
private:
FRIEND_TEST(GCTracer, AverageSpeed);
FRIEND_TEST(GCTracerTest, AllocationThroughput);
@@ -369,7 +377,6 @@ class V8_EXPORT_PRIVATE GCTracer {
struct BackgroundCounter {
double total_duration_ms;
- RuntimeCallCounter runtime_call_counter;
};
// Returns the average speed of the events in the buffer.
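[Editorial note: after this change a BackgroundScope always records its wall-clock duration and only Enter()s/Leave()s a RuntimeCallStats object when TRACE_BACKGROUND_GC supplies one; the per-scope RuntimeCallCounter member is gone. A small standalone RAII sketch of that shape; the types and names below are invented, not the V8 classes.]

// background_scope_sketch.cc -- illustrative only.
#include <chrono>
#include <cstdio>

struct StatsSink {  // stand-in for RuntimeCallStats
  void Enter(const char* name) { std::printf("enter %s\n", name); }
  void Leave(const char* name) { std::printf("leave %s\n", name); }
};

class BackgroundScope {
 public:
  using Clock = std::chrono::steady_clock;

  // stats may be nullptr when runtime-call stats are disabled.
  BackgroundScope(const char* name, StatsSink* stats)
      : name_(name), stats_(stats), start_(Clock::now()) {
    if (stats_ != nullptr) stats_->Enter(name_);
  }

  ~BackgroundScope() {
    // The duration sample is always taken, independent of the stats sink.
    double ms = std::chrono::duration<double, std::milli>(Clock::now() - start_).count();
    std::printf("%s took %.3f ms\n", name_, ms);
    if (stats_ != nullptr) stats_->Leave(name_);
  }

 private:
  const char* name_;
  StatsSink* stats_;
  Clock::time_point start_;
};

int main() {
  StatsSink sink;
  { BackgroundScope scope("BACKGROUND_ARRAY_BUFFER_FREE", &sink); }
  { BackgroundScope scope("BACKGROUND_UNMAPPER", nullptr); }  // stats disabled
}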
diff --git a/chromium/v8/src/heap/heap-inl.h b/chromium/v8/src/heap/heap-inl.h
index f2f7a7f6920..da803f33395 100644
--- a/chromium/v8/src/heap/heap-inl.h
+++ b/chromium/v8/src/heap/heap-inl.h
@@ -67,7 +67,7 @@ void Heap::update_external_memory(int64_t delta) {
isolate()->isolate_data()->external_memory_ += delta;
}
-void Heap::update_external_memory_concurrently_freed(intptr_t freed) {
+void Heap::update_external_memory_concurrently_freed(uintptr_t freed) {
external_memory_concurrently_freed_ += freed;
}
@@ -159,6 +159,7 @@ size_t Heap::NewSpaceAllocationCounter() {
}
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
+ AllocationOrigin origin,
AllocationAlignment alignment) {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
@@ -179,6 +180,9 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
HeapObject object;
AllocationResult allocation;
+ if (FLAG_single_generation && type == AllocationType::kYoung)
+ type = AllocationType::kOld;
+
if (AllocationType::kYoung == type) {
if (large_object) {
if (FLAG_young_generation_large_objects) {
@@ -191,13 +195,13 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
allocation = lo_space_->AllocateRaw(size_in_bytes);
}
} else {
- allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
+ allocation = new_space_->AllocateRaw(size_in_bytes, alignment, origin);
}
} else if (AllocationType::kOld == type) {
if (large_object) {
allocation = lo_space_->AllocateRaw(size_in_bytes);
} else {
- allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
+ allocation = old_space_->AllocateRaw(size_in_bytes, alignment, origin);
}
} else if (AllocationType::kCode == type) {
if (size_in_bytes <= code_space()->AreaSize() && !large_object) {
@@ -213,7 +217,9 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
#endif
DCHECK(!large_object);
DCHECK(CanAllocateInReadOnlySpace());
- allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
+ DCHECK_EQ(AllocationOrigin::kRuntime, origin);
+ allocation =
+ read_only_space_->AllocateRaw(size_in_bytes, alignment, origin);
} else {
UNREACHABLE();
}
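[Editorial note: the AllocateRaw hunk above rewrites the requested type early when a single-generation flag is set, so the rest of the space dispatch never sees kYoung. A tiny sketch of that redirect under invented names; the flag and helper below are not the real V8 code.]

// single_generation_sketch.cc -- illustrative only.
#include <cstdio>

enum class AllocationType { kYoung, kOld, kCode, kReadOnly };

bool FLAG_single_generation = true;  // stand-in for the real V8 flag

AllocationType EffectiveType(AllocationType requested) {
  // With a single generation there is no new space, so young requests are
  // retargeted to old space before the per-space dispatch runs.
  if (FLAG_single_generation && requested == AllocationType::kYoung)
    return AllocationType::kOld;
  return requested;
}

int main() {
  std::printf("%d\n",
              static_cast<int>(EffectiveType(AllocationType::kYoung)));  // 1 == kOld
}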
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index 7feb1c11ba9..ff3b34cfb4f 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -270,10 +270,11 @@ size_t Heap::MinOldGenerationSize() {
size_t Heap::MaxOldGenerationSize(uint64_t physical_memory) {
size_t max_size = V8HeapTrait::kMaxSize;
// Finch experiment: Increase the heap size from 2GB to 4GB for 64-bit
- // systems with physical memory bigger than 16GB.
+ // systems with physical memory bigger than 16GB. The physical memory
+ // is rounded up to GB.
constexpr bool x64_bit = Heap::kPointerMultiplier >= 2;
if (FLAG_huge_max_old_generation_size && x64_bit &&
- physical_memory / GB > 16) {
+ (physical_memory + 512 * MB) / GB >= 16) {
DCHECK_EQ(max_size / GB, 2);
max_size *= 2;
}
@@ -1107,6 +1108,15 @@ void Heap::GarbageCollectionEpilogue() {
AllowHeapAllocation for_the_rest_of_the_epilogue;
#ifdef DEBUG
+ // Old-to-new slot sets must be empty after each collection.
+ for (SpaceIterator it(this); it.HasNext();) {
+ Space* space = it.Next();
+
+ for (MemoryChunk* chunk = space->first_page(); chunk != space->last_page();
+ chunk = chunk->list_node().next())
+ DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
+ }
+
if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
@@ -1179,16 +1189,9 @@ void Heap::GarbageCollectionEpilogue() {
}
if (FLAG_harmony_weak_refs) {
- // TODO(marja): (spec): The exact condition on when to schedule the cleanup
- // task is unclear. This version schedules the cleanup task for a
- // JSFinalizationGroup whenever the GC has discovered new dirty WeakCells
- // for it (at that point it might have leftover dirty WeakCells since an
- // earlier invocation of the cleanup function didn't iterate through
- // them). See https://github.com/tc39/proposal-weakrefs/issues/34
HandleScope handle_scope(isolate());
while (!isolate()->heap()->dirty_js_finalization_groups().IsUndefined(
isolate())) {
- // Enqueue one microtask per JSFinalizationGroup.
Handle<JSFinalizationGroup> finalization_group(
JSFinalizationGroup::cast(
isolate()->heap()->dirty_js_finalization_groups()),
@@ -1196,22 +1199,7 @@ void Heap::GarbageCollectionEpilogue() {
isolate()->heap()->set_dirty_js_finalization_groups(
finalization_group->next());
finalization_group->set_next(ReadOnlyRoots(isolate()).undefined_value());
- Handle<NativeContext> context(finalization_group->native_context(),
- isolate());
- // GC has no native context, but we use the creation context of the
- // JSFinalizationGroup for the EnqueueTask operation. This is consitent
- // with the Promise implementation, assuming the JSFinalizationGroup's
- // creation context is the "caller's context" in promise functions. An
- // alternative would be to use the native context of the cleanup
- // function. This difference shouldn't be observable from JavaScript,
- // since we enter the native context of the cleanup function before
- // calling it. TODO(marja): Revisit when the spec clarifies this. See also
- // https://github.com/tc39/proposal-weakrefs/issues/38 .
- Handle<FinalizationGroupCleanupJobTask> task =
- isolate()->factory()->NewFinalizationGroupCleanupJobTask(
- finalization_group);
- MicrotaskQueue* microtask_queue = context->microtask_queue();
- if (microtask_queue) microtask_queue->EnqueueMicrotask(*task);
+ isolate()->RunHostCleanupFinalizationGroupCallback(finalization_group);
}
}
}
@@ -2841,6 +2829,9 @@ HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
ClearFreedMemoryMode clear_memory_mode) {
if (size == 0) return HeapObject();
HeapObject filler = HeapObject::FromAddress(addr);
+ bool clear_memory =
+ (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory ||
+ clear_slots_mode == ClearRecordedSlots::kYes);
if (size == kTaggedSize) {
filler.set_map_after_allocation(
Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
@@ -2849,9 +2840,9 @@ HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
filler.set_map_after_allocation(
Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
SKIP_WRITE_BARRIER);
- if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
- Memory<Tagged_t>(addr + kTaggedSize) =
- static_cast<Tagged_t>(kClearedFreeMemoryValue);
+ if (clear_memory) {
+ AtomicSlot slot(ObjectSlot(addr) + 1);
+ *slot = static_cast<Tagged_t>(kClearedFreeMemoryValue);
}
} else {
DCHECK_GT(size, 2 * kTaggedSize);
@@ -2859,7 +2850,7 @@ HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
SKIP_WRITE_BARRIER);
FreeSpace::cast(filler).relaxed_write_size(size);
- if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
+ if (clear_memory) {
MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
(size / kTaggedSize) - 2);
}
@@ -2944,6 +2935,9 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
if (target.IsSharedFunctionInfo()) {
LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
target.address()));
+ } else if (target.IsNativeContext()) {
+ PROFILE(isolate_,
+ NativeContextMoveEvent(source.address(), target.address()));
}
if (FLAG_verify_predictable) {
@@ -3000,11 +2994,21 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
object, HeapObject::FromAddress(new_start));
}
+#ifdef DEBUG
+ if (MayContainRecordedSlots(object)) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
+ DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
+ }
+#endif
+
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
- HeapObject filler =
- CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
+ CreateFillerObjectAt(old_start, bytes_to_trim,
+ MayContainRecordedSlots(object)
+ ? ClearRecordedSlots::kYes
+ : ClearRecordedSlots::kNo);
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
@@ -3016,28 +3020,6 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
FixedArrayBase new_object =
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
- // Remove recorded slots for the new map and length offset.
- ClearRecordedSlot(new_object, new_object.RawField(0));
- ClearRecordedSlot(new_object,
- new_object.RawField(FixedArrayBase::kLengthOffset));
-
- // Handle invalidated old-to-old slots.
- if (incremental_marking()->IsCompacting() &&
- MayContainRecordedSlots(new_object)) {
- // If the array was right-trimmed before, then it is registered in
- // the invalidated_slots.
- MemoryChunk::FromHeapObject(new_object)
- ->MoveObjectWithInvalidatedSlots(filler, new_object);
- // We have to clear slots in the free space to avoid stale old-to-old slots.
- // Note we cannot use ClearFreedMemoryMode of CreateFillerObjectAt because
- // we need pointer granularity writes to avoid race with the concurrent
- // marking.
- if (filler.Size() > FreeSpace::kSize) {
- MemsetTagged(filler.RawField(FreeSpace::kSize),
- ReadOnlyRoots(this).undefined_value(),
- (filler.Size() - FreeSpace::kSize) / kTaggedSize);
- }
- }
// Notify the heap profiler of change in object layout.
OnMoveEvent(new_object, object, new_object.Size());
@@ -3106,26 +3088,24 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
Address old_end = object.address() + old_size;
Address new_end = old_end - bytes_to_trim;
- // Register the array as an object with invalidated old-to-old slots. We
- // cannot use NotifyObjectLayoutChange as it would mark the array black,
- // which is not safe for left-trimming because left-trimming re-pushes
- // only grey arrays onto the marking worklist.
- if (incremental_marking()->IsCompacting() &&
- MayContainRecordedSlots(object)) {
- // Ensure that the object survives because the InvalidatedSlotsFilter will
- // compute its size from its map during pointers updating phase.
- incremental_marking()->WhiteToGreyAndPush(object);
- MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
- object, old_size);
+#ifdef DEBUG
+ if (MayContainRecordedSlots(object)) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
+ DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(object));
+ DCHECK(!chunk->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
}
+#endif
+
+ bool clear_slots = MayContainRecordedSlots(object);
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
// We do not create a filler for objects in a large object space.
if (!IsLargeObject(object)) {
- HeapObject filler =
- CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kNo);
+ HeapObject filler = CreateFillerObjectAt(
+ new_end, bytes_to_trim,
+ clear_slots ? ClearRecordedSlots::kYes : ClearRecordedSlots::kNo);
DCHECK(!filler.is_null());
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
@@ -3136,6 +3116,11 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
page->AddressToMarkbitIndex(new_end),
page->AddressToMarkbitIndex(new_end + bytes_to_trim));
}
+ } else if (clear_slots) {
+ // Large objects are not swept, so it is not necessary to clear the
+ // recorded slot.
+ MemsetTagged(ObjectSlot(new_end), Object(kClearedFreeMemoryValue),
+ (old_end - new_end) / kTaggedSize);
}
// Initialize header of the trimmed array. We are storing the new length
@@ -3408,8 +3393,8 @@ void Heap::NotifyObjectLayoutChange(HeapObject object, int size,
incremental_marking()->MarkBlackAndVisitObjectDueToLayoutChange(object);
if (incremental_marking()->IsCompacting() &&
MayContainRecordedSlots(object)) {
- MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
- object, size);
+ MemoryChunk::FromHeapObject(object)
+ ->RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(object, size);
}
}
#ifdef VERIFY_HEAP
@@ -4451,6 +4436,7 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
static_cast<uint64_t>(max_semi_space_size_)));
max_semi_space_size_ = Max(max_semi_space_size_, kMinSemiSpaceSize);
+ max_semi_space_size_ = Min(max_semi_space_size_, kMaxSemiSpaceSize);
max_semi_space_size_ = RoundDown<Page::kPageSize>(max_semi_space_size_);
}
@@ -4495,6 +4481,14 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
initial_semispace_size_ = SemiSpaceSizeFromYoungGenerationSize(
constraints.initial_young_generation_size_in_bytes());
}
+ if (FLAG_initial_heap_size > 0) {
+ size_t young_generation, old_generation;
+ Heap::GenerationSizesFromHeapSize(
+ static_cast<size_t>(FLAG_initial_heap_size) * MB, &young_generation,
+ &old_generation);
+ initial_semispace_size_ =
+ SemiSpaceSizeFromYoungGenerationSize(young_generation);
+ }
if (FLAG_min_semi_space_size > 0) {
initial_semispace_size_ =
static_cast<size_t>(FLAG_min_semi_space_size) * MB;
@@ -4513,6 +4507,17 @@ void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
constraints.initial_old_generation_size_in_bytes();
old_generation_size_configured_ = true;
}
+ if (FLAG_initial_heap_size > 0) {
+ size_t initial_heap_size =
+ static_cast<size_t>(FLAG_initial_heap_size) * MB;
+ size_t young_generation_size =
+ YoungGenerationSizeFromSemiSpaceSize(initial_semispace_size_);
+ initial_old_generation_size_ =
+ initial_heap_size > young_generation_size
+ ? initial_heap_size - young_generation_size
+ : 0;
+ old_generation_size_configured_ = true;
+ }
if (FLAG_initial_old_space_size > 0) {
initial_old_generation_size_ =
static_cast<size_t>(FLAG_initial_old_space_size) * MB;
@@ -4875,9 +4880,10 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
}
HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
+ AllocationOrigin origin,
AllocationAlignment alignment) {
HeapObject result;
- AllocationResult alloc = AllocateRaw(size, allocation, alignment);
+ AllocationResult alloc = AllocateRaw(size, allocation, origin, alignment);
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -4886,7 +4892,7 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
for (int i = 0; i < 2; i++) {
CollectGarbage(alloc.RetrySpace(),
GarbageCollectionReason::kAllocationFailure);
- alloc = AllocateRaw(size, allocation, alignment);
+ alloc = AllocateRaw(size, allocation, origin, alignment);
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
return result;
@@ -4896,16 +4902,18 @@ HeapObject Heap::AllocateRawWithLightRetry(int size, AllocationType allocation,
}
HeapObject Heap::AllocateRawWithRetryOrFail(int size, AllocationType allocation,
+ AllocationOrigin origin,
AllocationAlignment alignment) {
AllocationResult alloc;
- HeapObject result = AllocateRawWithLightRetry(size, allocation, alignment);
+ HeapObject result =
+ AllocateRawWithLightRetry(size, allocation, origin, alignment);
if (!result.is_null()) return result;
isolate()->counters()->gc_last_resort_from_handles()->Increment();
CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
{
AlwaysAllocateScope scope(isolate());
- alloc = AllocateRaw(size, allocation, alignment);
+ alloc = AllocateRaw(size, allocation, origin, alignment);
}
if (alloc.To(&result)) {
DCHECK(result != ReadOnlyRoots(this).exception());
@@ -5087,25 +5095,6 @@ void Heap::InitializeHashSeed() {
0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
}
-void Heap::SetStackLimits() {
- DCHECK_NOT_NULL(isolate_);
- DCHECK(isolate_ == isolate());
- // On 64 bit machines, pointers are generally out of range of Smis. We write
- // something that looks like an out of range Smi to the GC.
-
- // Set up the special root array entries containing the stack limits.
- // These are actually addresses, but the tag makes the GC ignore it.
- roots_table()[RootIndex::kStackLimit] =
- (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag;
- roots_table()[RootIndex::kRealStackLimit] =
- (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag;
-}
-
-void Heap::ClearStackLimits() {
- roots_table()[RootIndex::kStackLimit] = kNullAddress;
- roots_table()[RootIndex::kRealStackLimit] = kNullAddress;
-}
-
int Heap::NextAllocationTimeout(int current_timeout) {
if (FLAG_random_gc_interval > 0) {
// If current timeout hasn't reached 0 the GC was caused by something
@@ -5541,7 +5530,8 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
Page* page = Page::FromAddress(slot.address());
if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
- store_buffer()->DeleteEntry(slot.address());
+ store_buffer()->MoveAllEntriesToRememberedSet();
+ RememberedSet<OLD_TO_NEW>::Remove(page, slot.address());
}
}
@@ -5555,7 +5545,7 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
// Old to old slots are filtered with invalidated slots.
CHECK_IMPLIES(RememberedSet<OLD_TO_OLD>::Contains(page, slot.address()),
- page->RegisteredObjectWithInvalidatedSlots(object));
+ page->RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(object));
}
#endif
@@ -5564,7 +5554,9 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
DCHECK(!page->IsLargePage());
if (!page->InYoungGeneration()) {
DCHECK_EQ(page->owner_identity(), OLD_SPACE);
- store_buffer()->DeleteEntry(start, end);
+ store_buffer()->MoveAllEntriesToRememberedSet();
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
}
}
@@ -5925,7 +5917,7 @@ void Heap::KeepDuringJob(Handle<JSReceiver> target) {
table =
handle(OrderedHashSet::cast(weak_refs_keep_during_job()), isolate());
}
- table = OrderedHashSet::Add(isolate(), table, target);
+ table = OrderedHashSet::Add(isolate(), table, target).ToHandleChecked();
set_weak_refs_keep_during_job(*table);
}
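[Editorial note on the MaxOldGenerationSize hunk near the top of this file: the old test physical_memory / GB > 16 required strictly more than 16 GB, so a machine reporting 16 GB exactly, or slightly less after firmware reservations, never got the enlarged heap. The new test adds half a GB before dividing, which rounds physical memory to the nearest GB. A hedged arithmetic sketch of just that comparison:]

// heap_limit_rounding_sketch.cc -- illustrative only.
#include <cstdint>
#include <cstdio>

constexpr uint64_t MB = 1024 * 1024;
constexpr uint64_t GB = 1024 * MB;

bool QualifiesForHugeHeap(uint64_t physical_memory) {
  // Round to the nearest GB before comparing against 16.
  return (physical_memory + 512 * MB) / GB >= 16;
}

int main() {
  // 15.9 GB reported by the OS: the old check (x / GB > 16) says no,
  // the rounded check says yes because 15.9 GB + 0.5 GB truncates to 16.
  uint64_t reported = 15 * GB + 900 * MB;
  std::printf("%d\n", QualifiesForHugeHeap(reported));  // 1
  std::printf("%d\n", QualifiesForHugeHeap(15 * GB));   // 0 (rounds to 15)
}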
diff --git a/chromium/v8/src/heap/heap.h b/chromium/v8/src/heap/heap.h
index 81f2b0dd8c3..2b8b963a798 100644
--- a/chromium/v8/src/heap/heap.h
+++ b/chromium/v8/src/heap/heap.h
@@ -96,6 +96,15 @@ enum class TraceRetainingPathMode { kEnabled, kDisabled };
enum class RetainingPathOption { kDefault, kTrackEphemeronPath };
+enum class AllocationOrigin {
+ kGeneratedCode = 0,
+ kRuntime = 1,
+ kGC = 2,
+ kFirstAllocationOrigin = kGeneratedCode,
+ kLastAllocationOrigin = kGC,
+ kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
+};
+
enum class GarbageCollectionReason {
kUnknown = 0,
kAllocationFailure = 1,
@@ -576,7 +585,7 @@ class Heap {
V8_INLINE int64_t external_memory();
V8_INLINE void update_external_memory(int64_t delta);
- V8_INLINE void update_external_memory_concurrently_freed(intptr_t freed);
+ V8_INLINE void update_external_memory_concurrently_freed(uintptr_t freed);
V8_INLINE void account_external_memory_concurrently_freed();
size_t backing_store_bytes() const { return backing_store_bytes_; }
@@ -713,15 +722,6 @@ class Heap {
V8_INLINE void SetMessageListeners(TemplateList value);
V8_INLINE void SetPendingOptimizeForTestBytecode(Object bytecode);
- // Set the stack limit in the roots table. Some architectures generate
- // code that looks here, because it is faster than loading from the static
- // jslimit_/real_jslimit_ variable in the StackGuard.
- void SetStackLimits();
-
- // The stack limit is thread-dependent. To be able to reproduce the same
- // snapshot blob, we need to reset it before serializing.
- void ClearStackLimits();
-
void RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end);
void UnregisterStrongRoots(FullObjectSlot start);
@@ -1729,7 +1729,8 @@ class Heap {
// inlined allocations, use the Heap::DisableInlineAllocation() support).
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationType allocation,
- AllocationAlignment aligment = kWordAligned);
+ AllocationOrigin origin = AllocationOrigin::kRuntime,
+ AllocationAlignment alignment = kWordAligned);
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
@@ -1737,8 +1738,14 @@ class Heap {
// times. If after that retry procedure the allocation still fails nullptr is
// returned.
HeapObject AllocateRawWithLightRetry(
- int size, AllocationType allocation,
+ int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment = kWordAligned);
+ HeapObject AllocateRawWithLightRetry(
+ int size, AllocationType allocation,
+ AllocationAlignment alignment = kWordAligned) {
+ return AllocateRawWithLightRetry(size, allocation,
+ AllocationOrigin::kRuntime, alignment);
+ }
// This method will try to perform an allocation of a given size of a given
// AllocationType. If the allocation fails, a regular full garbage collection
@@ -1747,8 +1754,15 @@ class Heap {
// garbage collection is triggered which tries to significantly reduce memory.
// If the allocation still fails after that a fatal error is thrown.
HeapObject AllocateRawWithRetryOrFail(
- int size, AllocationType allocation,
+ int size, AllocationType allocation, AllocationOrigin origin,
AllocationAlignment alignment = kWordAligned);
+ HeapObject AllocateRawWithRetryOrFail(
+ int size, AllocationType allocation,
+ AllocationAlignment alignment = kWordAligned) {
+ return AllocateRawWithRetryOrFail(size, allocation,
+ AllocationOrigin::kRuntime, alignment);
+ }
+
HeapObject AllocateRawCodeInLargeObjectSpace(int size);
// Allocates a heap object based on the map.
@@ -1789,7 +1803,7 @@ class Heap {
#endif // DEBUG
// The amount of memory that has been freed concurrently.
- std::atomic<intptr_t> external_memory_concurrently_freed_{0};
+ std::atomic<uintptr_t> external_memory_concurrently_freed_{0};
// This can be calculated directly from a pointer to the heap; however, it is
// more expedient to get at the isolate directly from within Heap methods.
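[Editorial note: the kFirstAllocationOrigin/kLastAllocationOrigin/kNumberOfAllocationOrigins sentinels in the new enum above allow per-origin counters to live in a plain array and be iterated, which is what the PrintAllocationsOrigins calls added in gc-tracer.cc rely on. A hedged sketch of that bookkeeping; the counter class and names are invented.]

// allocation_origin_counters_sketch.cc -- illustrative only.
#include <array>
#include <cstddef>
#include <cstdio>

enum class AllocationOrigin {
  kGeneratedCode = 0,
  kRuntime = 1,
  kGC = 2,
  kFirstAllocationOrigin = kGeneratedCode,
  kLastAllocationOrigin = kGC,
  kNumberOfAllocationOrigins = kLastAllocationOrigin + 1
};

class OriginCounters {
 public:
  void Count(AllocationOrigin origin) {
    ++counters_[static_cast<size_t>(origin)];
  }

  void Print() const {
    static const char* const kNames[] = {"GeneratedCode", "Runtime", "GC"};
    for (size_t i = 0; i < counters_.size(); i++)
      std::printf("%s: %zu\n", kNames[i], counters_[i]);
  }

 private:
  std::array<size_t,
             static_cast<size_t>(AllocationOrigin::kNumberOfAllocationOrigins)>
      counters_{};  // value-initialized to zero
};

int main() {
  OriginCounters counters;
  counters.Count(AllocationOrigin::kRuntime);
  counters.Count(AllocationOrigin::kGC);
  counters.Count(AllocationOrigin::kGC);
  counters.Print();
}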
diff --git a/chromium/v8/src/heap/invalidated-slots-inl.h b/chromium/v8/src/heap/invalidated-slots-inl.h
index 58f6ac9bc83..35a08108f63 100644
--- a/chromium/v8/src/heap/invalidated-slots-inl.h
+++ b/chromium/v8/src/heap/invalidated-slots-inl.h
@@ -62,6 +62,48 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
return invalidated_object_.IsValidSlot(invalidated_object_.map(), offset);
}
+void InvalidatedSlotsCleanup::Free(Address free_start, Address free_end) {
+#ifdef DEBUG
+ DCHECK_LT(free_start, free_end);
+ // Free regions should come in increasing order and do not overlap
+ DCHECK_LE(last_free_, free_start);
+ last_free_ = free_start;
+#endif
+
+ if (iterator_ == iterator_end_) return;
+
+ // Ignore invalidated objects before free region
+ while (free_start >= invalidated_end_) {
+ ++iterator_;
+ NextInvalidatedObject();
+ }
+
+ // Loop here: Free region might contain multiple invalidated objects
+ while (free_end > invalidated_start_) {
+ // Case: Free region starts before current invalidated object
+ if (free_start <= invalidated_start_) {
+ iterator_ = invalidated_slots_->erase(iterator_);
+
+ } else {
+ // Case: Free region starts within current invalidated object
+ // (Can happen for right-trimmed objects)
+ iterator_++;
+ }
+
+ NextInvalidatedObject();
+ }
+}
+
+void InvalidatedSlotsCleanup::NextInvalidatedObject() {
+ if (iterator_ != iterator_end_) {
+ invalidated_start_ = iterator_->first.address();
+ invalidated_end_ = invalidated_start_ + iterator_->second;
+ } else {
+ invalidated_start_ = sentinel_;
+ invalidated_end_ = sentinel_;
+ }
+}
+
} // namespace internal
} // namespace v8
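[Editorial note: InvalidatedSlotsCleanup::Free above walks the page's sorted map of invalidated objects in lockstep with the free regions the sweeper reports, erasing entries whose object start lies inside a freed region and merely stepping past entries the region only truncates. A standalone sketch of the same walk over a std::map; addresses are plain integers and the class name is invented.]

// invalidated_slots_cleanup_sketch.cc -- illustrative only.
#include <cstdint>
#include <cstdio>
#include <map>

using Address = uintptr_t;
using InvalidatedSlots = std::map<Address, int>;  // object start -> size

class Cleanup {
 public:
  explicit Cleanup(InvalidatedSlots* slots)
      : slots_(slots), it_(slots->begin()) {}

  // Free regions must be reported in increasing, non-overlapping order.
  void Free(Address free_start, Address free_end) {
    // Skip invalidated objects that end before the freed region.
    while (it_ != slots_->end() && ObjectEnd() <= free_start) ++it_;
    // Handle every invalidated object that overlaps the freed region.
    while (it_ != slots_->end() && it_->first < free_end) {
      if (free_start <= it_->first) {
        it_ = slots_->erase(it_);  // object starts inside the free region
      } else {
        ++it_;                     // region only truncates a live object
      }
    }
  }

 private:
  Address ObjectEnd() const { return it_->first + it_->second; }

  InvalidatedSlots* slots_;
  InvalidatedSlots::iterator it_;
};

int main() {
  InvalidatedSlots slots{{100, 50}, {200, 20}, {300, 40}};
  Cleanup cleanup(&slots);
  cleanup.Free(180, 260);  // erases the object starting at 200
  std::printf("%zu remaining\n", slots.size());  // 2
}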
diff --git a/chromium/v8/src/heap/invalidated-slots.cc b/chromium/v8/src/heap/invalidated-slots.cc
index 368d189c556..8fa1518d683 100644
--- a/chromium/v8/src/heap/invalidated-slots.cc
+++ b/chromium/v8/src/heap/invalidated-slots.cc
@@ -8,18 +8,35 @@
namespace v8 {
namespace internal {
-InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
- // Adjust slots_in_free_space_are_valid_ if more spaces are added.
- DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
- chunk->InOldSpace() || chunk->InLargeObjectSpace());
+InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToOld(MemoryChunk* chunk) {
// The sweeper removes invalid slots and makes free space available for
// allocation. Slots for new objects can be recorded in the free space.
// Note that we cannot simply check for SweepingDone because pages in large
// object space are not swept but have SweepingDone() == true.
- slots_in_free_space_are_valid_ = chunk->SweepingDone() && chunk->InOldSpace();
+ bool slots_in_free_space_are_valid =
+ chunk->SweepingDone() && chunk->InOldSpace();
+ return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_OLD>(),
+ slots_in_free_space_are_valid);
+}
+
+InvalidatedSlotsFilter InvalidatedSlotsFilter::OldToNew(MemoryChunk* chunk) {
+ // Always treat these slots as valid for old-to-new for now. Invalid
+ // old-to-new slots are always cleared.
+ bool slots_in_free_space_are_valid = true;
+ return InvalidatedSlotsFilter(chunk, chunk->invalidated_slots<OLD_TO_NEW>(),
+ slots_in_free_space_are_valid);
+}
+
+InvalidatedSlotsFilter::InvalidatedSlotsFilter(
+ MemoryChunk* chunk, InvalidatedSlots* invalidated_slots,
+ bool slots_in_free_space_are_valid) {
+ // Adjust slots_in_free_space_are_valid_ if more spaces are added.
+ DCHECK_IMPLIES(invalidated_slots != nullptr,
+ chunk->InOldSpace() || chunk->InLargeObjectSpace());
+
+ slots_in_free_space_are_valid_ = slots_in_free_space_are_valid;
+ invalidated_slots = invalidated_slots ? invalidated_slots : &empty_;
- InvalidatedSlots* invalidated_slots =
- chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
iterator_ = invalidated_slots->begin();
iterator_end_ = invalidated_slots->end();
sentinel_ = chunk->area_end();
@@ -37,5 +54,33 @@ InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
#endif
}
+InvalidatedSlotsCleanup InvalidatedSlotsCleanup::OldToNew(MemoryChunk* chunk) {
+ return InvalidatedSlotsCleanup(chunk, chunk->invalidated_slots<OLD_TO_NEW>());
+}
+
+InvalidatedSlotsCleanup InvalidatedSlotsCleanup::NoCleanup(MemoryChunk* chunk) {
+ return InvalidatedSlotsCleanup(chunk, nullptr);
+}
+
+InvalidatedSlotsCleanup::InvalidatedSlotsCleanup(
+ MemoryChunk* chunk, InvalidatedSlots* invalidated_slots) {
+ invalidated_slots_ = invalidated_slots ? invalidated_slots : &empty_;
+ iterator_ = invalidated_slots_->begin();
+ iterator_end_ = invalidated_slots_->end();
+ sentinel_ = chunk->area_end();
+
+ if (iterator_ != iterator_end_) {
+ invalidated_start_ = iterator_->first.address();
+ invalidated_end_ = invalidated_start_ + iterator_->second;
+ } else {
+ invalidated_start_ = sentinel_;
+ invalidated_end_ = sentinel_;
+ }
+
+#ifdef DEBUG
+ last_free_ = chunk->area_start();
+#endif
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/invalidated-slots.h b/chromium/v8/src/heap/invalidated-slots.h
index 4098595fe46..4a722719106 100644
--- a/chromium/v8/src/heap/invalidated-slots.h
+++ b/chromium/v8/src/heap/invalidated-slots.h
@@ -30,7 +30,12 @@ using InvalidatedSlots = std::map<HeapObject, int, Object::Comparer>;
// n is the number of IsValid queries.
class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
public:
- explicit InvalidatedSlotsFilter(MemoryChunk* chunk);
+ static InvalidatedSlotsFilter OldToOld(MemoryChunk* chunk);
+ static InvalidatedSlotsFilter OldToNew(MemoryChunk* chunk);
+
+ explicit InvalidatedSlotsFilter(MemoryChunk* chunk,
+ InvalidatedSlots* invalidated_slots,
+ bool slots_in_free_space_are_valid);
inline bool IsValid(Address slot);
private:
@@ -48,6 +53,32 @@ class V8_EXPORT_PRIVATE InvalidatedSlotsFilter {
#endif
};
+class V8_EXPORT_PRIVATE InvalidatedSlotsCleanup {
+ public:
+ static InvalidatedSlotsCleanup OldToNew(MemoryChunk* chunk);
+ static InvalidatedSlotsCleanup NoCleanup(MemoryChunk* chunk);
+
+ explicit InvalidatedSlotsCleanup(MemoryChunk* chunk,
+ InvalidatedSlots* invalidated_slots);
+
+ inline void Free(Address free_start, Address free_end);
+
+ private:
+ InvalidatedSlots::iterator iterator_;
+ InvalidatedSlots::iterator iterator_end_;
+ InvalidatedSlots* invalidated_slots_;
+ InvalidatedSlots empty_;
+
+ Address sentinel_;
+ Address invalidated_start_;
+ Address invalidated_end_;
+
+ inline void NextInvalidatedObject();
+#ifdef DEBUG
+ Address last_free_;
+#endif
+};
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/local-allocator-inl.h b/chromium/v8/src/heap/local-allocator-inl.h
index 71dcd987781..10d6ce7370f 100644
--- a/chromium/v8/src/heap/local-allocator-inl.h
+++ b/chromium/v8/src/heap/local-allocator-inl.h
@@ -14,16 +14,17 @@ namespace internal {
AllocationResult LocalAllocator::Allocate(AllocationSpace space,
int object_size,
+ AllocationOrigin origin,
AllocationAlignment alignment) {
switch (space) {
case NEW_SPACE:
- return AllocateInNewSpace(object_size, alignment);
+ return AllocateInNewSpace(object_size, origin, alignment);
case OLD_SPACE:
return compaction_spaces_.Get(OLD_SPACE)->AllocateRaw(object_size,
- alignment);
+ alignment, origin);
case CODE_SPACE:
return compaction_spaces_.Get(CODE_SPACE)
- ->AllocateRaw(object_size, alignment);
+ ->AllocateRaw(object_size, alignment, origin);
default:
UNREACHABLE();
}
@@ -94,9 +95,9 @@ bool LocalAllocator::NewLocalAllocationBuffer() {
}
AllocationResult LocalAllocator::AllocateInNewSpace(
- int object_size, AllocationAlignment alignment) {
+ int object_size, AllocationOrigin origin, AllocationAlignment alignment) {
if (object_size > kMaxLabObjectSize) {
- return new_space_->AllocateRawSynchronized(object_size, alignment);
+ return new_space_->AllocateRawSynchronized(object_size, alignment, origin);
}
return AllocateInLAB(object_size, alignment);
}
diff --git a/chromium/v8/src/heap/local-allocator.h b/chromium/v8/src/heap/local-allocator.h
index 7019a79f21e..56da76a18da 100644
--- a/chromium/v8/src/heap/local-allocator.h
+++ b/chromium/v8/src/heap/local-allocator.h
@@ -42,12 +42,14 @@ class LocalAllocator {
}
inline AllocationResult Allocate(AllocationSpace space, int object_size,
+ AllocationOrigin origin,
AllocationAlignment alignment);
inline void FreeLast(AllocationSpace space, HeapObject object,
int object_size);
private:
inline AllocationResult AllocateInNewSpace(int object_size,
+ AllocationOrigin origin,
AllocationAlignment alignment);
inline bool NewLocalAllocationBuffer();
inline AllocationResult AllocateInLAB(int object_size,
diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc
index 3cd6620083b..f7067a60ea2 100644
--- a/chromium/v8/src/heap/mark-compact.cc
+++ b/chromium/v8/src/heap/mark-compact.cc
@@ -577,6 +577,7 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
heap()->old_space()->RefillFreeList();
heap()->code_space()->RefillFreeList();
heap()->map_space()->RefillFreeList();
+ heap()->map_space()->SortFreeList();
heap()->tracer()->NotifySweepingCompleted();
@@ -1291,8 +1292,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (AbortCompactionForTesting(object)) return false;
#endif // VERIFY_HEAP
AllocationAlignment alignment = HeapObject::RequiredAlignment(object.map());
- AllocationResult allocation =
- local_allocator_->Allocate(target_space, size, alignment);
+ AllocationResult allocation = local_allocator_->Allocate(
+ target_space, size, AllocationOrigin::kGC, alignment);
if (allocation.To(target_object)) {
MigrateObject(*target_object, object, size, target_space);
if (target_space == CODE_SPACE)
@@ -1398,8 +1399,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
AllocationAlignment alignment =
HeapObject::RequiredAlignment(old_object.map());
AllocationSpace space_allocated_in = NEW_SPACE;
- AllocationResult allocation =
- local_allocator_->Allocate(NEW_SPACE, size, alignment);
+ AllocationResult allocation = local_allocator_->Allocate(
+ NEW_SPACE, size, AllocationOrigin::kGC, alignment);
if (allocation.IsRetry()) {
allocation = AllocateInOldSpace(size, alignment);
space_allocated_in = OLD_SPACE;
@@ -1412,8 +1413,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
inline AllocationResult AllocateInOldSpace(int size_in_bytes,
AllocationAlignment alignment) {
- AllocationResult allocation =
- local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
+ AllocationResult allocation = local_allocator_->Allocate(
+ OLD_SPACE, size_in_bytes, AllocationOrigin::kGC, alignment);
if (allocation.IsRetry()) {
heap_->FatalProcessOutOfMemory(
"MarkCompactCollector: semi-space copy, fallback in old gen");
@@ -2688,7 +2689,8 @@ void MarkCompactCollector::EvacuateEpilogue() {
for (Page* p : *heap()->old_space()) {
DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
- DCHECK_NULL(p->invalidated_slots());
+ DCHECK_NULL(p->invalidated_slots<OLD_TO_OLD>());
+ DCHECK_NULL(p->invalidated_slots<OLD_TO_NEW>());
}
#endif
}
@@ -3416,9 +3418,12 @@ class RememberedSetUpdatingItem : public UpdatingItem {
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
+
+ DCHECK_NULL(chunk_->invalidated_slots<OLD_TO_NEW>());
+
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
- InvalidatedSlotsFilter filter(chunk_);
+ InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(chunk_);
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
[&filter](MaybeObjectSlot slot) {
@@ -3428,9 +3433,9 @@ class RememberedSetUpdatingItem : public UpdatingItem {
SlotSet::PREFREE_EMPTY_BUCKETS);
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
- chunk_->invalidated_slots() != nullptr) {
+ chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
#ifdef DEBUG
- for (auto object_size : *chunk_->invalidated_slots()) {
+ for (auto object_size : *chunk_->invalidated_slots<OLD_TO_OLD>()) {
HeapObject object = object_size.first;
int size = object_size.second;
DCHECK_LE(object.SizeFromMap(object.map()), size);
@@ -3438,7 +3443,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
#endif
// The invalidated slots are not needed after old-to-old slots were
      // processed.
- chunk_->ReleaseInvalidatedSlots();
+ chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
}
}
@@ -3552,13 +3557,17 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
const bool contains_old_to_new_slots =
chunk->slot_set<OLD_TO_NEW>() != nullptr ||
chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
- const bool contains_invalidated_slots =
- chunk->invalidated_slots() != nullptr;
+ const bool contains_old_to_old_invalidated_slots =
+ chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
+ const bool contains_old_to_new_invalidated_slots =
+ chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
- !contains_invalidated_slots)
+ !contains_old_to_old_invalidated_slots &&
+ !contains_old_to_new_invalidated_slots)
continue;
if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
- contains_invalidated_slots) {
+ contains_old_to_old_invalidated_slots ||
+ contains_old_to_new_invalidated_slots) {
job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
pages++;
}
@@ -4635,11 +4644,14 @@ class PageMarkingItem : public MarkingItem {
inline Heap* heap() { return chunk_->heap(); }
void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
- RememberedSet<OLD_TO_NEW>::Iterate(chunk_,
- [this, task](MaybeObjectSlot slot) {
- return CheckAndMarkObject(task, slot);
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this, task, &filter](MaybeObjectSlot slot) {
+ if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+ return CheckAndMarkObject(task, slot);
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
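
Editor's note, for context on the filter introduced in MarkUntypedPointers above: an InvalidatedSlotsFilter rejects slot addresses that fall inside objects invalidated after their slots were recorded. The sketch below is a simplified stand-alone version of that check over a sorted map of invalidated ranges, not V8's real MemoryChunk bookkeeping.

#include <cstdint>
#include <iostream>
#include <map>

using Address = uintptr_t;

// Simplified stand-in for V8's InvalidatedSlots: object start -> size in bytes.
using InvalidatedSlots = std::map<Address, int>;

class InvalidatedSlotsFilter {
 public:
  explicit InvalidatedSlotsFilter(const InvalidatedSlots* slots) : slots_(slots) {}

  // Returns false when |slot| lies inside an invalidated object, mirroring the
  // "if (!filter.IsValid(slot.address())) return REMOVE_SLOT;" pattern above.
  bool IsValid(Address slot) const {
    if (slots_ == nullptr || slots_->empty()) return true;
    auto it = slots_->upper_bound(slot);
    if (it == slots_->begin()) return true;
    --it;  // Last invalidated object starting at or before |slot|.
    return slot >= it->first + static_cast<Address>(it->second);
  }

 private:
  const InvalidatedSlots* slots_;
};

int main() {
  InvalidatedSlots invalidated = {{0x1000, 0x40}};  // one invalidated object
  InvalidatedSlotsFilter filter(&invalidated);
  std::cout << filter.IsValid(0x1010) << "\n";  // 0: inside the invalidated object
  std::cout << filter.IsValid(0x1080) << "\n";  // 1: past its end, still valid
}
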
diff --git a/chromium/v8/src/heap/object-stats.cc b/chromium/v8/src/heap/object-stats.cc
index 2a63896242a..2ee88361c96 100644
--- a/chromium/v8/src/heap/object-stats.cc
+++ b/chromium/v8/src/heap/object-stats.cc
@@ -35,11 +35,16 @@ class FieldStatsCollector : public ObjectVisitor {
public:
FieldStatsCollector(size_t* tagged_fields_count,
size_t* embedder_fields_count,
+ size_t* inobject_smi_fields_count,
size_t* unboxed_double_fields_count,
- size_t* raw_fields_count)
+ size_t* boxed_double_fields_count,
+ size_t* string_data_count, size_t* raw_fields_count)
: tagged_fields_count_(tagged_fields_count),
embedder_fields_count_(embedder_fields_count),
+ inobject_smi_fields_count_(inobject_smi_fields_count),
unboxed_double_fields_count_(unboxed_double_fields_count),
+ boxed_double_fields_count_(boxed_double_fields_count),
+ string_data_count_(string_data_count),
raw_fields_count_(raw_fields_count) {}
void RecordStats(HeapObject host) {
@@ -62,11 +67,32 @@ class FieldStatsCollector : public ObjectVisitor {
*tagged_fields_count_ -= field_stats.embedded_fields_count_;
*embedder_fields_count_ += field_stats.embedded_fields_count_;
+ // Smi fields are also included into pointer words.
+ DCHECK_LE(
+ field_stats.unboxed_double_fields_count_ * kDoubleSize / kTaggedSize,
+ raw_fields_count_in_object);
+ tagged_fields_count_in_object -= field_stats.smi_fields_count_;
+ *tagged_fields_count_ -= field_stats.smi_fields_count_;
+ *inobject_smi_fields_count_ += field_stats.smi_fields_count_;
+
// The rest are data words.
- DCHECK_LE(field_stats.unboxed_double_fields_count_,
- raw_fields_count_in_object);
- raw_fields_count_in_object -= field_stats.unboxed_double_fields_count_;
+ DCHECK_LE(
+ field_stats.unboxed_double_fields_count_ * kDoubleSize / kTaggedSize,
+ raw_fields_count_in_object);
+ raw_fields_count_in_object -=
+ field_stats.unboxed_double_fields_count_ * kDoubleSize / kTaggedSize;
*unboxed_double_fields_count_ += field_stats.unboxed_double_fields_count_;
+ } else if (host.IsHeapNumber()) {
+ DCHECK_LE(kDoubleSize / kTaggedSize, raw_fields_count_in_object);
+ raw_fields_count_in_object -= kDoubleSize / kTaggedSize;
+ *boxed_double_fields_count_ += 1;
+ } else if (host.IsSeqString()) {
+ int string_data = SeqString::cast(host).synchronized_length() *
+ (String::cast(host).IsOneByteRepresentation() ? 1 : 2) /
+ kTaggedSize;
+ DCHECK_LE(string_data, raw_fields_count_in_object);
+ raw_fields_count_in_object -= string_data;
+ *string_data_count_ += string_data;
}
*raw_fields_count_ += raw_fields_count_in_object;
}
@@ -92,9 +118,12 @@ class FieldStatsCollector : public ObjectVisitor {
private:
struct JSObjectFieldStats {
JSObjectFieldStats()
- : embedded_fields_count_(0), unboxed_double_fields_count_(0) {}
+ : embedded_fields_count_(0),
+ smi_fields_count_(0),
+ unboxed_double_fields_count_(0) {}
unsigned embedded_fields_count_ : kDescriptorIndexBitCount;
+ unsigned smi_fields_count_ : kDescriptorIndexBitCount;
unsigned unboxed_double_fields_count_ : kDescriptorIndexBitCount;
};
std::unordered_map<Map, JSObjectFieldStats, Object::Hasher>
@@ -104,7 +133,10 @@ class FieldStatsCollector : public ObjectVisitor {
size_t* const tagged_fields_count_;
size_t* const embedder_fields_count_;
+ size_t* const inobject_smi_fields_count_;
size_t* const unboxed_double_fields_count_;
+ size_t* const boxed_double_fields_count_;
+ size_t* const string_data_count_;
size_t* const raw_fields_count_;
};
@@ -130,6 +162,9 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
map.IsUnboxedDoubleField(index)) {
++stats.unboxed_double_fields_count_;
}
+ if (details.representation().IsSmi()) {
+ ++stats.smi_fields_count_;
+ }
}
}
}
@@ -149,7 +184,10 @@ void ObjectStats::ClearObjectStats(bool clear_last_time_stats) {
}
tagged_fields_count_ = 0;
embedder_fields_count_ = 0;
+ inobject_smi_fields_count_ = 0;
unboxed_double_fields_count_ = 0;
+ boxed_double_fields_count_ = 0;
+ string_data_count_ = 0;
raw_fields_count_ = 0;
}
@@ -208,8 +246,13 @@ void ObjectStats::PrintJSON(const char* key) {
PrintF(", \"tagged_fields\": %zu", tagged_fields_count_ * kTaggedSize);
PrintF(", \"embedder_fields\": %zu",
embedder_fields_count_ * kEmbedderDataSlotSize);
+ PrintF(", \"inobject_smi_fields\": %zu",
+ inobject_smi_fields_count_ * kTaggedSize);
PrintF(", \"unboxed_double_fields\": %zu",
unboxed_double_fields_count_ * kDoubleSize);
+ PrintF(", \"boxed_double_fields\": %zu",
+ boxed_double_fields_count_ * kDoubleSize);
+ PrintF(", \"string_data\": %zu", string_data_count_ * kTaggedSize);
PrintF(", \"other_raw_fields\": %zu", raw_fields_count_ * kSystemPointerSize);
PrintF(" }\n");
// bucket_sizes
@@ -263,8 +306,13 @@ void ObjectStats::Dump(std::stringstream& stream) {
stream << "\"tagged_fields\":" << (tagged_fields_count_ * kTaggedSize);
stream << ",\"embedder_fields\":"
<< (embedder_fields_count_ * kEmbedderDataSlotSize);
+ stream << ",\"inobject_smi_fields\": "
+ << (inobject_smi_fields_count_ * kTaggedSize);
stream << ",\"unboxed_double_fields\": "
<< (unboxed_double_fields_count_ * kDoubleSize);
+ stream << ",\"boxed_double_fields\": "
+ << (boxed_double_fields_count_ * kDoubleSize);
+ stream << ",\"string_data\": " << (string_data_count_ * kTaggedSize);
stream << ",\"other_raw_fields\":"
<< (raw_fields_count_ * kSystemPointerSize);
stream << "}, ";
@@ -427,7 +475,10 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
heap->mark_compact_collector()->non_atomic_marking_state()),
field_stats_collector_(
&stats->tagged_fields_count_, &stats->embedder_fields_count_,
- &stats->unboxed_double_fields_count_, &stats->raw_fields_count_) {}
+ &stats->inobject_smi_fields_count_,
+ &stats->unboxed_double_fields_count_,
+ &stats->boxed_double_fields_count_, &stats->string_data_count_,
+ &stats->raw_fields_count_) {}
bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject obj,
CowMode check_cow_array) {
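
Editor's note: the new object-stats counters above are all kept in tagged-word units, so double and string payloads are converted before being moved out of the raw-field bucket. A small sketch of that unit arithmetic follows; the sizes are illustrative and depend on the build (pointer compression changes kTaggedSize), and the integer division truncates exactly as in the patch.

#include <cstdio>

// Illustrative sizes; in V8 these depend on the build configuration.
constexpr int kTaggedSize = 8;
constexpr int kDoubleSize = 8;

int main() {
  // A HeapNumber stores one double payload, i.e. kDoubleSize / kTaggedSize
  // tagged words move from "other_raw_fields" to "boxed_double_fields".
  int boxed_double_words = kDoubleSize / kTaggedSize;

  // A one-byte SeqString of length 13 contributes length * bytes-per-char /
  // kTaggedSize words to "string_data", as in the IsSeqString() branch above.
  int string_length = 13;
  int bytes_per_char = 1;  // 2 for two-byte strings
  int string_data_words = string_length * bytes_per_char / kTaggedSize;

  std::printf("boxed double words: %d, string data words: %d\n",
              boxed_double_words, string_data_words);
}
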
diff --git a/chromium/v8/src/heap/object-stats.h b/chromium/v8/src/heap/object-stats.h
index 0bd2a1e3e4f..2a9b9675ef2 100644
--- a/chromium/v8/src/heap/object-stats.h
+++ b/chromium/v8/src/heap/object-stats.h
@@ -167,7 +167,10 @@ class ObjectStats {
size_t tagged_fields_count_;
size_t embedder_fields_count_;
+ size_t inobject_smi_fields_count_;
size_t unboxed_double_fields_count_;
+ size_t boxed_double_fields_count_;
+ size_t string_data_count_;
size_t raw_fields_count_;
friend class ObjectStatsCollectorImpl;
diff --git a/chromium/v8/src/heap/remembered-set.h b/chromium/v8/src/heap/remembered-set.h
index ea7fe0149ba..eefc565e008 100644
--- a/chromium/v8/src/heap/remembered-set.h
+++ b/chromium/v8/src/heap/remembered-set.h
@@ -122,7 +122,7 @@ class RememberedSet : public AllStatic {
SlotSet* slots = chunk->slot_set<type>();
TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
if (slots != nullptr || typed_slots != nullptr ||
- chunk->invalidated_slots() != nullptr) {
+ chunk->invalidated_slots<type>() != nullptr) {
callback(chunk);
}
}
@@ -256,7 +256,7 @@ class RememberedSet : public AllStatic {
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseSlotSet<OLD_TO_OLD>();
chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
- chunk->ReleaseInvalidatedSlots();
+ chunk->ReleaseInvalidatedSlots<OLD_TO_OLD>();
}
}
diff --git a/chromium/v8/src/heap/scavenger-inl.h b/chromium/v8/src/heap/scavenger-inl.h
index 9c605f70893..7729807a8a4 100644
--- a/chromium/v8/src/heap/scavenger-inl.h
+++ b/chromium/v8/src/heap/scavenger-inl.h
@@ -135,8 +135,8 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
DCHECK(heap()->AllowedToBeMigrated(map, object, NEW_SPACE));
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
- AllocationResult allocation =
- allocator_.Allocate(NEW_SPACE, object_size, alignment);
+ AllocationResult allocation = allocator_.Allocate(
+ NEW_SPACE, object_size, AllocationOrigin::kGC, alignment);
HeapObject target;
if (allocation.To(&target)) {
@@ -171,8 +171,8 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
- AllocationResult allocation =
- allocator_.Allocate(OLD_SPACE, object_size, alignment);
+ AllocationResult allocation = allocator_.Allocate(
+ OLD_SPACE, object_size, AllocationOrigin::kGC, alignment);
HeapObject target;
if (allocation.To(&target)) {
diff --git a/chromium/v8/src/heap/scavenger.cc b/chromium/v8/src/heap/scavenger.cc
index 70b514142fe..7d56882953e 100644
--- a/chromium/v8/src/heap/scavenger.cc
+++ b/chromium/v8/src/heap/scavenger.cc
@@ -8,6 +8,7 @@
#include "src/heap/barrier.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
@@ -371,7 +372,7 @@ void ScavengerCollector::MergeSurvivingNewLargeObjects(
int ScavengerCollector::NumberOfScavengeTasks() {
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
- static_cast<int>(heap_->new_space()->TotalCapacity()) / MB;
+ static_cast<int>(heap_->new_space()->TotalCapacity()) / MB + 1;
static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
int tasks =
Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
@@ -431,12 +432,15 @@ void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
void Scavenger::ScavengePage(MemoryChunk* page) {
CodePageMemoryModificationScope memory_modification_scope(page);
- RememberedSet<OLD_TO_NEW>::Iterate(page,
- [this](MaybeObjectSlot addr) {
- return CheckAndScavengeObject(heap_,
- addr);
- },
- SlotSet::KEEP_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ page,
+ [this](MaybeObjectSlot addr) {
+ return CheckAndScavengeObject(heap_, addr);
+ },
+ SlotSet::KEEP_EMPTY_BUCKETS);
+
+ DCHECK_NULL(page->invalidated_slots<OLD_TO_NEW>());
+
RememberedSet<OLD_TO_NEW>::IterateTyped(
page, [=](SlotType type, Address addr) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
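
Editor's note, to make the task-count change above concrete: the number of scavenge tasks is now the new-space capacity in MB plus one, clamped by a hard task cap and the worker-thread count. The sketch below reproduces only that clamping; the capacity, cap, and core count fed in are made-up inputs, not values taken from V8.

#include <algorithm>
#include <cstdio>

constexpr size_t MB = 1024 * 1024;

// Mirrors ScavengerCollector::NumberOfScavengeTasks() after the patch:
// a capacity-based estimate + 1, clamped by a task cap and the core count.
int NumberOfScavengeTasks(size_t new_space_capacity, int max_tasks, int num_cores) {
  int num_scavenge_tasks =
      static_cast<int>(new_space_capacity) / static_cast<int>(MB) + 1;
  return std::max(1, std::min({num_scavenge_tasks, max_tasks, num_cores}));
}

int main() {
  // With a 1 MB new space, the "+ 1" guarantees two tasks instead of one,
  // as long as the cap and the core count allow it.
  std::printf("%d\n", NumberOfScavengeTasks(1 * MB, 8, 4));  // prints 2
}
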
diff --git a/chromium/v8/src/heap/setup-heap-internal.cc b/chromium/v8/src/heap/setup-heap-internal.cc
index a936521a7e4..15ca6d79303 100644
--- a/chromium/v8/src/heap/setup-heap-internal.cc
+++ b/chromium/v8/src/heap/setup-heap-internal.cc
@@ -358,8 +358,6 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
Context::NUMBER_FUNCTION_INDEX)
- ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, MutableHeapNumber::kSize,
- mutable_heap_number)
ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
Context::SYMBOL_FUNCTION_INDEX)
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
@@ -463,6 +461,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(AWAIT_CONTEXT_TYPE, await_context)
ALLOCATE_VARSIZE_MAP(BLOCK_CONTEXT_TYPE, block_context)
ALLOCATE_VARSIZE_MAP(MODULE_CONTEXT_TYPE, module_context)
+ ALLOCATE_VARSIZE_MAP(NATIVE_CONTEXT_TYPE, native_context)
ALLOCATE_VARSIZE_MAP(EVAL_CONTEXT_TYPE, eval_context)
ALLOCATE_VARSIZE_MAP(SCRIPT_CONTEXT_TYPE, script_context)
ALLOCATE_VARSIZE_MAP(SCRIPT_CONTEXT_TABLE_TYPE, script_context_table)
@@ -470,8 +469,6 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
object_boilerplate_description)
- ALLOCATE_MAP(NATIVE_CONTEXT_TYPE, NativeContext::kSize, native_context)
-
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
side_effect_call_handler_info)
ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
@@ -825,6 +822,15 @@ void Heap::CreateInitialObjects() {
factory->NewFeedbackMetadata(0, 0, AllocationType::kReadOnly);
set_empty_feedback_metadata(*empty_feedback_metadata);
+ // Canonical scope arrays.
+ Handle<ScopeInfo> global_this_binding =
+ ScopeInfo::CreateGlobalThisBinding(isolate());
+ set_global_this_binding_scope_info(*global_this_binding);
+
+ Handle<ScopeInfo> empty_function =
+ ScopeInfo::CreateForEmptyFunction(isolate());
+ set_empty_function_scope_info(*empty_function);
+
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
script->set_type(Script::TYPE_NATIVE);
diff --git a/chromium/v8/src/heap/slot-set.h b/chromium/v8/src/heap/slot-set.h
index f7efc642473..c71192bfdce 100644
--- a/chromium/v8/src/heap/slot-set.h
+++ b/chromium/v8/src/heap/slot-set.h
@@ -52,9 +52,6 @@ class SlotSet : public Malloced {
void SetPageStart(Address page_start) { page_start_ = page_start; }
// The slot offset specifies a slot at address page_start_ + slot_offset.
- // This method should only be called on the main thread because concurrent
- // allocation of the bucket is not thread-safe.
- //
// AccessMode defines whether there can be concurrent access on the buckets
// or not.
template <AccessMode access_mode = AccessMode::ATOMIC>
@@ -181,7 +178,10 @@ class SlotSet : public Malloced {
// Iterate over all slots in the set and for each slot invoke the callback.
// If the callback returns REMOVE_SLOT then the slot is removed from the set.
// Returns the new number of slots.
- // This method should only be called on the main thread.
+ //
+ // Iteration can be performed concurrently with other operations that use
+  // atomic access mode such as insertion and removal. However, there is no
+ // guarantee about ordering and linearizability.
//
// Sample usage:
// Iterate([](MaybeObjectSlot slot) {
@@ -411,8 +411,8 @@ class V8_EXPORT_PRIVATE TypedSlots {
void Merge(TypedSlots* other);
protected:
- class OffsetField : public BitField<int, 0, 29> {};
- class TypeField : public BitField<SlotType, 29, 3> {};
+ using OffsetField = BitField<int, 0, 29>;
+ using TypeField = BitField<SlotType, 29, 3>;
struct TypedSlot {
uint32_t type_and_offset;
};
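
Editor's note: the BitField aliases above pack a typed slot's type and on-page offset into the single 32-bit type_and_offset word, with bits 0-28 holding the offset and bits 29-31 the SlotType. Below is a minimal sketch of that packing with a hand-rolled BitField; V8's real template and its SlotType values differ, this only shows the encode/decode layout.

#include <cstdint>
#include <cstdio>

// Minimal stand-in for the BitField<T, shift, size> template used above.
template <typename T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << size) - 1) << shift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> shift);
  }
};

enum SlotType { kEmbeddedObjectSlot = 0, kCodeEntrySlot = 1 };  // illustrative subset

using OffsetField = BitField<int, 0, 29>;   // as in TypedSlots
using TypeField = BitField<SlotType, 29, 3>;

int main() {
  uint32_t type_and_offset =
      TypeField::encode(kCodeEntrySlot) | OffsetField::encode(0x1234);
  std::printf("type=%d offset=0x%x\n",
              static_cast<int>(TypeField::decode(type_and_offset)),
              static_cast<unsigned>(OffsetField::decode(type_and_offset)));
}
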
diff --git a/chromium/v8/src/heap/spaces-inl.h b/chromium/v8/src/heap/spaces-inl.h
index 3b4ed8d30ad..2feb47bec1b 100644
--- a/chromium/v8/src/heap/spaces-inl.h
+++ b/chromium/v8/src/heap/spaces-inl.h
@@ -172,8 +172,6 @@ bool PagedSpace::Contains(Object o) {
void PagedSpace::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
- DCHECK_EQ(free_list(), category->owner());
- category->set_free_list(nullptr);
free_list()->RemoveCategory(category);
});
}
@@ -182,9 +180,8 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
- category->set_free_list(free_list());
added += category->available();
- category->Relink();
+ category->Relink(free_list());
});
DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
@@ -315,10 +312,51 @@ MemoryChunk* OldGenerationMemoryChunkIterator::next() {
UNREACHABLE();
}
-FreeList* FreeListCategory::owner() { return free_list_; }
+bool FreeListCategory::is_linked(FreeList* owner) const {
+ return prev_ != nullptr || next_ != nullptr ||
+ owner->categories_[type_] == this;
+}
+
+void FreeListCategory::UpdateCountersAfterAllocation(size_t allocation_size) {
+ available_ -= allocation_size;
+}
+
+Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
+ FreeListCategory* category_top = top(type);
+ if (category_top != nullptr) {
+ DCHECK(!category_top->top().is_null());
+ return Page::FromHeapObject(category_top->top());
+ } else {
+ return nullptr;
+ }
+}
-bool FreeListCategory::is_linked() {
- return prev_ != nullptr || next_ != nullptr;
+Page* FreeListLegacy::GetPageForSize(size_t size_in_bytes) {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(kHuge);
+ if (!page && static_cast<int>(kLarge) >= minimum_category)
+ page = GetPageForCategoryType(kLarge);
+ if (!page && static_cast<int>(kMedium) >= minimum_category)
+ page = GetPageForCategoryType(kMedium);
+ if (!page && static_cast<int>(kSmall) >= minimum_category)
+ page = GetPageForCategoryType(kSmall);
+ if (!page && static_cast<int>(kTiny) >= minimum_category)
+ page = GetPageForCategoryType(kTiny);
+ if (!page && static_cast<int>(kTiniest) >= minimum_category)
+ page = GetPageForCategoryType(kTiniest);
+ return page;
+}
+
+Page* FreeListFastAlloc::GetPageForSize(size_t size_in_bytes) {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(kHuge);
+ if (!page && static_cast<int>(kLarge) >= minimum_category)
+ page = GetPageForCategoryType(kLarge);
+ if (!page && static_cast<int>(kMedium) >= minimum_category)
+ page = GetPageForCategoryType(kMedium);
+ return page;
}
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
@@ -338,11 +376,12 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
-bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
+bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
return true;
}
- return SlowRefillLinearAllocationArea(size_in_bytes);
+ return SlowRefillLinearAllocationArea(size_in_bytes, origin);
}
HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
@@ -371,19 +410,26 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
return HeapObject::FromAddress(current_top);
}
-AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
+ AllocationOrigin origin) {
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
- if (!EnsureLinearAllocationArea(size_in_bytes)) {
+ if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
HeapObject object = AllocateLinearly(size_in_bytes);
DCHECK(!object.is_null());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
return object;
}
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment) {
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
int allocation_size = size_in_bytes;
@@ -393,7 +439,7 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
- if (!EnsureLinearAllocationArea(allocation_size)) {
+ if (!EnsureLinearAllocationArea(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
allocation_size = size_in_bytes;
@@ -401,12 +447,17 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
DCHECK(!object.is_null());
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
return object;
}
-
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment) {
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
@@ -421,11 +472,12 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
- AllocationResult result = alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment)
- : AllocateRawUnaligned(size_in_bytes);
+ AllocationResult result =
+ alignment != kWordAligned
+ ? AllocateRawAligned(size_in_bytes, alignment, origin)
+ : AllocateRawUnaligned(size_in_bytes, origin);
#else
- AllocationResult result = AllocateRawUnaligned(size_in_bytes);
+ AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
@@ -439,13 +491,12 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
return result;
}
-
// -----------------------------------------------------------------------------
// NewSpace
-
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment) {
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
Address top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -472,11 +523,15 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
return obj;
}
-
-AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
+ AllocationOrigin origin) {
Address top = allocation_info_.top();
if (allocation_info_.limit() < top + size_in_bytes) {
// See if we can create room.
@@ -493,12 +548,16 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
return obj;
}
-
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment) {
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
if (top() < top_on_previous_step_) {
// Generated code decreased the top() pointer to do folded allocations
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
@@ -507,8 +566,8 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
}
#ifdef V8_HOST_ARCH_32_BIT
return alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment)
- : AllocateRawUnaligned(size_in_bytes);
+ ? AllocateRawAligned(size_in_bytes, alignment, origin)
+ : AllocateRawUnaligned(size_in_bytes, origin);
#else
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
@@ -516,14 +575,14 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
// unaligned access since both x64 and arm64 architectures (where pointer
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
- return AllocateRawUnaligned(size_in_bytes);
+ return AllocateRawUnaligned(size_in_bytes, origin);
#endif
}
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
- int size_in_bytes, AllocationAlignment alignment) {
+ int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
base::MutexGuard guard(&mutex_);
- return AllocateRaw(size_in_bytes, alignment);
+ return AllocateRaw(size_in_bytes, alignment, origin);
}
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
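
Editor's note: the GetPageForSize helpers above walk the free-list categories from the largest downward, stopping at the smallest category that can still satisfy the request and returning the first one that actually has a page. A compact sketch of that walk over a plain array of category tops follows; the category enum and Page struct are simplified stand-ins, not V8's classes.

#include <array>
#include <cstdio>

// Simplified category indices in increasing size order, as in FreeListLegacy.
enum FreeListCategoryType { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge, kNumberOfCategories };

struct Page { int id; };

// pages[c] stands in for the page owning the top node of category c, or nullptr.
Page* GetPageForSize(const std::array<Page*, kNumberOfCategories>& pages,
                     FreeListCategoryType minimum_category) {
  // Prefer the category with the biggest nodes and fall back toward the
  // smallest category whose nodes can still hold the requested size.
  for (int cat = kHuge; cat >= minimum_category; cat--) {
    if (pages[cat] != nullptr) return pages[cat];
  }
  return nullptr;
}

int main() {
  Page medium{2};
  std::array<Page*, kNumberOfCategories> pages{};  // all null
  pages[kMedium] = &medium;
  Page* p = GetPageForSize(pages, kSmall);  // kMedium >= kSmall, so it qualifies
  std::printf("%d\n", p ? p->id : -1);      // prints 2
}
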
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index 438308a346d..dd8ba301018 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -703,7 +703,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
nullptr);
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
- chunk->invalidated_slots_ = nullptr;
+ chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
+ chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
chunk->set_concurrent_sweeping_state(kSweepingDone);
@@ -821,8 +822,7 @@ void Page::AllocateFreeListCategories() {
categories_ = new FreeListCategory*[free_list()->number_of_categories()]();
for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
DCHECK_NULL(categories_[i]);
- categories_[i] = new FreeListCategory(
- reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
+ categories_[i] = new FreeListCategory();
}
}
@@ -1379,7 +1379,8 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
- ReleaseInvalidatedSlots();
+ ReleaseInvalidatedSlots<OLD_TO_NEW>();
+ ReleaseInvalidatedSlots<OLD_TO_OLD>();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
@@ -1461,53 +1462,107 @@ void MemoryChunk::ReleaseTypedSlotSet() {
}
}
+template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_NEW>();
+template InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots<OLD_TO_OLD>();
+
+template <RememberedSetType type>
InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
- DCHECK_NULL(invalidated_slots_);
- invalidated_slots_ = new InvalidatedSlots();
- return invalidated_slots_;
+ DCHECK_NULL(invalidated_slots_[type]);
+ invalidated_slots_[type] = new InvalidatedSlots();
+ return invalidated_slots_[type];
}
+template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_NEW>();
+template void MemoryChunk::ReleaseInvalidatedSlots<OLD_TO_OLD>();
+
+template <RememberedSetType type>
void MemoryChunk::ReleaseInvalidatedSlots() {
- if (invalidated_slots_) {
- delete invalidated_slots_;
- invalidated_slots_ = nullptr;
+ if (invalidated_slots_[type]) {
+ delete invalidated_slots_[type];
+ invalidated_slots_[type] = nullptr;
}
}
+template V8_EXPORT_PRIVATE void
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(HeapObject object,
+ int size);
+template V8_EXPORT_PRIVATE void
+MemoryChunk::RegisterObjectWithInvalidatedSlots<OLD_TO_OLD>(HeapObject object,
+ int size);
+
+template <RememberedSetType type>
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject object,
int size) {
- if (!ShouldSkipEvacuationSlotRecording()) {
- if (invalidated_slots() == nullptr) {
- AllocateInvalidatedSlots();
+ bool skip_slot_recording;
+
+ if (type == OLD_TO_NEW) {
+ skip_slot_recording = InYoungGeneration();
+ } else {
+ skip_slot_recording = ShouldSkipEvacuationSlotRecording();
+ }
+
+ if (skip_slot_recording) {
+ return;
+ }
+
+ if (invalidated_slots<type>() == nullptr) {
+ AllocateInvalidatedSlots<type>();
+ }
+
+ InvalidatedSlots* invalidated_slots = this->invalidated_slots<type>();
+ InvalidatedSlots::iterator it = invalidated_slots->lower_bound(object);
+
+ if (it != invalidated_slots->end() && it->first == object) {
+    // The object was already inserted.
+ CHECK_LE(size, it->second);
+ return;
+ }
+
+ it = invalidated_slots->insert(it, std::make_pair(object, size));
+
+  // Prevent overlapping invalidated objects for old-to-new.
+ if (type == OLD_TO_NEW && it != invalidated_slots->begin()) {
+ HeapObject pred = (--it)->first;
+ int pred_size = it->second;
+ DCHECK_LT(pred.address(), object.address());
+
+ if (pred.address() + pred_size > object.address()) {
+ it->second = static_cast<int>(object.address() - pred.address());
}
- int old_size = (*invalidated_slots())[object];
- (*invalidated_slots())[object] = std::max(old_size, size);
}
}
+template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_NEW>(
+ HeapObject object);
+template bool MemoryChunk::RegisteredObjectWithInvalidatedSlots<OLD_TO_OLD>(
+ HeapObject object);
+
+template <RememberedSetType type>
bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject object) {
- if (ShouldSkipEvacuationSlotRecording()) {
- // Invalidated slots do not matter if we are not recording slots.
- return true;
- }
- if (invalidated_slots() == nullptr) {
+ if (invalidated_slots<type>() == nullptr) {
return false;
}
- return invalidated_slots()->find(object) != invalidated_slots()->end();
+ return invalidated_slots<type>()->find(object) !=
+ invalidated_slots<type>()->end();
}
+template void MemoryChunk::MoveObjectWithInvalidatedSlots<OLD_TO_OLD>(
+ HeapObject old_start, HeapObject new_start);
+
+template <RememberedSetType type>
void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
HeapObject new_start) {
DCHECK_LT(old_start, new_start);
DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
MemoryChunk::FromHeapObject(new_start));
- if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
- auto it = invalidated_slots()->find(old_start);
- if (it != invalidated_slots()->end()) {
+ static_assert(type == OLD_TO_OLD, "only use this for old-to-old slots");
+ if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots<type>()) {
+ auto it = invalidated_slots<type>()->find(old_start);
+ if (it != invalidated_slots<type>()->end()) {
int old_size = it->second;
int delta = static_cast<int>(new_start.address() - old_start.address());
- invalidated_slots()->erase(it);
- (*invalidated_slots())[new_start] = old_size - delta;
+ invalidated_slots<type>()->erase(it);
+ (*invalidated_slots<type>())[new_start] = old_size - delta;
}
}
}
@@ -1532,10 +1587,6 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
// -----------------------------------------------------------------------------
// PagedSpace implementation
-void Space::CheckOffsetsAreConsistent() const {
- DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
-}
-
void Space::AddAllocationObserver(AllocationObserver* observer) {
allocation_observers_.push_back(observer);
StartNextInlineAllocationStep();
@@ -1612,8 +1663,9 @@ void PagedSpace::RefillFreeList() {
// We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
// entries here to make them unavailable for allocations.
if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
- p->ForAllFreeListCategories(
- [](FreeListCategory* category) { category->Reset(); });
+ p->ForAllFreeListCategories([this](FreeListCategory* category) {
+ category->Reset(free_list());
+ });
}
// Only during compaction pages can actually change ownership. This is
// safe because there exists no other competing action on the page links
@@ -1645,6 +1697,11 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// area_size_
other->FreeLinearAllocationArea();
+ for (int i = static_cast<int>(AllocationOrigin::kFirstAllocationOrigin);
+ i <= static_cast<int>(AllocationOrigin::kLastAllocationOrigin); i++) {
+ allocations_origins_[i] += other->allocations_origins_[i];
+ }
+
// The linear allocation area of {other} should be destroyed now.
DCHECK_EQ(kNullAddress, other->top());
DCHECK_EQ(kNullAddress, other->limit());
@@ -1846,6 +1903,20 @@ Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
}
}
+void SpaceWithLinearArea::UpdateAllocationOrigins(AllocationOrigin origin) {
+ DCHECK(!((origin != AllocationOrigin::kGC) &&
+ (heap()->isolate()->current_vm_state() == GC)));
+ allocations_origins_[static_cast<int>(origin)]++;
+}
+
+void SpaceWithLinearArea::PrintAllocationsOrigins() {
+ PrintIsolate(
+ heap()->isolate(),
+ "Allocations Origins for %s: GeneratedCode:%zu - Runtime:%zu - GC:%zu\n",
+ name(), allocations_origins_[0], allocations_origins_[1],
+ allocations_origins_[2]);
+}
+
void PagedSpace::MarkLinearAllocationAreaBlack() {
DCHECK(heap()->incremental_marking()->black_allocation());
Address current_top = top();
@@ -1911,7 +1982,6 @@ void PagedSpace::ReleasePage(Page* page) {
DCHECK_EQ(page->owner(), this);
free_list_->EvictFreeListItems(page);
- DCHECK(!free_list_->ContainsPageFreeListItems(page));
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
DCHECK(!top_on_previous_step_);
@@ -1951,7 +2021,8 @@ std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
return std::unique_ptr<ObjectIterator>(new PagedSpaceObjectIterator(this));
}
-bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
+bool PagedSpace::RefillLinearAllocationAreaFromFreeList(
+ size_t size_in_bytes, AllocationOrigin origin) {
DCHECK(IsAligned(size_in_bytes, kTaggedSize));
DCHECK_LE(top(), limit());
#ifdef DEBUG
@@ -1974,9 +2045,9 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
}
size_t new_node_size = 0;
- FreeSpace new_node = free_list_->Allocate(size_in_bytes, &new_node_size);
+ FreeSpace new_node =
+ free_list_->Allocate(size_in_bytes, &new_node_size, origin);
if (new_node.is_null()) return false;
-
DCHECK_GE(new_node_size, size_in_bytes);
// The old-space-step might have finished sweeping and restarted marking.
@@ -2895,42 +2966,41 @@ size_t NewSpace::CommittedPhysicalMemory() {
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
-
-void FreeListCategory::Reset() {
+void FreeListCategory::Reset(FreeList* owner) {
+ if (is_linked(owner) && !top().is_null()) {
+ owner->DecreaseAvailableBytes(available_);
+ }
set_top(FreeSpace());
set_prev(nullptr);
set_next(nullptr);
available_ = 0;
- length_ = 0;
}
FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
size_t* node_size) {
- DCHECK(page()->CanAllocate());
FreeSpace node = top();
DCHECK(!node.is_null());
+ DCHECK(Page::FromHeapObject(node)->CanAllocate());
if (static_cast<size_t>(node.Size()) < minimum_size) {
*node_size = 0;
return FreeSpace();
}
set_top(node.next());
*node_size = node.Size();
- available_ -= *node_size;
- length_--;
+ UpdateCountersAfterAllocation(*node_size);
return node;
}
FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
size_t* node_size) {
- DCHECK(page()->CanAllocate());
FreeSpace prev_non_evac_node;
for (FreeSpace cur_node = top(); !cur_node.is_null();
cur_node = cur_node.next()) {
+ DCHECK(Page::FromHeapObject(cur_node)->CanAllocate());
size_t size = cur_node.size();
if (size >= minimum_size) {
DCHECK_GE(available_, size);
- available_ -= size;
- length_--;
+ UpdateCountersAfterAllocation(size);
if (cur_node == top()) {
set_top(cur_node.next());
}
@@ -2950,19 +3020,21 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
return FreeSpace();
}
-void FreeListCategory::Free(Address start, size_t size_in_bytes,
- FreeMode mode) {
+void FreeListCategory::Free(Address start, size_t size_in_bytes, FreeMode mode,
+ FreeList* owner) {
FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
free_space.set_next(top());
set_top(free_space);
available_ += size_in_bytes;
- length_++;
- if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
- owner()->AddCategory(this);
+ if (mode == kLinkCategory) {
+ if (is_linked(owner)) {
+ owner->IncreaseAvailableBytes(size_in_bytes);
+ } else {
+ owner->AddCategory(this);
+ }
}
}
-
void FreeListCategory::RepairFreeList(Heap* heap) {
Map free_space_map = ReadOnlyRoots(heap).free_space_map();
FreeSpace n = top();
@@ -2977,21 +3049,30 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
}
}
-void FreeListCategory::Relink() {
- DCHECK(!is_linked());
- owner()->AddCategory(this);
+void FreeListCategory::Relink(FreeList* owner) {
+ DCHECK(!is_linked(owner));
+ owner->AddCategory(this);
}
// ------------------------------------------------
// Generic FreeList methods (alloc/free related)
FreeList* FreeList::CreateFreeList() {
- if (FLAG_gc_freelist_strategy == 1) {
- return new FreeListFastAlloc();
- } else if (FLAG_gc_freelist_strategy == 2) {
- return new FreeListMany();
- } else {
- return new FreeListLegacy();
+ switch (FLAG_gc_freelist_strategy) {
+ case 0:
+ return new FreeListLegacy();
+ case 1:
+ return new FreeListFastAlloc();
+ case 2:
+ return new FreeListMany();
+ case 3:
+ return new FreeListManyCached();
+ case 4:
+ return new FreeListManyCachedFastPath();
+ case 5:
+ return new FreeListManyCachedOrigin();
+ default:
+ FATAL("Invalid FreeList strategy");
}
}
@@ -3001,6 +3082,7 @@ FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
if (category == nullptr) return FreeSpace();
FreeSpace node = category->PickNodeFromList(minimum_size, node_size);
if (!node.is_null()) {
+ DecreaseAvailableBytes(*node_size);
DCHECK(IsVeryLong() || Available() == SumFreeLists());
}
if (category->is_empty()) {
@@ -3018,6 +3100,7 @@ FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
FreeListCategory* current = it.Next();
node = current->SearchForNodeInList(minimum_size, node_size);
if (!node.is_null()) {
+ DecreaseAvailableBytes(*node_size);
DCHECK(IsVeryLong() || Available() == SumFreeLists());
if (current->is_empty()) {
RemoveCategory(current);
@@ -3042,7 +3125,7 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
- page->free_list_category(type)->Free(start, size_in_bytes, mode);
+ page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
DCHECK_EQ(page->AvailableInFreeList(),
page->AvailableInFreeListFromAllocatedBytes());
return 0;
@@ -3063,7 +3146,8 @@ FreeListLegacy::FreeListLegacy() {
FreeListLegacy::~FreeListLegacy() { delete[] categories_; }
-FreeSpace FreeListLegacy::Allocate(size_t size_in_bytes, size_t* node_size) {
+FreeSpace FreeListLegacy::Allocate(size_t size_in_bytes, size_t* node_size,
+ AllocationOrigin origin) {
DCHECK_GE(kMaxBlockSize, size_in_bytes);
FreeSpace node;
// First try the allocation fast path: try to allocate the minimum element
@@ -3121,7 +3205,8 @@ FreeListFastAlloc::FreeListFastAlloc() {
FreeListFastAlloc::~FreeListFastAlloc() { delete[] categories_; }
-FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size) {
+FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size,
+ AllocationOrigin origin) {
DCHECK_GE(kMaxBlockSize, size_in_bytes);
FreeSpace node;
// Try to allocate the biggest element possible (to make the most of later
@@ -3143,16 +3228,7 @@ FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size) {
// ------------------------------------------------
// FreeListMany implementation
-// Cf. the declaration of |categories_max| in |spaces.h| to see how this is
-// computed.
-const size_t FreeListMany::categories_max[kNumberOfCategories] = {
- 24, 32, 40, 48, 56, 64, 72,
- 80, 88, 96, 104, 112, 120, 128,
- 136, 144, 152, 160, 168, 176, 184,
- 192, 200, 208, 216, 224, 232, 240,
- 248, 256, 384, 512, 768, 1024, 1536,
- 2048, 3072, 4080, 4088, 4096, 6144, 8192,
- 12288, 16384, 24576, 32768, 49152, 65536, Page::kPageSize};
+constexpr unsigned int FreeListMany::categories_min[kNumberOfCategories];
FreeListMany::FreeListMany() {
// Initializing base (FreeList) fields
@@ -3164,31 +3240,36 @@ FreeListMany::FreeListMany() {
Reset();
}
+FreeListMany::~FreeListMany() { delete[] categories_; }
+
size_t FreeListMany::GuaranteedAllocatable(size_t maximum_freed) {
- if (maximum_freed < categories_max[0]) {
+ if (maximum_freed < categories_min[0]) {
return 0;
}
- for (int cat = kFirstCategory + 1; cat < last_category_; cat++) {
- if (maximum_freed <= categories_max[cat]) {
- return categories_max[cat - 1];
+ for (int cat = kFirstCategory + 1; cat <= last_category_; cat++) {
+ if (maximum_freed < categories_min[cat]) {
+ return categories_min[cat - 1];
}
}
return maximum_freed;
}
Page* FreeListMany::GetPageForSize(size_t size_in_bytes) {
- const int minimum_category =
- static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
- Page* page = GetPageForCategoryType(last_category_);
- for (int cat = last_category_ - 1; !page && cat >= minimum_category; cat--) {
+ FreeListCategoryType minimum_category =
+ SelectFreeListCategoryType(size_in_bytes);
+ Page* page = nullptr;
+ for (int cat = minimum_category + 1; !page && cat <= last_category_; cat++) {
page = GetPageForCategoryType(cat);
}
+ if (!page) {
+ // Might return a page in which |size_in_bytes| will not fit.
+ page = GetPageForCategoryType(minimum_category);
+ }
return page;
}
-FreeListMany::~FreeListMany() { delete[] categories_; }
-
-FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size) {
+FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size,
+ AllocationOrigin origin) {
DCHECK_GE(kMaxBlockSize, size_in_bytes);
FreeSpace node;
FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
@@ -3211,39 +3292,258 @@ FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size) {
}
// ------------------------------------------------
+// FreeListManyCached implementation
+
+FreeListManyCached::FreeListManyCached() { ResetCache(); }
+
+void FreeListManyCached::Reset() {
+ ResetCache();
+ FreeListMany::Reset();
+}
+
+bool FreeListManyCached::AddCategory(FreeListCategory* category) {
+ bool was_added = FreeList::AddCategory(category);
+
+ // Updating cache
+ if (was_added) {
+ UpdateCacheAfterAddition(category->type_);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+
+ return was_added;
+}
+
+void FreeListManyCached::RemoveCategory(FreeListCategory* category) {
+ FreeList::RemoveCategory(category);
+
+ // Updating cache
+ int type = category->type_;
+ if (categories_[type] == nullptr) {
+ UpdateCacheAfterRemoval(type);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+}
+
+size_t FreeListManyCached::Free(Address start, size_t size_in_bytes,
+ FreeMode mode) {
+ Page* page = Page::FromAddress(start);
+ page->DecreaseAllocatedBytes(size_in_bytes);
+
+ // Blocks have to be a minimum size to hold free list items.
+ if (size_in_bytes < min_block_size_) {
+ page->add_wasted_memory(size_in_bytes);
+ wasted_bytes_ += size_in_bytes;
+ return size_in_bytes;
+ }
+
+ // Insert other blocks at the head of a free list of the appropriate
+ // magnitude.
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ page->free_list_category(type)->Free(start, size_in_bytes, mode, this);
+
+ // Updating cache
+ if (mode == kLinkCategory) {
+ UpdateCacheAfterAddition(type);
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+ }
+
+ DCHECK_EQ(page->AvailableInFreeList(),
+ page->AvailableInFreeListFromAllocatedBytes());
+ return 0;
+}
+
+FreeSpace FreeListManyCached::Allocate(size_t size_in_bytes, size_t* node_size,
+ AllocationOrigin origin) {
+ USE(origin);
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+
+ FreeSpace node;
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ type = next_nonempty_category[type];
+ for (; type < last_category_; type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+
+ if (node.is_null()) {
+ // Searching each element of the last category.
+ type = last_category_;
+ node = SearchForNodeInList(type, size_in_bytes, node_size);
+ }
+
+ // Updating cache
+ if (!node.is_null() && categories_[type] == nullptr) {
+ UpdateCacheAfterRemoval(type);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// FreeListManyCachedFastPath implementation
+
+FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) {
+ USE(origin);
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+ FreeSpace node;
+
+ // Fast path part 1: searching the last categories
+ FreeListCategoryType first_category =
+ SelectFastAllocationFreeListCategoryType(size_in_bytes);
+ FreeListCategoryType type = first_category;
+ for (type = next_nonempty_category[type]; type <= last_category_;
+ type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+
+ // Fast path part 2: searching the medium categories for tiny objects
+ if (node.is_null()) {
+ if (size_in_bytes <= kTinyObjectMaxSize) {
+ for (type = next_nonempty_category[kFastPathFallBackTiny];
+ type < kFastPathFirstCategory;
+ type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+ }
+ }
+
+ // Searching the last category
+ if (node.is_null()) {
+ // Searching each element of the last category.
+ type = last_category_;
+ node = SearchForNodeInList(type, size_in_bytes, node_size);
+ }
+
+ // Finally, search the most precise category
+ if (node.is_null()) {
+ type = SelectFreeListCategoryType(size_in_bytes);
+ for (type = next_nonempty_category[type]; type < first_category;
+ type = next_nonempty_category[type + 1]) {
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ if (!node.is_null()) break;
+ }
+ }
+
+ // Updating cache
+ if (!node.is_null() && categories_[type] == nullptr) {
+ UpdateCacheAfterRemoval(type);
+ }
+
+#ifdef DEBUG
+ CheckCacheIntegrity();
+#endif
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// FreeListManyCachedOrigin implementation
+
+FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) {
+ if (origin == AllocationOrigin::kGC) {
+ return FreeListManyCached::Allocate(size_in_bytes, node_size, origin);
+ } else {
+ return FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size,
+ origin);
+ }
+}
+
+// ------------------------------------------------
+// FreeListMap implementation
+
+FreeListMap::FreeListMap() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = 1;
+ last_category_ = kOnlyCategory;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
+
+ Reset();
+}
+
+size_t FreeListMap::GuaranteedAllocatable(size_t maximum_freed) {
+ return maximum_freed;
+}
+
+Page* FreeListMap::GetPageForSize(size_t size_in_bytes) {
+ return GetPageForCategoryType(kOnlyCategory);
+}
+
+FreeListMap::~FreeListMap() { delete[] categories_; }
+
+FreeSpace FreeListMap::Allocate(size_t size_in_bytes, size_t* node_size,
+ AllocationOrigin origin) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+
+  // The following DCHECK ensures that maps are allocated one by one (i.e.,
+ // without folding). This assumption currently holds. However, if it were to
+ // become untrue in the future, you'll get an error here. To fix it, I would
+ // suggest removing the DCHECK, and replacing TryFindNodeIn by
+ // SearchForNodeInList below.
+ DCHECK_EQ(size_in_bytes, Map::kSize);
+
+ FreeSpace node = TryFindNodeIn(kOnlyCategory, size_in_bytes, node_size);
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK_IMPLIES(node.is_null(), IsEmpty());
+ return node;
+}
+
+// ------------------------------------------------
// Generic FreeList methods (non alloc/free related)
void FreeList::Reset() {
ForAllFreeListCategories(
- [](FreeListCategory* category) { category->Reset(); });
+ [this](FreeListCategory* category) { category->Reset(this); });
for (int i = kFirstCategory; i < number_of_categories_; i++) {
categories_[i] = nullptr;
}
wasted_bytes_ = 0;
+ available_ = 0;
}
size_t FreeList::EvictFreeListItems(Page* page) {
size_t sum = 0;
page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
- DCHECK_EQ(this, category->owner());
sum += category->available();
RemoveCategory(category);
- category->Reset();
+ category->Reset(this);
});
return sum;
}
-bool FreeList::ContainsPageFreeListItems(Page* page) {
- bool contained = false;
- page->ForAllFreeListCategories(
- [this, &contained](FreeListCategory* category) {
- if (category->owner() == this && category->is_linked()) {
- contained = true;
- }
- });
- return contained;
-}
-
void FreeList::RepairLists(Heap* heap) {
ForAllFreeListCategories(
[heap](FreeListCategory* category) { category->RepairFreeList(heap); });
@@ -3255,7 +3555,7 @@ bool FreeList::AddCategory(FreeListCategory* category) {
FreeListCategory* top = categories_[type];
if (category->is_empty()) return false;
- if (top == category) return false;
+ DCHECK_NE(top, category);
// Common double-linked list insertion.
if (top != nullptr) {
@@ -3263,6 +3563,8 @@ bool FreeList::AddCategory(FreeListCategory* category) {
}
category->set_next(top);
categories_[type] = category;
+
+ IncreaseAvailableBytes(category->available());
return true;
}
@@ -3271,6 +3573,10 @@ void FreeList::RemoveCategory(FreeListCategory* category) {
DCHECK_LT(type, number_of_categories_);
FreeListCategory* top = categories_[type];
+ if (category->is_linked(this)) {
+ DecreaseAvailableBytes(category->available());
+ }
+
// Common double-linked list removal.
if (top == category) {
categories_[type] = category->next();
@@ -3312,13 +3618,25 @@ size_t FreeListCategory::SumFreeList() {
while (!cur.is_null()) {
// We can't use "cur->map()" here because both cur's map and the
// root can be null during bootstrapping.
- DCHECK(cur.map_slot().contains_value(
- page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap).ptr()));
+ DCHECK(cur.map_slot().contains_value(Page::FromHeapObject(cur)
+ ->heap()
+ ->isolate()
+ ->root(RootIndex::kFreeSpaceMap)
+ .ptr()));
sum += cur.relaxed_read_size();
cur = cur.next();
}
return sum;
}
+int FreeListCategory::FreeListLength() {
+ int length = 0;
+ FreeSpace cur = top();
+ while (!cur.is_null()) {
+ length++;
+ cur = cur.next();
+ }
+ return length;
+}
#ifdef DEBUG
bool FreeList::IsVeryLong() {
@@ -3364,7 +3682,8 @@ size_t PagedSpace::SizeOfObjects() {
return Size() - (limit() - top());
}
-bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
+bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes,
+ AllocationOrigin origin) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
// Wait for the sweeper threads here and complete the sweeping phase.
@@ -3372,38 +3691,43 @@ bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
// After waiting for the sweeper threads, there may be new free-list
// entries.
- return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
+ return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
}
return false;
}
-bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
+bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes,
+ AllocationOrigin origin) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
collector->sweeper()->ParallelSweepSpace(identity(), 0);
RefillFreeList();
- return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
+ return RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin);
}
return false;
}
-bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
+bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
VMState<GC> state(heap()->isolate());
RuntimeCallTimerScope runtime_timer(
heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
- return RawSlowRefillLinearAllocationArea(size_in_bytes);
+ return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
}
-bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
- return RawSlowRefillLinearAllocationArea(size_in_bytes);
+bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
+ return RawSlowRefillLinearAllocationArea(size_in_bytes, origin);
}
-bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
+bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
// Allocation in this space has failed.
DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
- if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;
+ if (RefillLinearAllocationAreaFromFreeList(size_in_bytes, origin))
+ return true;
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
@@ -3419,16 +3743,24 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
// Retry the free list allocation.
if (RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes)))
+ static_cast<size_t>(size_in_bytes), origin))
return true;
+    // Clean up invalidated old-to-new refs for compaction space in the
+ // final atomic pause.
+ Sweeper::FreeSpaceMayContainInvalidatedSlots
+ invalidated_slots_in_free_space =
+ is_local() ? Sweeper::FreeSpaceMayContainInvalidatedSlots::kYes
+ : Sweeper::FreeSpaceMayContainInvalidatedSlots::kNo;
+
// If sweeping is still in progress try to sweep pages.
int max_freed = collector->sweeper()->ParallelSweepSpace(
- identity(), size_in_bytes, kMaxPagesToSweep);
+ identity(), size_in_bytes, kMaxPagesToSweep,
+ invalidated_slots_in_free_space);
RefillFreeList();
if (max_freed >= size_in_bytes) {
if (RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes)))
+ static_cast<size_t>(size_in_bytes), origin))
return true;
}
}
@@ -3441,7 +3773,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
if (page != nullptr) {
AddPage(page);
if (RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes)))
+ static_cast<size_t>(size_in_bytes), origin))
return true;
}
}
@@ -3450,22 +3782,57 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
DCHECK((CountTotalPages() > 1) ||
(static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
return RefillLinearAllocationAreaFromFreeList(
- static_cast<size_t>(size_in_bytes));
+ static_cast<size_t>(size_in_bytes), origin);
}
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists. Allocation may still fail there, which
// would indicate that there is not enough memory for the given allocation.
- return SweepAndRetryAllocation(size_in_bytes);
+ return SweepAndRetryAllocation(size_in_bytes, origin);
}
// -----------------------------------------------------------------------------
// MapSpace implementation
+// TODO(dmercadier): use a heap instead of sorting like that.
+// Using a heap will have multiple benefits:
+// - for now, SortFreeList is only called after sweeping, which is somewhat
+// late. Using a heap, sorting could be done online: FreeListCategories would
+//  be inserted in a heap (i.e., in a sorted manner).
+// - SortFreeList is a bit fragile: any change to FreeListMap (or to
+// MapSpace::free_list_) could break it.
+void MapSpace::SortFreeList() {
+ using LiveBytesPagePair = std::pair<size_t, Page*>;
+ std::vector<LiveBytesPagePair> pages;
+ pages.reserve(CountTotalPages());
+
+ for (Page* p : *this) {
+ free_list()->RemoveCategory(p->free_list_category(kFirstCategory));
+ pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ }
+
+ // Sorting by least-allocated-bytes first.
+ std::sort(pages.begin(), pages.end(),
+ [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
+ return a.first < b.first;
+ });
+
+ for (LiveBytesPagePair const& p : pages) {
+    // Since AddCategory inserts in head position, it reverses the order produced
+    // by the sort above: least-allocated-bytes will be added first, and will
+ // therefore be the last element (and the first one will be
+ // most-allocated-bytes).
+ free_list()->AddCategory(p.second->free_list_category(kFirstCategory));
+ }
+}
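A standalone sketch, not part of this change, of the ordering effect described in the comment inside the loop above: an ascending sort followed by head insertion leaves the most-allocated page first.

// Standalone sketch (not V8 code): head insertion after an ascending sort
// leaves the most-allocated page at the front of the resulting list.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <list>
#include <utility>
#include <vector>

int main() {
  // Hypothetical (allocated_bytes, page id) pairs.
  std::vector<std::pair<size_t, int>> pages = {{300, 1}, {100, 2}, {200, 3}};
  std::sort(pages.begin(), pages.end());  // least-allocated-bytes first

  std::list<int> category_order;
  for (const auto& p : pages) {
    category_order.push_front(p.second);  // AddCategory-style head insertion
  }

  for (int id : category_order) std::printf("page %d\n", id);
  // Prints pages 1, 3, 2: most-allocated-bytes ends up first,
  // least-allocated-bytes last, matching the comment above.
  return 0;
}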
+
#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
#endif
+// -----------------------------------------------------------------------------
+// ReadOnlySpace implementation
+
ReadOnlySpace::ReadOnlySpace(Heap* heap)
: PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList()),
is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h
index 384c731f376..ebb6876cbe1 100644
--- a/chromium/v8/src/heap/spaces.h
+++ b/chromium/v8/src/heap/spaces.h
@@ -31,6 +31,7 @@
#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
namespace internal {
@@ -120,7 +121,7 @@ class Space;
#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
DCHECK((0 < size) && (size <= code_space->AreaSize()))
-using FreeListCategoryType = int;
+using FreeListCategoryType = int32_t;
static const FreeListCategoryType kFirstCategory = 0;
static const FreeListCategoryType kInvalidCategory = -1;
@@ -138,32 +139,23 @@ enum RememberedSetType {
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
- FreeListCategory(FreeList* free_list, Page* page)
- : free_list_(free_list),
- page_(page),
- type_(kInvalidCategory),
- available_(0),
- length_(0),
- prev_(nullptr),
- next_(nullptr) {}
-
void Initialize(FreeListCategoryType type) {
type_ = type;
available_ = 0;
- length_ = 0;
prev_ = nullptr;
next_ = nullptr;
}
- void Reset();
+ void Reset(FreeList* owner);
void RepairFreeList(Heap* heap);
// Relinks the category into the currently owning free list. Requires that the
// category is currently unlinked.
- void Relink();
+ void Relink(FreeList* owner);
- void Free(Address address, size_t size_in_bytes, FreeMode mode);
+ void Free(Address address, size_t size_in_bytes, FreeMode mode,
+ FreeList* owner);
// Performs a single try to pick a node of at least |minimum_size| from the
// category. Stores the actual size in |node_size|. Returns nullptr if no
@@ -174,22 +166,22 @@ class FreeListCategory {
// actual size in |node_size|. Returns nullptr if no node is found.
FreeSpace SearchForNodeInList(size_t minimum_size, size_t* node_size);
- inline FreeList* owner();
- inline Page* page() const { return page_; }
- inline bool is_linked();
+ inline bool is_linked(FreeList* owner) const;
bool is_empty() { return top().is_null(); }
- size_t available() const { return available_; }
-
- void set_free_list(FreeList* free_list) { free_list_ = free_list; }
+ uint32_t available() const { return available_; }
size_t SumFreeList();
- int FreeListLength() { return length_; }
+ int FreeListLength();
private:
// For debug builds we accurately compute free lists lengths up until
// {kVeryLongFreeList} by manually walking the list.
static const int kVeryLongFreeList = 500;
+  // Updates |available_| and free_list_->Available() after an allocation of
+  // size |allocation_size|.
+ inline void UpdateCountersAfterAllocation(size_t allocation_size);
+
FreeSpace top() { return top_; }
void set_top(FreeSpace top) { top_ = top; }
FreeListCategory* prev() { return prev_; }
@@ -197,32 +189,23 @@ class FreeListCategory {
FreeListCategory* next() { return next_; }
void set_next(FreeListCategory* next) { next_ = next; }
- // This FreeListCategory is owned by the given free_list_.
- FreeList* free_list_;
-
- // This FreeListCategory holds free list entries of the given page_.
- Page* const page_;
-
// |type_|: The type of this free list category.
- FreeListCategoryType type_;
+ FreeListCategoryType type_ = kInvalidCategory;
// |available_|: Total available bytes in all blocks of this free list
// category.
- size_t available_;
-
- // |length_|: Total blocks in this free list category.
- int length_;
+ uint32_t available_ = 0;
// |top_|: Points to the top FreeSpace in the free list category.
FreeSpace top_;
- FreeListCategory* prev_;
- FreeListCategory* next_;
+ FreeListCategory* prev_ = nullptr;
+ FreeListCategory* next_ = nullptr;
friend class FreeList;
+ friend class FreeListManyCached;
friend class PagedSpace;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
+ friend class MapSpace;
};
// A free list maintains free blocks of memory. The free list is organized in
@@ -256,22 +239,24 @@ class FreeList {
// size_in_bytes. This method returns null if the allocation request cannot be
// handled by the free list.
virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size) = 0;
+ size_t* node_size,
+ AllocationOrigin origin) = 0;
// Returns a page containing an entry for a given type, or nullptr otherwise.
V8_EXPORT_PRIVATE virtual Page* GetPageForSize(size_t size_in_bytes) = 0;
- void Reset();
+ virtual void Reset();
// Return the number of bytes available on the free list.
size_t Available() {
- size_t available = 0;
- ForAllFreeListCategories([&available](FreeListCategory* category) {
- available += category->available();
- });
- return available;
+ DCHECK(available_ == SumFreeLists());
+ return available_;
}
+ // Update number of available bytes on the Freelists.
+ void IncreaseAvailableBytes(size_t bytes) { available_ += bytes; }
+ void DecreaseAvailableBytes(size_t bytes) { available_ -= bytes; }
+
bool IsEmpty() {
bool empty = true;
ForAllFreeListCategories([&empty](FreeListCategory* category) {
@@ -284,7 +269,6 @@ class FreeList {
void RepairLists(Heap* heap);
V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
- bool ContainsPageFreeListItems(Page* page);
int number_of_categories() { return number_of_categories_; }
FreeListCategoryType last_category() { return last_category_; }
@@ -308,15 +292,10 @@ class FreeList {
}
}
- bool AddCategory(FreeListCategory* category);
- V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
+ virtual bool AddCategory(FreeListCategory* category);
+ virtual V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
void PrintCategories(FreeListCategoryType type);
-#ifdef DEBUG
- size_t SumFreeLists();
- bool IsVeryLong();
-#endif
-
protected:
class FreeListCategoryIterator final {
public:
@@ -336,6 +315,11 @@ class FreeList {
FreeListCategory* current_;
};
+#ifdef DEBUG
+ V8_EXPORT_PRIVATE size_t SumFreeLists();
+ bool IsVeryLong();
+#endif
+
// Tries to retrieve a node from the first category in a given |type|.
// Returns nullptr if the category is empty or the top entry is smaller
// than minimum_size.
@@ -355,9 +339,7 @@ class FreeList {
return categories_[type];
}
- Page* GetPageForCategoryType(FreeListCategoryType type) {
- return top(type) ? top(type)->page() : nullptr;
- }
+ inline Page* GetPageForCategoryType(FreeListCategoryType type);
int number_of_categories_ = 0;
FreeListCategoryType last_category_ = 0;
@@ -366,10 +348,14 @@ class FreeList {
std::atomic<size_t> wasted_bytes_{0};
FreeListCategory** categories_ = nullptr;
+ // |available_|: The number of bytes in this freelist.
+ size_t available_ = 0;
+
friend class FreeListCategory;
friend class Page;
friend class MemoryChunk;
friend class ReadOnlyPage;
+ friend class MapSpace;
};
// FreeList used for spaces that don't have freelists
@@ -383,7 +369,8 @@ class NoFreeList final : public FreeList {
FATAL("NoFreeList can't be used as a standard FreeList.");
}
V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size) final {
+ size_t* node_size,
+ AllocationOrigin origin) final {
FATAL("NoFreeList can't be used as a standard FreeList.");
}
Page* GetPageForSize(size_t size_in_bytes) final {
@@ -412,11 +399,8 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
0;
- CheckOffsetsAreConsistent();
}
- void CheckOffsetsAreConsistent() const;
-
static inline void MoveExternalBackingStoreBytes(
ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
@@ -531,8 +515,6 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
// Tracks off-heap memory used by this space.
std::atomic<size_t>* external_backing_store_bytes_;
- static const intptr_t kIdOffset = 9 * kSystemPointerSize;
-
bool allocation_observers_paused_;
Heap* heap_;
AllocationSpace id_;
@@ -627,7 +609,8 @@ class MemoryChunk : public BasicMemoryChunk {
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
- + kSystemPointerSize // InvalidatedSlots* invalidated_slots_
+ + kSystemPointerSize *
+ NUMBER_OF_REMEMBERED_SET_TYPES // InvalidatedSlots* array
+ kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ kSystemPointerSize // base::Mutex* mutex_
+ kSystemPointerSize // std::atomic<ConcurrentSweepingState>
@@ -713,7 +696,7 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
bool ContainsSlots() {
return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
- invalidated_slots() != nullptr;
+ invalidated_slots<type>() != nullptr;
}
template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
@@ -741,15 +724,23 @@ class MemoryChunk : public BasicMemoryChunk {
template <RememberedSetType type>
void ReleaseTypedSlotSet();
+ template <RememberedSetType type>
InvalidatedSlots* AllocateInvalidatedSlots();
+ template <RememberedSetType type>
void ReleaseInvalidatedSlots();
+ template <RememberedSetType type>
V8_EXPORT_PRIVATE void RegisterObjectWithInvalidatedSlots(HeapObject object,
int size);
// Updates invalidated_slots after array left-trimming.
+ template <RememberedSetType type>
void MoveObjectWithInvalidatedSlots(HeapObject old_start,
HeapObject new_start);
+ template <RememberedSetType type>
bool RegisteredObjectWithInvalidatedSlots(HeapObject object);
- InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
+ template <RememberedSetType type>
+ InvalidatedSlots* invalidated_slots() {
+ return invalidated_slots_[type];
+ }
void ReleaseLocalTracker();
@@ -925,7 +916,7 @@ class MemoryChunk : public BasicMemoryChunk {
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
- InvalidatedSlots* invalidated_slots_;
+ InvalidatedSlots* invalidated_slots_[NUMBER_OF_REMEMBERED_SET_TYPES];
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
@@ -1811,28 +1802,14 @@ class V8_EXPORT_PRIVATE FreeListLegacy : public FreeList {
return maximum_freed;
}
- Page* GetPageForSize(size_t size_in_bytes) override {
- const int minimum_category =
- static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
- Page* page = GetPageForCategoryType(kHuge);
- if (!page && static_cast<int>(kLarge) >= minimum_category)
- page = GetPageForCategoryType(kLarge);
- if (!page && static_cast<int>(kMedium) >= minimum_category)
- page = GetPageForCategoryType(kMedium);
- if (!page && static_cast<int>(kSmall) >= minimum_category)
- page = GetPageForCategoryType(kSmall);
- if (!page && static_cast<int>(kTiny) >= minimum_category)
- page = GetPageForCategoryType(kTiny);
- if (!page && static_cast<int>(kTiniest) >= minimum_category)
- page = GetPageForCategoryType(kTiniest);
- return page;
- }
+ inline Page* GetPageForSize(size_t size_in_bytes) override;
FreeListLegacy();
~FreeListLegacy();
V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size) override;
+ size_t* node_size,
+ AllocationOrigin origin) override;
private:
enum { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };
@@ -1909,22 +1886,14 @@ class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
return kHugeAllocationMax;
}
- Page* GetPageForSize(size_t size_in_bytes) override {
- const int minimum_category =
- static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
- Page* page = GetPageForCategoryType(kHuge);
- if (!page && static_cast<int>(kLarge) >= minimum_category)
- page = GetPageForCategoryType(kLarge);
- if (!page && static_cast<int>(kMedium) >= minimum_category)
- page = GetPageForCategoryType(kMedium);
- return page;
- }
+ inline Page* GetPageForSize(size_t size_in_bytes) override;
FreeListFastAlloc();
~FreeListFastAlloc();
V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size) override;
+ size_t* node_size,
+ AllocationOrigin origin) override;
private:
enum { kMedium, kLarge, kHuge };
@@ -1951,14 +1920,10 @@ class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
}
return kHuge;
}
-
- Page* GetPageForCategoryType(FreeListCategoryType type) {
- return top(type) ? top(type)->page() : nullptr;
- }
};
-// Use 49 Freelists: on per size between 24 and 256, and then a few ones for
-// larger sizes. See the variable |categories_max| for the size of each
+// Use 24 Freelists: one per 16 bytes between 24 and 256, and then a few for
+// larger sizes. See the variable |categories_min| for the size of each
// Freelist. Allocation is done using a best-fit strategy (considering only the
// first element of each category though).
// Performance is expected to be worse than FreeListLegacy, but memory
@@ -1973,41 +1938,214 @@ class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
~FreeListMany();
V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size) override;
+ size_t* node_size,
+ AllocationOrigin origin) override;
- private:
+ protected:
static const size_t kMinBlockSize = 3 * kTaggedSize;
// This is a conservative upper bound. The actual maximum block size takes
// padding and alignment of data and code pages into account.
static const size_t kMaxBlockSize = Page::kPageSize;
+ // Largest size for which categories are still precise, and for which we can
+ // therefore compute the category in constant time.
+ static const size_t kPreciseCategoryMaxSize = 256;
// Categories boundaries generated with:
// perl -E '
- // @cat = map {$_*8} 3..32, 48, 64;
- // while ($cat[-1] <= 32768) {
- // push @cat, $cat[-1]+$cat[-3], $cat[-1]*2
- // }
- // push @cat, 4080, 4088;
- // @cat = sort { $a <=> $b } @cat;
- // push @cat, "Page::kPageSize";
- // say join ", ", @cat;
- // say "\n", scalar @cat'
- // Note the special case for 4080 and 4088 bytes: experiments have shown that
- // this category classes are more used than others of similar sizes
- static const int kNumberOfCategories = 49;
- static const size_t categories_max[kNumberOfCategories];
+ // @cat = (24, map {$_*16} 2..16, 48, 64);
+ // while ($cat[-1] <= 32768) {
+ // push @cat, $cat[-1]*2
+ // }
+ // say join ", ", @cat;
+ // say "\n", scalar @cat'
+ static const int kNumberOfCategories = 24;
+ static constexpr unsigned int categories_min[kNumberOfCategories] = {
+ 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192,
+ 208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
// Return the smallest category that could hold |size_in_bytes| bytes.
FreeListCategoryType SelectFreeListCategoryType(
size_t size_in_bytes) override {
- for (int cat = kFirstCategory; cat < last_category_; cat++) {
- if (size_in_bytes <= categories_max[cat]) {
+ if (size_in_bytes <= kPreciseCategoryMaxSize) {
+ if (size_in_bytes < categories_min[1]) return 0;
+ return static_cast<FreeListCategoryType>(size_in_bytes >> 4) - 1;
+ }
+ for (int cat = (kPreciseCategoryMaxSize >> 4) - 1; cat < last_category_;
+ cat++) {
+ if (size_in_bytes < categories_min[cat + 1]) {
+ return cat;
+ }
+ }
+ return last_category_;
+ }
+
+ FRIEND_TEST(SpacesTest, FreeListManySelectFreeListCategoryType);
+ FRIEND_TEST(SpacesTest, FreeListManyGuaranteedAllocatable);
+};
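A hedged, standalone sketch of the category arithmetic above; it simply mirrors |categories_min| and the shift-based index and is not the V8 class itself.

// Standalone sketch of the category arithmetic (not the V8 class): sizes up to
// 256 map to (size >> 4) - 1, except sizes below 32, which use category 0.
#include <cassert>
#include <cstddef>

namespace {
constexpr unsigned int kCategoriesMin[24] = {
    24,  32,  48,  64,  80,   96,   112,  128,  144,   160,   176,   192,
    208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};

int SelectCategory(size_t size_in_bytes) {
  if (size_in_bytes <= 256) {
    if (size_in_bytes < kCategoriesMin[1]) return 0;
    return static_cast<int>(size_in_bytes >> 4) - 1;
  }
  for (int cat = (256 >> 4) - 1; cat < 23; cat++) {
    if (size_in_bytes < kCategoriesMin[cat + 1]) return cat;
  }
  return 23;  // last category
}
}  // namespace

int main() {
  assert(SelectCategory(24) == 0);    // smallest block size
  assert(SelectCategory(80) == 4);    // (80 >> 4) - 1; kCategoriesMin[4] == 80
  assert(SelectCategory(256) == 15);  // last precise category
  assert(SelectCategory(300) == 15);  // falls in the 256..511 bucket
  assert(SelectCategory(70000) == 23);
  return 0;
}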
+
+// Same as FreeListMany but uses a cache to know which categories are empty.
+// The cache (|next_nonempty_category|) is maintained such that for each
+// category c, next_nonempty_category[c] contains the first non-empty category
+// greater than or equal to c that may hold an object of size c.
+// Allocation is done using the same strategy as FreeListMany (i.e., best fit).
+class V8_EXPORT_PRIVATE FreeListManyCached : public FreeListMany {
+ public:
+ FreeListManyCached();
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+
+ size_t Free(Address start, size_t size_in_bytes, FreeMode mode) override;
+
+ void Reset() override;
+
+ bool AddCategory(FreeListCategory* category) override;
+ void RemoveCategory(FreeListCategory* category) override;
+
+ protected:
+ // Updates the cache after adding something in the category |cat|.
+ void UpdateCacheAfterAddition(FreeListCategoryType cat) {
+ for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] > cat;
+ i--) {
+ next_nonempty_category[i] = cat;
+ }
+ }
+
+ // Updates the cache after emptying category |cat|.
+ void UpdateCacheAfterRemoval(FreeListCategoryType cat) {
+ for (int i = cat; i >= kFirstCategory && next_nonempty_category[i] == cat;
+ i--) {
+ next_nonempty_category[i] = next_nonempty_category[cat + 1];
+ }
+ }
+
+#ifdef DEBUG
+ void CheckCacheIntegrity() {
+ for (int i = 0; i <= last_category_; i++) {
+ DCHECK(next_nonempty_category[i] == last_category_ + 1 ||
+ categories_[next_nonempty_category[i]] != nullptr);
+ for (int j = i; j < next_nonempty_category[i]; j++) {
+ DCHECK(categories_[j] == nullptr);
+ }
+ }
+ }
+#endif
+
+ // The cache is overallocated by one so that the last element is always
+ // defined, and when updating the cache, we can always use cache[i+1] as long
+ // as i is < kNumberOfCategories.
+ int next_nonempty_category[kNumberOfCategories + 1];
+
+ private:
+ void ResetCache() {
+ for (int i = 0; i < kNumberOfCategories; i++) {
+ next_nonempty_category[i] = kNumberOfCategories;
+ }
+ // Setting the after-last element as well, as explained in the cache's
+ // declaration.
+ next_nonempty_category[kNumberOfCategories] = kNumberOfCategories;
+ }
+};
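A minimal standalone sketch of the |next_nonempty_category| invariant, using plain ints in place of FreeListCategory objects; the update loops mirror the two helpers above.

// Standalone sketch of the cache invariant (plain ints, not FreeListCategory):
// after every update, cache[c] is the first non-empty category >= c, or kN.
#include <cassert>

namespace {
constexpr int kN = 24;  // number of categories
int cache[kN + 1];      // overallocated by one, as in the class above

void ResetCache() {
  for (int i = 0; i <= kN; i++) cache[i] = kN;
}

void UpdateAfterAddition(int cat) {
  for (int i = cat; i >= 0 && cache[i] > cat; i--) cache[i] = cat;
}

void UpdateAfterRemoval(int cat) {
  for (int i = cat; i >= 0 && cache[i] == cat; i--) cache[i] = cache[cat + 1];
}
}  // namespace

int main() {
  ResetCache();
  UpdateAfterAddition(5);
  UpdateAfterAddition(10);
  assert(cache[0] == 5 && cache[6] == 10 && cache[11] == kN);
  UpdateAfterRemoval(5);   // category 5 became empty
  assert(cache[0] == 10);  // next non-empty at or above 0 is now 10
  UpdateAfterRemoval(10);
  assert(cache[0] == kN);  // no non-empty category left
  return 0;
}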
+
+// Same as FreeListManyCached but uses a fast path.
+// The fast path overallocates by at least 1.85k bytes. The idea of this 1.85k
+// is: we want the fast path to always overallocate, even for larger
+// categories. Therefore, we have two choices: either overallocate by
+// "size_in_bytes * something" or overallocate by "size_in_bytes +
+// something". We choose the latter, as the former would tend to overallocate
+// too much for larger objects. The 1.85k (= 2048 - 128) has been chosen such
+// that for tiny objects (size <= 128 bytes), the first category considered is
+// the 18th (which holds objects of 2k to 4k), while for larger objects, the
+// first category considered is one that guarantees at least 1.85k bytes of
+// overallocation. Using 2k rather than 1.85k would have resulted in either
+// more complex logic for SelectFastAllocationFreeListCategoryType, or the
+// 18th category (2k to 4k) not being used; both of which are undesirable.
+// A secondary fast path is used for tiny objects (size <= 128), in order to
+// consider categories from 256 to 2048 bytes for them.
+// Note that this class uses a precise GetPageForSize (inherited from
+// FreeListMany), which makes its fast path less fast in the Scavenger. This is
+// done on purpose, since this class's only purpose is to be used by
+// FreeListManyCachedOrigin, which is precise for the scavenger.
+class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached {
+ public:
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+
+ protected:
+ // Objects in the 18th category are at least 2048 bytes
+ static const FreeListCategoryType kFastPathFirstCategory = 18;
+ static const size_t kFastPathStart = 2048;
+ static const size_t kTinyObjectMaxSize = 128;
+ static const size_t kFastPathOffset = kFastPathStart - kTinyObjectMaxSize;
+ // Objects in the 15th category are at least 256 bytes
+ static const FreeListCategoryType kFastPathFallBackTiny = 15;
+
+ STATIC_ASSERT(categories_min[kFastPathFirstCategory] == kFastPathStart);
+ STATIC_ASSERT(categories_min[kFastPathFallBackTiny] ==
+ kTinyObjectMaxSize * 2);
+
+ FreeListCategoryType SelectFastAllocationFreeListCategoryType(
+ size_t size_in_bytes) {
+ DCHECK(size_in_bytes < kMaxBlockSize);
+
+ if (size_in_bytes >= categories_min[last_category_]) return last_category_;
+
+ size_in_bytes += kFastPathOffset;
+ for (int cat = kFastPathFirstCategory; cat < last_category_; cat++) {
+ if (size_in_bytes <= categories_min[cat]) {
return cat;
}
}
return last_category_;
}
+
+ FRIEND_TEST(
+ SpacesTest,
+ FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType);
+};
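A standalone sketch of the fast-path arithmetic, assuming the constants shown above (kFastPathOffset = 2048 - 128, first fast-path category 18); it is not the V8 implementation.

// Standalone sketch of the fast-path lookup (not the V8 class): adding
// kFastPathOffset before the search guarantees roughly 1.9k bytes of slack.
#include <cassert>
#include <cstddef>

namespace {
constexpr unsigned int kCategoriesMin[24] = {
    24,  32,  48,  64,  80,   96,   112,  128,  144,   160,   176,   192,
    208, 224, 240, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536};
constexpr int kFastPathFirstCategory = 18;      // the 2048-byte bucket
constexpr size_t kFastPathOffset = 2048 - 128;  // 1920 bytes

int SelectFastPathCategory(size_t size_in_bytes) {
  if (size_in_bytes >= kCategoriesMin[23]) return 23;
  size_in_bytes += kFastPathOffset;
  for (int cat = kFastPathFirstCategory; cat < 23; cat++) {
    if (size_in_bytes <= kCategoriesMin[cat]) return cat;
  }
  return 23;
}
}  // namespace

int main() {
  // A 100-byte request starts at the 2048 bucket: >= 1948 bytes of slack.
  assert(SelectFastPathCategory(100) == 18);
  // A 500-byte request skips to the 4096 bucket (500 + 1920 > 2048).
  assert(SelectFastPathCategory(500) == 19);
  return 0;
}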
+
+// Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise.
+// The reasoning behind this FreeList is the following: the GC runs in
+// parallel, and therefore, more expensive allocations there are less
+// noticeable. On the other hand, the generated code and runtime need to be
+// very fast. Therefore, the strategy for the former is one that is not very
+// efficient, but reduces fragmentation (FreeListManyCached), while the
+// strategy for the latter is one that is very efficient, but introduces some
+// fragmentation (FreeListManyCachedFastPath).
+class V8_EXPORT_PRIVATE FreeListManyCachedOrigin
+ : public FreeListManyCachedFastPath {
+ public:
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+};
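A hedged sketch of that dispatch; the actual Allocate body lives in spaces.cc and is not part of this hunk, and AllocationOrigin::kGC / kGeneratedCode are assumed enumerator names (only kRuntime appears in this diff).

#include <cassert>
#include <cstring>

// Assumed enumerators; only kRuntime is visible in this diff.
enum class AllocationOrigin { kGeneratedCode, kRuntime, kGC };

// Which strategy an origin-aware free list would pick, per the comment above:
// GC allocations take the slower, fragmentation-friendly list, everything
// else takes the fast path.
const char* StrategyFor(AllocationOrigin origin) {
  return origin == AllocationOrigin::kGC ? "FreeListManyCached"
                                         : "FreeListManyCachedFastPath";
}

int main() {
  assert(std::strcmp(StrategyFor(AllocationOrigin::kGC),
                     "FreeListManyCached") == 0);
  assert(std::strcmp(StrategyFor(AllocationOrigin::kRuntime),
                     "FreeListManyCachedFastPath") == 0);
  return 0;
}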
+
+// FreeList for maps: since maps are all the same size, uses a single freelist.
+class V8_EXPORT_PRIVATE FreeListMap : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) override;
+
+ Page* GetPageForSize(size_t size_in_bytes) override;
+
+ FreeListMap();
+ ~FreeListMap();
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size,
+ AllocationOrigin origin) override;
+
+ private:
+ static const size_t kMinBlockSize = Map::kSize;
+ static const size_t kMaxBlockSize = Page::kPageSize;
+ static const FreeListCategoryType kOnlyCategory = 0;
+
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
+ return kOnlyCategory;
+ }
};
// LocalAllocationBuffer represents a linear allocation area that is created
@@ -2108,6 +2246,10 @@ class SpaceWithLinearArea : public Space {
V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
size_t min_size) = 0;
+ V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);
+
+ void PrintAllocationsOrigins();
+
protected:
// If we are doing inline allocation in steps, this method performs the 'step'
// operation. top is the memory address of the bump pointer at the last
@@ -2125,6 +2267,9 @@ class SpaceWithLinearArea : public Space {
// TODO(ofrobots): make these private after refactoring is complete.
LinearAllocationArea allocation_info_;
Address top_on_previous_step_;
+
+ size_t allocations_origins_[static_cast<int>(
+ AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
class V8_EXPORT_PRIVATE PagedSpace
@@ -2190,17 +2335,19 @@ class V8_EXPORT_PRIVATE PagedSpace
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
- int size_in_bytes);
+ int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
- int size_in_bytes, AllocationAlignment alignment);
+ int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
// Allocate the requested number of bytes in the space and consider allocation
// alignment if needed.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
- int size_in_bytes, AllocationAlignment alignment);
+ int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
if (size_in_bytes == 0) return 0;
@@ -2373,7 +2520,8 @@ class V8_EXPORT_PRIVATE PagedSpace
// Sets up a linear allocation area that fits the given number of bytes.
// Returns false if there is not enough space and the caller has to retry
// after collecting garbage.
- inline bool EnsureLinearAllocationArea(int size_in_bytes);
+ inline bool EnsureLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin);
// Allocates an object from the linear allocation area. Assumes that the
// linear allocation area is large enough to fit the object.
inline HeapObject AllocateLinearly(int size_in_bytes);
@@ -2385,24 +2533,25 @@ class V8_EXPORT_PRIVATE PagedSpace
AllocationAlignment alignment);
V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
- size_t size_in_bytes);
+ size_t size_in_bytes, AllocationOrigin origin);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and retry free-list
// allocation. Returns false if there is not enough space and the caller
// has to retry after collecting garbage.
- V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
+ V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(
+ int size_in_bytes, AllocationOrigin origin);
// Slow path of AllocateRaw. This function is space-dependent. Returns false
// if there is not enough space and the caller has to retry after
// collecting garbage.
V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
- int size_in_bytes);
+ int size_in_bytes, AllocationOrigin origin);
// Implementation of SlowAllocateRaw. Returns false if there is not enough
// space and the caller has to retry after collecting garbage.
V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
- int size_in_bytes);
+ int size_in_bytes, AllocationOrigin origin);
Executability executable_;
@@ -2773,16 +2922,19 @@ class V8_EXPORT_PRIVATE NewSpace
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
- AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment);
+ AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
- V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
- AllocateRawUnaligned(int size_in_bytes);
+ V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
+ int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
- AllocateRaw(int size_in_bytes, AllocationAlignment alignment);
+ AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
- int size_in_bytes, AllocationAlignment alignment);
+ int size_in_bytes, AllocationAlignment alignment,
+ AllocationOrigin origin = AllocationOrigin::kRuntime);
// Reset the allocation pointer to the beginning of the active semispace.
void ResetLinearAllocationArea();
@@ -2888,10 +3040,10 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
bool snapshotable() override { return false; }
V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
- int size_in_bytes) override;
+ int size_in_bytes, AllocationOrigin origin) override;
V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
- int size_in_bytes) override;
+ int size_in_bytes, AllocationOrigin origin) override;
};
// A collection of |CompactionSpace|s used by a single compaction task.
@@ -2961,8 +3113,7 @@ class MapSpace : public PagedSpace {
public:
// Creates a map space object.
explicit MapSpace(Heap* heap)
- : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE,
- FreeList::CreateFreeList()) {}
+ : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, new FreeListMap()) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
@@ -2972,6 +3123,8 @@ class MapSpace : public PagedSpace {
}
}
+ void SortFreeList();
+
#ifdef VERIFY_HEAP
void VerifyObject(HeapObject obj) override;
#endif
@@ -3006,6 +3159,8 @@ class ReadOnlySpace : public PagedSpace {
// to write it into the free list nodes that were already created.
void RepairFreeListsAfterDeserialization();
+ size_t Available() override { return 0; }
+
private:
// Unseal the space after is has been sealed, by making it writable.
// TODO(v8:7464): Only possible if the space hasn't been detached.
diff --git a/chromium/v8/src/heap/store-buffer-inl.h b/chromium/v8/src/heap/store-buffer-inl.h
index 4609c83ca03..b43098bf57d 100644
--- a/chromium/v8/src/heap/store-buffer-inl.h
+++ b/chromium/v8/src/heap/store-buffer-inl.h
@@ -12,16 +12,6 @@
namespace v8 {
namespace internal {
-void StoreBuffer::InsertDeletionIntoStoreBuffer(Address start, Address end) {
- if (top_ + sizeof(Address) * 2 > limit_[current_]) {
- StoreBufferOverflow(heap_->isolate());
- }
- *top_ = MarkDeletionAddress(start);
- top_++;
- *top_ = end;
- top_++;
-}
-
void StoreBuffer::InsertIntoStoreBuffer(Address slot) {
if (top_ + sizeof(Address) > limit_[current_]) {
StoreBufferOverflow(heap_->isolate());
diff --git a/chromium/v8/src/heap/store-buffer.cc b/chromium/v8/src/heap/store-buffer.cc
index 7d0dcfc3707..349e7877409 100644
--- a/chromium/v8/src/heap/store-buffer.cc
+++ b/chromium/v8/src/heap/store-buffer.cc
@@ -28,7 +28,6 @@ StoreBuffer::StoreBuffer(Heap* heap)
}
task_running_ = false;
insertion_callback = &InsertDuringRuntime;
- deletion_callback = &DeleteDuringRuntime;
}
void StoreBuffer::SetUp() {
@@ -91,22 +90,11 @@ void StoreBuffer::TearDown() {
}
}
-void StoreBuffer::DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
- Address end) {
- DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
- store_buffer->InsertDeletionIntoStoreBuffer(start, end);
-}
-
void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
store_buffer->InsertIntoStoreBuffer(slot);
}
-void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
- Address start, Address end) {
- UNREACHABLE();
-}
-
void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
Address slot) {
DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
@@ -117,10 +105,8 @@ void StoreBuffer::SetMode(StoreBufferMode mode) {
mode_ = mode;
if (mode == NOT_IN_GC) {
insertion_callback = &InsertDuringRuntime;
- deletion_callback = &DeleteDuringRuntime;
} else {
insertion_callback = &InsertDuringGarbageCollection;
- deletion_callback = &DeleteDuringGarbageCollection;
}
}
@@ -160,24 +146,9 @@ void StoreBuffer::MoveEntriesToRememberedSet(int index) {
MemoryChunk::BaseAddress(addr) != chunk->address()) {
chunk = MemoryChunk::FromAnyPointerAddress(addr);
}
- if (IsDeletionAddress(addr)) {
- last_inserted_addr = kNullAddress;
- current++;
- Address end = *current;
- DCHECK(!IsDeletionAddress(end));
- addr = UnmarkDeletionAddress(addr);
- if (end) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, addr, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
- } else {
- RememberedSet<OLD_TO_NEW>::Remove(chunk, addr);
- }
- } else {
- DCHECK(!IsDeletionAddress(addr));
- if (addr != last_inserted_addr) {
- RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
- last_inserted_addr = addr;
- }
+ if (addr != last_inserted_addr) {
+ RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
+ last_inserted_addr = addr;
}
}
lazy_top_[index] = nullptr;
diff --git a/chromium/v8/src/heap/store-buffer.h b/chromium/v8/src/heap/store-buffer.h
index 62b10b90714..025bb6a060b 100644
--- a/chromium/v8/src/heap/store-buffer.h
+++ b/chromium/v8/src/heap/store-buffer.h
@@ -33,17 +33,11 @@ class StoreBuffer {
Max(static_cast<int>(kMinExpectedOSPageSize / kStoreBuffers),
1 << (11 + kSystemPointerSizeLog2));
static const int kStoreBufferMask = kStoreBufferSize - 1;
- static const intptr_t kDeletionTag = 1;
V8_EXPORT_PRIVATE static int StoreBufferOverflow(Isolate* isolate);
- static void DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
- Address start, Address end);
static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
Address slot);
-
- static void DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
- Address end);
static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot);
explicit StoreBuffer(Heap* heap);
@@ -61,19 +55,6 @@ class StoreBuffer {
// the remembered set.
void MoveAllEntriesToRememberedSet();
- inline bool IsDeletionAddress(Address address) const {
- return address & kDeletionTag;
- }
-
- inline Address MarkDeletionAddress(Address address) {
- return address | kDeletionTag;
- }
-
- inline Address UnmarkDeletionAddress(Address address) {
- return address & ~kDeletionTag;
- }
-
- inline void InsertDeletionIntoStoreBuffer(Address start, Address end);
inline void InsertIntoStoreBuffer(Address slot);
void InsertEntry(Address slot) {
@@ -83,16 +64,6 @@ class StoreBuffer {
insertion_callback(this, slot);
}
- // If we only want to delete a single slot, end should be set to null which
- // will be written into the second field. When processing the store buffer
- // the more efficient Remove method will be called in this case.
- void DeleteEntry(Address start, Address end = kNullAddress) {
- // Deletions coming from the GC are directly deleted from the remembered
- // set. Deletions coming from the runtime are added to the store buffer
- // to allow concurrent processing.
- deletion_callback(this, start, end);
- }
-
void SetMode(StoreBufferMode mode);
// Used by the concurrent processing thread to transfer entries from the
@@ -174,7 +145,6 @@ class StoreBuffer {
// Callbacks are more efficient than reading out the gc state for every
// store buffer operation.
void (*insertion_callback)(StoreBuffer*, Address);
- void (*deletion_callback)(StoreBuffer*, Address, Address);
};
} // namespace internal
diff --git a/chromium/v8/src/heap/sweeper.cc b/chromium/v8/src/heap/sweeper.cc
index cbb7d717b07..c3c6b58835c 100644
--- a/chromium/v8/src/heap/sweeper.cc
+++ b/chromium/v8/src/heap/sweeper.cc
@@ -8,6 +8,7 @@
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
+#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
#include "src/objects/objects-inl.h"
@@ -154,12 +155,21 @@ void Sweeper::StartSweeping() {
MajorNonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
- int space_index = GetSweepSpaceIndex(space);
- std::sort(
- sweeping_list_[space_index].begin(), sweeping_list_[space_index].end(),
- [marking_state](Page* a, Page* b) {
- return marking_state->live_bytes(a) > marking_state->live_bytes(b);
- });
+ // Sorting is done in order to make compaction more efficient: by sweeping
+ // pages with the most free bytes first, we make it more likely that when
+ // evacuating a page, already swept pages will have enough free bytes to
+ // hold the objects to move (and therefore, we won't need to wait for more
+ // pages to be swept in order to move those objects).
+ // Since maps don't move, there is no need to sort the pages from MAP_SPACE
+ // before sweeping them.
+ if (space != MAP_SPACE) {
+ int space_index = GetSweepSpaceIndex(space);
+ std::sort(
+ sweeping_list_[space_index].begin(),
+ sweeping_list_[space_index].end(), [marking_state](Page* a, Page* b) {
+ return marking_state->live_bytes(a) > marking_state->live_bytes(b);
+ });
+ }
});
}
@@ -241,8 +251,10 @@ void Sweeper::EnsureCompleted() {
bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }
-int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
- FreeSpaceTreatmentMode free_space_mode) {
+int Sweeper::RawSweep(
+ Page* p, FreeListRebuildingMode free_list_mode,
+ FreeSpaceTreatmentMode free_space_mode,
+ FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
Space* space = p->owner();
DCHECK_NOT_NULL(space);
DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
@@ -265,6 +277,15 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
ArrayBufferTracker::FreeDead(p, marking_state_);
Address free_start = p->area_start();
+ InvalidatedSlotsCleanup old_to_new_cleanup =
+ InvalidatedSlotsCleanup::NoCleanup(p);
+
+ // Clean invalidated slots during the final atomic pause. After resuming
+  // execution this isn't necessary: invalid old-to-new refs were already
+ // removed by mark compact's update pointers phase.
+ if (invalidated_slots_in_free_space ==
+ FreeSpaceMayContainInvalidatedSlots::kYes)
+ old_to_new_cleanup = InvalidatedSlotsCleanup::OldToNew(p);
intptr_t live_bytes = 0;
intptr_t freed_bytes = 0;
@@ -309,6 +330,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
static_cast<uint32_t>(free_start - p->address()),
static_cast<uint32_t>(free_end - p->address())));
}
+
+ old_to_new_cleanup.Free(free_start, free_end);
}
Map map = object.synchronized_map();
int size = object.SizeFromMap(map);
@@ -341,6 +364,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
static_cast<uint32_t>(free_start - p->address()),
static_cast<uint32_t>(p->area_end() - p->address())));
}
+
+ old_to_new_cleanup.Free(free_start, p->area_end());
}
// Clear invalid typed slots after collecting all free ranges.
@@ -390,13 +415,15 @@ bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}
-int Sweeper::ParallelSweepSpace(AllocationSpace identity,
- int required_freed_bytes, int max_pages) {
+int Sweeper::ParallelSweepSpace(
+ AllocationSpace identity, int required_freed_bytes, int max_pages,
+ FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
int max_freed = 0;
int pages_freed = 0;
Page* page = nullptr;
while ((page = GetSweepingPageSafe(identity)) != nullptr) {
- int freed = ParallelSweepPage(page, identity);
+ int freed =
+ ParallelSweepPage(page, identity, invalidated_slots_in_free_space);
if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
// Free list of a never-allocate page will be dropped later on.
continue;
@@ -410,7 +437,9 @@ int Sweeper::ParallelSweepSpace(AllocationSpace identity,
return max_freed;
}
-int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
+int Sweeper::ParallelSweepPage(
+ Page* page, AllocationSpace identity,
+ FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space) {
// Early bailout for pages that are swept outside of the regular sweeping
// path. This check here avoids taking the lock first, avoiding deadlocks.
if (page->SweepingDone()) return 0;
@@ -430,7 +459,8 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
- max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
+ max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode,
+ invalidated_slots_in_free_space);
DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
@@ -479,11 +509,14 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
}
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
+#ifdef DEBUG
DCHECK_GE(page->area_size(),
static_cast<size_t>(marking_state_->live_bytes(page)));
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
- page->ForAllFreeListCategories(
- [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+ page->ForAllFreeListCategories([page](FreeListCategory* category) {
+ DCHECK(!category->is_linked(page->owner()->free_list()));
+ });
+#endif // DEBUG
page->set_concurrent_sweeping_state(Page::kSweepingPending);
heap_->paged_space(space)->IncreaseAllocatedBytes(
marking_state_->live_bytes(page), page);
@@ -586,7 +619,8 @@ void Sweeper::MakeIterable(Page* page) {
DCHECK(IsValidIterabilitySpace(page->owner_identity()));
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
- RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
+ RawSweep(page, IGNORE_FREE_LIST, free_space_mode,
+ FreeSpaceMayContainInvalidatedSlots::kNo);
}
} // namespace internal
diff --git a/chromium/v8/src/heap/sweeper.h b/chromium/v8/src/heap/sweeper.h
index 97de7a028d1..f6ecba8450c 100644
--- a/chromium/v8/src/heap/sweeper.h
+++ b/chromium/v8/src/heap/sweeper.h
@@ -70,12 +70,8 @@ class Sweeper {
};
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
- enum ClearOldToNewSlotsMode {
- DO_NOT_CLEAR,
- CLEAR_REGULAR_SLOTS,
- CLEAR_TYPED_SLOTS
- };
enum AddPageMode { REGULAR, READD_TEMPORARY_REMOVED_PAGE };
+ enum class FreeSpaceMayContainInvalidatedSlots { kYes, kNo };
Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state);
@@ -83,14 +79,21 @@ class Sweeper {
void AddPage(AllocationSpace space, Page* page, AddPageMode mode);
- int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
- int max_pages = 0);
- int ParallelSweepPage(Page* page, AllocationSpace identity);
+ int ParallelSweepSpace(
+ AllocationSpace identity, int required_freed_bytes, int max_pages = 0,
+ FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
+ FreeSpaceMayContainInvalidatedSlots::kNo);
+ int ParallelSweepPage(
+ Page* page, AllocationSpace identity,
+ FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space =
+ FreeSpaceMayContainInvalidatedSlots::kNo);
void ScheduleIncrementalSweepingTask();
- int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
- FreeSpaceTreatmentMode free_space_mode);
+ int RawSweep(
+ Page* p, FreeListRebuildingMode free_list_mode,
+ FreeSpaceTreatmentMode free_space_mode,
+ FreeSpaceMayContainInvalidatedSlots invalidated_slots_in_free_space);
// After calling this function sweeping is considered to be in progress
// and the main thread can sweep lazily, but the background sweeper tasks