Diffstat (limited to 'deps/v8/src/heap/concurrent-allocator.cc')
 deps/v8/src/heap/concurrent-allocator.cc | 160
 1 file changed, 129 insertions(+), 31 deletions(-)
diff --git a/deps/v8/src/heap/concurrent-allocator.cc b/deps/v8/src/heap/concurrent-allocator.cc
index 4b02b14170..b4494d9a63 100644
--- a/deps/v8/src/heap/concurrent-allocator.cc
+++ b/deps/v8/src/heap/concurrent-allocator.cc
@@ -8,7 +8,9 @@
#include "src/execution/isolate.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
+#include "src/heap/gc-tracer-inl.h"
#include "src/heap/heap.h"
+#include "src/heap/linear-allocation-area.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/local-heap.h"
#include "src/heap/marking.h"
@@ -41,7 +43,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
heap->CreateFillerObjectAtBackground(result.ToAddress(),
kSmallObjectSize);
} else {
- local_heap.TryPerformCollection();
+ heap->CollectGarbageFromAnyThread(&local_heap);
}
result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
@@ -51,7 +53,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
heap->CreateFillerObjectAtBackground(result.ToAddress(),
kMediumObjectSize);
} else {
- local_heap.TryPerformCollection();
+ heap->CollectGarbageFromAnyThread(&local_heap);
}
result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
@@ -61,7 +63,7 @@ void StressConcurrentAllocatorTask::RunInternal() {
heap->CreateFillerObjectAtBackground(result.ToAddress(),
kLargeObjectSize);
} else {
- local_heap.TryPerformCollection();
+ heap->CollectGarbageFromAnyThread(&local_heap);
}
local_heap.Safepoint();
}
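
For readers following the StressConcurrentAllocatorTask hunks above: all three size classes share the same shape, which the sketch below isolates. It compiles on its own; HeapStub and LocalHeapStub are placeholder types invented for illustration, and only the control flow (allocate, pave the result with a filler, otherwise request a GC through the owning Heap, then safepoint) mirrors the diff.

#include <cstddef>
#include <cstdint>
#include <optional>

// Hypothetical stand-ins for the heap interfaces exercised by the stress task.
struct LocalHeapStub {
  std::optional<std::uintptr_t> TryAllocate(std::size_t /*size*/) {
    return std::nullopt;  // stubbed: always "fails" in this sketch
  }
  void Safepoint() {}  // stubbed: the real thread may park here for a GC
};

struct HeapStub {
  void CreateFillerObjectAtBackground(std::uintptr_t /*start*/,
                                      std::size_t /*size*/) {}
  void CollectGarbageFromAnyThread(LocalHeapStub* /*local_heap*/) {}
};

// One allocation attempt: on success, pave the new object with a filler so
// the heap stays iterable; on failure, request a collection through the
// owning Heap, as the diff does in place of LocalHeap::TryPerformCollection.
void AllocateOrCollect(HeapStub* heap, LocalHeapStub* local_heap,
                       std::size_t size) {
  if (auto address = local_heap->TryAllocate(size)) {
    heap->CreateFillerObjectAtBackground(*address, size);
  } else {
    heap->CollectGarbageFromAnyThread(local_heap);
  }
  local_heap->Safepoint();  // give any requested GC a chance to run
}
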
@@ -77,24 +79,35 @@ void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
kDelayInSeconds);
}
+ConcurrentAllocator::ConcurrentAllocator(LocalHeap* local_heap,
+ PagedSpace* space)
+ : local_heap_(local_heap), space_(space), owning_heap_(space_->heap()) {}
+
void ConcurrentAllocator::FreeLinearAllocationArea() {
// The code page of the linear allocation area needs to be unprotected
// because we are going to write a filler into that memory area below.
base::Optional<CodePageMemoryModificationScope> optional_scope;
- if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
+ if (IsLabValid() && space_->identity() == CODE_SPACE) {
optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
}
- lab_.CloseAndMakeIterable();
+ if (lab_.top() != lab_.limit() &&
+ owning_heap()->incremental_marking()->black_allocation()) {
+ Page::FromAddress(lab_.top())
+ ->DestroyBlackAreaBackground(lab_.top(), lab_.limit());
+ }
+
+ MakeLabIterable();
+ ResetLab();
}
void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
// The code page of the linear allocation area needs to be unprotected
// because we are going to write a filler into that memory area below.
base::Optional<CodePageMemoryModificationScope> optional_scope;
- if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
+ if (IsLabValid() && space_->identity() == CODE_SPACE) {
optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
}
- lab_.MakeIterable();
+ MakeLabIterable();
}
void ConcurrentAllocator::MarkLinearAllocationAreaBlack() {
@@ -128,54 +141,134 @@ void ConcurrentAllocator::UnmarkLinearAllocationArea() {
AllocationResult ConcurrentAllocator::AllocateInLabSlow(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
- if (!EnsureLab(origin)) {
+ if (!AllocateLab(origin)) {
return AllocationResult::Failure();
}
AllocationResult allocation =
- lab_.AllocateRawAligned(size_in_bytes, alignment);
+ AllocateInLabFastAligned(size_in_bytes, alignment);
DCHECK(!allocation.IsFailure());
return allocation;
}
-bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
- auto result = space_->RawAllocateBackground(local_heap_, kMinLabSize,
- kMaxLabSize, origin);
+bool ConcurrentAllocator::AllocateLab(AllocationOrigin origin) {
+ auto result = AllocateFromSpaceFreeList(kMinLabSize, kMaxLabSize, origin);
if (!result) return false;
+ owning_heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
+
+ FreeLinearAllocationArea();
+
+ Address lab_start = result->first;
+ Address lab_end = lab_start + result->second;
+ lab_ = LinearAllocationArea(lab_start, lab_end);
+ DCHECK(IsLabValid());
+
if (IsBlackAllocationEnabled()) {
- Address top = result->first;
- Address limit = top + result->second;
+ Address top = lab_.top();
+ Address limit = lab_.limit();
Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
}
- HeapObject object = HeapObject::FromAddress(result->first);
- LocalAllocationBuffer saved_lab = std::move(lab_);
- lab_ = LocalAllocationBuffer::FromResult(
- space_->heap(), AllocationResult::FromObject(object), result->second);
- DCHECK(lab_.IsValid());
- if (!lab_.TryMerge(&saved_lab)) {
- saved_lab.CloseAndMakeIterable();
- }
return true;
}
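
AllocateInLabSlow and AllocateLab above are the slow half of a bump-pointer allocator: the fast path bumps top within the current LinearAllocationArea, and the slow path installs a fresh [start, start + size) range and retries. A compilable sketch of that split follows; Lab, AllocateInLabFast and AllocateSlow are placeholder names, alignment handling is omitted, and the refill step is passed in as a callback rather than coming from V8's space free list.

#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>

// A linear allocation area reduced to its essentials: a [top, limit) range.
struct Lab {
  std::uintptr_t top = 0;
  std::uintptr_t limit = 0;
};

// Fast path: bump the top pointer if the request fits.
inline std::optional<std::uintptr_t> AllocateInLabFast(Lab& lab,
                                                       std::size_t size) {
  if (lab.limit - lab.top < size) return std::nullopt;
  const std::uintptr_t result = lab.top;
  lab.top += size;
  return result;
}

// Slow path: fetch a new area from the space (stubbed as a callback that
// returns a start address and size), install it, and retry the fast path,
// which must now succeed for any request no larger than the minimum LAB size.
template <typename RefillFn>
std::optional<std::uintptr_t> AllocateSlow(Lab& lab, std::size_t size,
                                           RefillFn refill) {
  std::optional<std::pair<std::uintptr_t, std::size_t>> range = refill();
  if (!range) return std::nullopt;  // out of memory: caller reports failure
  lab = Lab{range->first, range->first + range->second};
  return AllocateInLabFast(lab, size);
}

In the diff, the refill step is AllocateLab, which additionally frees the previous area, may start incremental marking when the allocation limit has been reached, and marks the fresh range black while black allocation is enabled.
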
+base::Optional<std::pair<Address, size_t>>
+ConcurrentAllocator::AllocateFromSpaceFreeList(size_t min_size_in_bytes,
+ size_t max_size_in_bytes,
+ AllocationOrigin origin) {
+ DCHECK(!space_->is_compaction_space());
+ DCHECK(space_->identity() == OLD_SPACE || space_->identity() == CODE_SPACE ||
+ space_->identity() == MAP_SPACE || space_->identity() == SHARED_SPACE);
+ DCHECK(origin == AllocationOrigin::kRuntime ||
+ origin == AllocationOrigin::kGC);
+ DCHECK_IMPLIES(!local_heap_, origin == AllocationOrigin::kGC);
+
+ base::Optional<std::pair<Address, size_t>> result =
+ space_->TryAllocationFromFreeListBackground(min_size_in_bytes,
+ max_size_in_bytes, origin);
+ if (result) return result;
+
+ // Sweeping is still in progress.
+ if (owning_heap()->sweeping_in_progress()) {
+ // First try to refill the free-list, concurrent sweeper threads
+ // may have freed some objects in the meantime.
+ {
+ TRACE_GC_EPOCH(owning_heap()->tracer(),
+ GCTracer::Scope::MC_BACKGROUND_SWEEPING,
+ ThreadKind::kBackground);
+ space_->RefillFreeList();
+ }
+
+ // Retry the free list allocation.
+ result = space_->TryAllocationFromFreeListBackground(
+ min_size_in_bytes, max_size_in_bytes, origin);
+ if (result) return result;
+
+ // Now contribute to sweeping from background thread and then try to
+ // reallocate.
+ int max_freed;
+ {
+ TRACE_GC_EPOCH(owning_heap()->tracer(),
+ GCTracer::Scope::MC_BACKGROUND_SWEEPING,
+ ThreadKind::kBackground);
+ const int kMaxPagesToSweep = 1;
+ max_freed = owning_heap()->sweeper()->ParallelSweepSpace(
+ space_->identity(), Sweeper::SweepingMode::kLazyOrConcurrent,
+ static_cast<int>(min_size_in_bytes), kMaxPagesToSweep);
+ space_->RefillFreeList();
+ }
+
+ if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
+ result = space_->TryAllocationFromFreeListBackground(
+ min_size_in_bytes, max_size_in_bytes, origin);
+ if (result) return result;
+ }
+ }
+
+ if (owning_heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap_) &&
+ owning_heap()->CanExpandOldGenerationBackground(local_heap_,
+ space_->AreaSize())) {
+ result = space_->TryExpandBackground(max_size_in_bytes);
+ if (result) return result;
+ }
+
+ if (owning_heap()->sweeping_in_progress()) {
+ // Complete sweeping for this space.
+ TRACE_GC_EPOCH(owning_heap()->tracer(),
+ GCTracer::Scope::MC_BACKGROUND_SWEEPING,
+ ThreadKind::kBackground);
+ owning_heap()->DrainSweepingWorklistForSpace(space_->identity());
+
+ space_->RefillFreeList();
+
+ // Last try to acquire memory from free list.
+ return space_->TryAllocationFromFreeListBackground(
+ min_size_in_bytes, max_size_in_bytes, origin);
+ }
+
+ return {};
+}
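
The new AllocateFromSpaceFreeList above replaces the removed space_->RawAllocateBackground calls and encodes a specific fallback order: free list, then refill the free list from concurrent sweeping, then contribute one page of sweeping, then expand the space, and finally finish sweeping for this space. The sketch below captures only that ordering; SlowPathHooks and every callable in it are placeholders rather than V8 API, and details such as the GC tracing scopes and the old-generation expansion checks are left out.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <optional>
#include <utility>

using Range = std::pair<std::uintptr_t, std::size_t>;  // start, size in bytes

// Every step is stubbed as a callable so the ordering itself is the point.
struct SlowPathHooks {
  std::function<std::optional<Range>()> try_free_list;
  std::function<bool()> sweeping_in_progress;
  std::function<void()> refill_free_list;       // pick up concurrently swept memory
  std::function<std::size_t()> sweep_one_page;  // contribute to sweeping, returns bytes freed
  std::function<bool()> can_expand;             // heap allows growing the old generation
  std::function<std::optional<Range>()> expand_space;
  std::function<void()> finish_sweeping;        // drain the sweeping worklist for this space
};

std::optional<Range> AllocateSlowPathSketch(const SlowPathHooks& h,
                                            std::size_t min_size) {
  if (auto r = h.try_free_list()) return r;
  if (h.sweeping_in_progress()) {
    h.refill_free_list();  // 1) concurrent sweepers may have freed memory already
    if (auto r = h.try_free_list()) return r;
    const std::size_t freed = h.sweep_one_page();  // 2) help sweep one page, then retry
    h.refill_free_list();
    if (freed >= min_size) {
      if (auto r = h.try_free_list()) return r;
    }
  }
  if (h.can_expand()) {  // 3) grow the space by a fresh page
    if (auto r = h.expand_space()) return r;
  }
  if (h.sweeping_in_progress()) {  // 4) last resort: finish sweeping this space
    h.finish_sweeping();
    h.refill_free_list();
    return h.try_free_list();
  }
  return std::nullopt;  // caller reports failure and may trigger a full GC
}

Keeping the cheap options first means a background allocation only waits for sweeping of the whole space to finish when everything else has failed.
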
+
AllocationResult ConcurrentAllocator::AllocateOutsideLab(
int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
// Conservative estimate as we don't know the alignment of the allocation.
const int requested_filler_size = Heap::GetMaximumFillToAlign(alignment);
const int aligned_size_in_bytes = size_in_bytes + requested_filler_size;
- auto result = space_->RawAllocateBackground(
- local_heap_, aligned_size_in_bytes, aligned_size_in_bytes, origin);
+ auto result = AllocateFromSpaceFreeList(aligned_size_in_bytes,
+ aligned_size_in_bytes, origin);
if (!result) return AllocationResult::Failure();
+
+ owning_heap()->StartIncrementalMarkingIfAllocationLimitIsReachedBackground();
+
DCHECK_GE(result->second, aligned_size_in_bytes);
- HeapObject object =
- (requested_filler_size)
- ? owning_heap()->AlignWithFiller(
- HeapObject::FromAddress(result->first), size_in_bytes,
- static_cast<int>(result->second), alignment)
- : HeapObject::FromAddress(result->first);
+ HeapObject object = HeapObject::FromAddress(result->first);
+ if (requested_filler_size > 0) {
+ object = owning_heap()->AlignWithFiller(
+ object, size_in_bytes, static_cast<int>(result->second), alignment);
+ }
+
if (IsBlackAllocationEnabled()) {
owning_heap()->incremental_marking()->MarkBlackBackground(object,
size_in_bytes);
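
AllocateOutsideLab in the hunk above over-requests by Heap::GetMaximumFillToAlign(alignment) and then, when any fill is actually required, realigns the object start with a filler via AlignWithFiller. The self-contained illustration below shows just that arithmetic; RoundUpSketch and AlignWithFillerSketch are placeholder names and a power-of-two alignment is assumed.

#include <cstddef>
#include <cstdint>

// Round an address up to the next multiple of a power-of-two alignment.
constexpr std::uintptr_t RoundUpSketch(std::uintptr_t address,
                                       std::size_t alignment) {
  return (address + alignment - 1) & ~static_cast<std::uintptr_t>(alignment - 1);
}

struct AlignedAllocationSketch {
  std::uintptr_t object_start;  // aligned address handed back to the caller
  std::size_t filler_size;      // bytes of filler written in front of it
};

// Given the start of an over-sized raw block, compute where the object must
// begin and how much filler precedes it; in this byte-granular sketch the
// worst case is alignment - 1 bytes, playing the role of the
// requested_filler_size slack added to the request in the diff.
AlignedAllocationSketch AlignWithFillerSketch(std::uintptr_t raw_start,
                                              std::size_t alignment) {
  const std::uintptr_t aligned = RoundUpSketch(raw_start, alignment);
  return {aligned, static_cast<std::size_t>(aligned - raw_start)};
}
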
@@ -187,7 +280,12 @@ bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
return owning_heap()->incremental_marking()->black_allocation();
}
-Heap* ConcurrentAllocator::owning_heap() const { return space_->heap(); }
+void ConcurrentAllocator::MakeLabIterable() {
+ if (IsLabValid()) {
+ owning_heap()->CreateFillerObjectAtBackground(
+ lab_.top(), static_cast<int>(lab_.limit() - lab_.top()));
+ }
+}
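
The MakeLabIterable helper above keeps background-freed allocation areas walkable by writing a filler object over the unused [top, limit) tail. A minimal self-contained sketch of that idea follows; LabSketch and MakeIterableSketch are hypothetical names, and plain zeroing stands in for the real CreateFillerObjectAtBackground filler.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Hypothetical linear allocation area: a raw [top, limit) range.
struct LabSketch {
  std::uintptr_t top = 0;
  std::uintptr_t limit = 0;
  bool IsValid() const { return top != 0; }
};

// Padding the unused tail keeps the page iterable: an object-by-object heap
// walk never encounters uninitialized memory between the last allocated
// object and the area's limit. Zeroing is a stand-in for writing a filler.
void MakeIterableSketch(LabSketch& lab) {
  if (!lab.IsValid()) return;
  const std::size_t unused = lab.limit - lab.top;
  if (unused > 0) {
    std::memset(reinterpret_cast<void*>(lab.top), 0, unused);
  }
}
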
} // namespace internal
} // namespace v8