Diffstat (limited to 'deps/v8/src/heap/spaces-inl.h')
-rw-r--r--  deps/v8/src/heap/spaces-inl.h  121
1 file changed, 90 insertions(+), 31 deletions(-)
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 3b4ed8d30a..2feb47bec1 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -172,8 +172,6 @@ bool PagedSpace::Contains(Object o) {
void PagedSpace::UnlinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
page->ForAllFreeListCategories([this](FreeListCategory* category) {
- DCHECK_EQ(free_list(), category->owner());
- category->set_free_list(nullptr);
free_list()->RemoveCategory(category);
});
}
@@ -182,9 +180,8 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
- category->set_free_list(free_list());
added += category->available();
- category->Relink();
+ category->Relink(free_list());
});
DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
@@ -315,10 +312,51 @@ MemoryChunk* OldGenerationMemoryChunkIterator::next() {
UNREACHABLE();
}
-FreeList* FreeListCategory::owner() { return free_list_; }
+bool FreeListCategory::is_linked(FreeList* owner) const {
+ return prev_ != nullptr || next_ != nullptr ||
+ owner->categories_[type_] == this;
+}
+
+void FreeListCategory::UpdateCountersAfterAllocation(size_t allocation_size) {
+ available_ -= allocation_size;
+}
+
+Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
+ FreeListCategory* category_top = top(type);
+ if (category_top != nullptr) {
+ DCHECK(!category_top->top().is_null());
+ return Page::FromHeapObject(category_top->top());
+ } else {
+ return nullptr;
+ }
+}
-bool FreeListCategory::is_linked() {
- return prev_ != nullptr || next_ != nullptr;
+Page* FreeListLegacy::GetPageForSize(size_t size_in_bytes) {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(kHuge);
+ if (!page && static_cast<int>(kLarge) >= minimum_category)
+ page = GetPageForCategoryType(kLarge);
+ if (!page && static_cast<int>(kMedium) >= minimum_category)
+ page = GetPageForCategoryType(kMedium);
+ if (!page && static_cast<int>(kSmall) >= minimum_category)
+ page = GetPageForCategoryType(kSmall);
+ if (!page && static_cast<int>(kTiny) >= minimum_category)
+ page = GetPageForCategoryType(kTiny);
+ if (!page && static_cast<int>(kTiniest) >= minimum_category)
+ page = GetPageForCategoryType(kTiniest);
+ return page;
+}
+
+Page* FreeListFastAlloc::GetPageForSize(size_t size_in_bytes) {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(kHuge);
+ if (!page && static_cast<int>(kLarge) >= minimum_category)
+ page = GetPageForCategoryType(kLarge);
+ if (!page && static_cast<int>(kMedium) >= minimum_category)
+ page = GetPageForCategoryType(kMedium);
+ return page;
}
AllocationResult LocalAllocationBuffer::AllocateRawAligned(
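For orientation, here is a minimal standalone sketch of the category walk in the GetPageForSize hunk above: prefer the largest non-empty category, but never one below the minimum that can satisfy the request. The enum values, thresholds, and helper names below are illustrative stand-ins, not V8's actual declarations.

    #include <array>
    #include <cstddef>
    #include <optional>

    enum CategoryType { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge, kCount };

    // Hypothetical size thresholds, for illustration only.
    CategoryType MinimumCategoryFor(std::size_t size_in_bytes) {
      if (size_in_bytes <= 24) return kTiniest;
      if (size_in_bytes <= 64) return kTiny;
      if (size_in_bytes <= 256) return kSmall;
      if (size_in_bytes <= 2048) return kMedium;
      if (size_in_bytes <= 16384) return kLarge;
      return kHuge;
    }

    // non_empty[i] is true if the free list currently has entries of type i.
    std::optional<CategoryType> PickCategory(
        const std::array<bool, kCount>& non_empty, std::size_t size_in_bytes) {
      const int minimum = static_cast<int>(MinimumCategoryFor(size_in_bytes));
      // Walk from the largest category down, stopping at the minimum type.
      for (int type = kHuge; type >= minimum; --type) {
        if (non_empty[type]) return static_cast<CategoryType>(type);
      }
      return std::nullopt;  // No category is guaranteed to fit the request.
    }

FreeListFastAlloc follows the same shape but, as the hunk shows, only consults kMedium, kLarge, and kHuge.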
@@ -338,11 +376,12 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
return AllocationResult(HeapObject::FromAddress(current_top));
}
-bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
+bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes,
+ AllocationOrigin origin) {
if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
return true;
}
- return SlowRefillLinearAllocationArea(size_in_bytes);
+ return SlowRefillLinearAllocationArea(size_in_bytes, origin);
}
HeapObject PagedSpace::AllocateLinearly(int size_in_bytes) {
@@ -371,19 +410,26 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
return HeapObject::FromAddress(current_top);
}
-AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
+ AllocationOrigin origin) {
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
- if (!EnsureLinearAllocationArea(size_in_bytes)) {
+ if (!EnsureLinearAllocationArea(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
HeapObject object = AllocateLinearly(size_in_bytes);
DCHECK(!object.is_null());
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
return object;
}
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment) {
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
int allocation_size = size_in_bytes;
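A rough standalone model of the origin-tracking pattern threaded through the hunks above: each allocation call now carries an AllocationOrigin, and a per-space counter is bumped only when tracing is enabled, so the default fast path stays unchanged. The flag, enum values, and struct below are simplified stand-ins, not V8's declarations.

    #include <array>
    #include <cstddef>
    #include <cstdio>

    enum class AllocationOrigin { kGeneratedCode, kRuntime, kGC, kCount };

    struct SpaceAllocationStats {
      bool trace_enabled = false;  // stands in for FLAG_trace_allocations_origins
      std::array<std::size_t, static_cast<std::size_t>(AllocationOrigin::kCount)>
          counts{};

      // Mirrors the flag-guarded UpdateAllocationOrigins(origin) calls added
      // after each successful allocation in the hunks above.
      void UpdateAllocationOrigins(AllocationOrigin origin) {
        if (!trace_enabled) return;
        ++counts[static_cast<std::size_t>(origin)];
      }

      void Print() const {
        std::printf("generated_code=%zu runtime=%zu gc=%zu\n", counts[0],
                    counts[1], counts[2]);
      }
    };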
@@ -393,7 +439,7 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
// allocated, so assume the worst case.
int filler_size = Heap::GetMaximumFillToAlign(alignment);
allocation_size += filler_size;
- if (!EnsureLinearAllocationArea(allocation_size)) {
+ if (!EnsureLinearAllocationArea(allocation_size, origin)) {
return AllocationResult::Retry(identity());
}
allocation_size = size_in_bytes;
@@ -401,12 +447,17 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
DCHECK(!object.is_null());
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
+
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
return object;
}
-
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment) {
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
if (top_on_previous_step_ && top() < top_on_previous_step_ &&
SupportsInlineAllocation()) {
// Generated code decreased the top() pointer to do folded allocations.
@@ -421,11 +472,12 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
DCHECK_IMPLIES(!SupportsInlineAllocation(), bytes_since_last == 0);
#ifdef V8_HOST_ARCH_32_BIT
- AllocationResult result = alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment)
- : AllocateRawUnaligned(size_in_bytes);
+ AllocationResult result =
+ alignment != kWordAligned
+ ? AllocateRawAligned(size_in_bytes, alignment, origin)
+ : AllocateRawUnaligned(size_in_bytes, origin);
#else
- AllocationResult result = AllocateRawUnaligned(size_in_bytes);
+ AllocationResult result = AllocateRawUnaligned(size_in_bytes, origin);
#endif
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
@@ -439,13 +491,12 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
return result;
}
-
// -----------------------------------------------------------------------------
// NewSpace
-
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
- AllocationAlignment alignment) {
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
Address top = allocation_info_.top();
int filler_size = Heap::GetFillToAlign(top, alignment);
int aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -472,11 +523,15 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
return obj;
}
-
-AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
+ AllocationOrigin origin) {
Address top = allocation_info_.top();
if (allocation_info_.limit() < top + size_in_bytes) {
// See if we can create room.
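The NewSpace hunks above are the usual bump-pointer allocation paths; below is a compact standalone sketch of the aligned variant, assuming a 32-bit tagged layout (the constants and names are illustrative, not V8's).

    #include <cstdint>

    constexpr std::uintptr_t kTaggedSize = 4;       // assumed 32-bit tagged slot
    constexpr std::uintptr_t kDoubleAlignment = 8;

    // Filler needed so an object placed after it is double-aligned.
    std::uintptr_t FillToAlign(std::uintptr_t top) {
      return (top % kDoubleAlignment == 0) ? 0 : kTaggedSize;
    }

    // Returns the object address, or 0 if top..limit cannot fit filler + object.
    std::uintptr_t AllocateAligned(std::uintptr_t* top, std::uintptr_t limit,
                                   std::uintptr_t size_in_bytes) {
      const std::uintptr_t filler = FillToAlign(*top);
      if (limit - *top < size_in_bytes + filler) return 0;
      const std::uintptr_t object = *top + filler;  // filler precedes the object
      *top = object + size_in_bytes;                // bump the allocation top
      return object;
    }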
@@ -493,12 +548,16 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
+ if (FLAG_trace_allocations_origins) {
+ UpdateAllocationOrigins(origin);
+ }
+
return obj;
}
-
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
- AllocationAlignment alignment) {
+ AllocationAlignment alignment,
+ AllocationOrigin origin) {
if (top() < top_on_previous_step_) {
// Generated code decreased the top() pointer to do folded allocations
DCHECK_EQ(Page::FromAllocationAreaAddress(top()),
@@ -507,8 +566,8 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
}
#ifdef V8_HOST_ARCH_32_BIT
return alignment != kWordAligned
- ? AllocateRawAligned(size_in_bytes, alignment)
- : AllocateRawUnaligned(size_in_bytes);
+ ? AllocateRawAligned(size_in_bytes, alignment, origin)
+ : AllocateRawUnaligned(size_in_bytes, origin);
#else
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
@@ -516,14 +575,14 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
// unaligned access since both x64 and arm64 architectures (where pointer
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
- return AllocateRawUnaligned(size_in_bytes);
+ return AllocateRawUnaligned(size_in_bytes, origin);
#endif
}
V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
- int size_in_bytes, AllocationAlignment alignment) {
+ int size_in_bytes, AllocationAlignment alignment, AllocationOrigin origin) {
base::MutexGuard guard(&mutex_);
- return AllocateRaw(size_in_bytes, alignment);
+ return AllocateRaw(size_in_bytes, alignment, origin);
}
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
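Finally, AllocateRawSynchronized above is simply a mutex-guarded wrapper around the regular allocation path; a minimal standalone illustration of that shape follows (the types are simplified, not V8's).

    #include <cstddef>
    #include <cstdint>
    #include <mutex>

    struct BumpSpace {
      std::mutex mutex_;
      std::uintptr_t top_ = 0x1000;
      std::uintptr_t limit_ = 0x2000;

      // Unsynchronized path; failure/retry handling elided.
      std::uintptr_t AllocateRaw(std::size_t size_in_bytes) {
        if (limit_ - top_ < size_in_bytes) return 0;
        const std::uintptr_t result = top_;
        top_ += size_in_bytes;
        return result;
      }

      // Mirrors the base::MutexGuard guard(&mutex_) in the hunk above.
      std::uintptr_t AllocateRawSynchronized(std::size_t size_in_bytes) {
        std::lock_guard<std::mutex> guard(mutex_);
        return AllocateRaw(size_in_bytes);
      }
    };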