author    Michaël Zasso <targos@protonmail.com>  2022-04-12 11:10:15 +0200
committer Michaël Zasso <targos@protonmail.com>  2022-04-12 22:08:39 +0200
commit    fd4f80ce54d7f7b7503e0999f6a9d293d493846d
tree      00fba34b8aabeb481c7128fccee635719ee44a3b /deps/v8/src/heap/paged-spaces.cc
parent    73d53fe9f56d7ce5de4b9c9ad5257dc601bbce14
deps: update V8 to 10.1.124.6
PR-URL: https://github.com/nodejs/node/pull/42657
Reviewed-By: Darshan Sen <raisinten@gmail.com>
Reviewed-By: Richard Lau <rlau@redhat.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Michael Dawson <midawson@redhat.com>
Diffstat (limited to 'deps/v8/src/heap/paged-spaces.cc')
-rw-r--r--  deps/v8/src/heap/paged-spaces.cc  100
1 file changed, 81 insertions(+), 19 deletions(-)
diff --git a/deps/v8/src/heap/paged-spaces.cc b/deps/v8/src/heap/paged-spaces.cc
index 0db2d5f989..c5604254be 100644
--- a/deps/v8/src/heap/paged-spaces.cc
+++ b/deps/v8/src/heap/paged-spaces.cc
@@ -4,6 +4,8 @@
#include "src/heap/paged-spaces.h"
+#include <atomic>
+
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/execution/isolate.h"
@@ -13,8 +15,10 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
+#include "src/heap/memory-chunk-layout.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"
+#include "src/heap/safepoint.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/string.h"
#include "src/utils/utils.h"
@@ -103,7 +107,7 @@ void PagedSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
- heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
+ heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, chunk);
}
accounting_stats_.Clear();
}
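
The hunk above changes MemoryAllocator::Free from a compile-time template tag (kFull) to a runtime enum argument (kImmediately); the same change recurs later for kPreFreeAndQueue -> kConcurrently. A minimal sketch of that API shape change, with hypothetical stand-in types (not V8's real classes); kImmediately presumably releases the chunk synchronously while kConcurrently queues it for background unmapping:

#include <cassert>

struct ChunkStub {};  // hypothetical stand-in for v8::internal::MemoryChunk

class AllocatorSketch {
 public:
  enum FreeMode { kImmediately, kConcurrently };

  // Old shape (before this patch): the mode is a template parameter,
  // fixed at each call site at compile time.
  template <FreeMode mode>
  void Free(ChunkStub* chunk) { FreeImpl(mode, chunk); }

  // New shape (after this patch): the mode is an ordinary argument, so
  // callers can choose it at runtime and one non-template entry point
  // serves all modes.
  void Free(FreeMode mode, ChunkStub* chunk) { FreeImpl(mode, chunk); }

 private:
  void FreeImpl(FreeMode mode, ChunkStub* chunk) {
    assert(chunk != nullptr);
    // kImmediately: release the chunk right away.
    // kConcurrently: hand it to a background unmapper queue.
    (void)mode;
  }
};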
@@ -211,15 +215,42 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
}
size_t PagedSpace::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits()) return CommittedMemory();
+ if (!base::OS::HasLazyCommits()) {
+ DCHECK_EQ(0, committed_physical_memory());
+ return CommittedMemory();
+ }
BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
- base::MutexGuard guard(mutex());
+ return committed_physical_memory();
+}
+
+void PagedSpace::IncrementCommittedPhysicalMemory(size_t increment_value) {
+ if (!base::OS::HasLazyCommits() || increment_value == 0) return;
+ size_t old_value = committed_physical_memory_.fetch_add(
+ increment_value, std::memory_order_relaxed);
+ USE(old_value);
+ DCHECK_LT(old_value, old_value + increment_value);
+}
+
+void PagedSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) {
+ if (!base::OS::HasLazyCommits() || decrement_value == 0) return;
+ size_t old_value = committed_physical_memory_.fetch_sub(
+ decrement_value, std::memory_order_relaxed);
+ USE(old_value);
+ DCHECK_GT(old_value, old_value - decrement_value);
+}
+
+#if DEBUG
+void PagedSpace::VerifyCommittedPhysicalMemory() {
+ heap()->safepoint()->AssertActive();
size_t size = 0;
for (Page* page : *this) {
+ DCHECK(page->SweepingDone());
size += page->CommittedPhysicalMemory();
}
- return size;
+ // Ensure that the space's counter matches the sum of all page counters.
+ DCHECK_EQ(size, CommittedPhysicalMemory());
}
+#endif // DEBUG
bool PagedSpace::ContainsSlow(Address addr) const {
Page* p = Page::FromAddress(addr);
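
The counter updated by the two new functions in the hunk above is a std::atomic<size_t> member (its declaration lives in paged-spaces.h, outside this diff). As a standalone illustration of the same relaxed fetch_add/fetch_sub pattern, including the overflow/underflow checks the DCHECKs express — names here are illustrative, not V8's:

#include <atomic>
#include <cassert>
#include <cstddef>

// Relaxed ordering suffices because the counter is a pure statistic: no
// other memory accesses are published through it, only the value itself.
class CommittedCounterSketch {
 public:
  void Increment(size_t v) {
    if (v == 0) return;
    size_t old = value_.fetch_add(v, std::memory_order_relaxed);
    assert(old + v > old);  // same intent as DCHECK_LT: no size_t overflow
    (void)old;
  }

  void Decrement(size_t v) {
    if (v == 0) return;
    size_t old = value_.fetch_sub(v, std::memory_order_relaxed);
    assert(old >= v);  // same intent as DCHECK_GT: no size_t underflow
    (void)old;
  }

  size_t Value() const { return value_.load(std::memory_order_relaxed); }

 private:
  std::atomic<size_t> value_{0};
};

With the counter in place, CommittedPhysicalMemory() becomes a single atomic load, which is why the hunk also drops the base::MutexGuard and the per-page summation loop from the fast path, keeping the loop only in the DEBUG-only verifier.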
@@ -264,6 +295,7 @@ size_t PagedSpace::AddPage(Page* page) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
+ IncrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
return RelinkFreeListCategories(page);
}
@@ -278,6 +310,7 @@ void PagedSpace::RemovePage(Page* page) {
ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
}
+ DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
}
void PagedSpace::SetTopAndLimit(Address top, Address limit) {
@@ -319,8 +352,8 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
}
Page* PagedSpace::AllocatePage() {
- return heap()->memory_allocator()->AllocatePage(AreaSize(), this,
- executable());
+ return heap()->memory_allocator()->AllocatePage(
+ MemoryAllocator::kRegular, AreaSize(), this, executable());
}
Page* PagedSpace::Expand() {
@@ -334,7 +367,7 @@ Page* PagedSpace::Expand() {
}
base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
- LocalHeap* local_heap, size_t size_in_bytes) {
+ size_t size_in_bytes) {
Page* page = AllocatePage();
if (page == nullptr) return {};
base::MutexGuard lock(&space_mutex_);
@@ -346,6 +379,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
CHECK_LE(size_in_bytes, page->area_size());
Free(page->area_start() + size_in_bytes, page->area_size() - size_in_bytes,
SpaceAccountingMode::kSpaceAccounted);
+ AddRangeToActiveSystemPages(page, object_start, object_start + size_in_bytes);
return std::make_pair(object_start, size_in_bytes);
}
@@ -492,8 +526,9 @@ void PagedSpace::ReleasePage(Page* page) {
}
AccountUncommitted(page->size());
+ DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
accounting_stats_.DecreaseCapacity(page->area_size());
- heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+ heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
}
void PagedSpace::SetReadable() {
@@ -573,6 +608,7 @@ bool PagedSpace::TryAllocationFromFreeListMain(size_t size_in_bytes,
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
+ AddRangeToActiveSystemPages(page, start, limit);
return true;
}
@@ -585,10 +621,11 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
identity() == MAP_SPACE);
DCHECK(origin == AllocationOrigin::kRuntime ||
origin == AllocationOrigin::kGC);
+ DCHECK_IMPLIES(!local_heap, origin == AllocationOrigin::kGC);
base::Optional<std::pair<Address, size_t>> result =
- TryAllocationFromFreeListBackground(local_heap, min_size_in_bytes,
- max_size_in_bytes, alignment, origin);
+ TryAllocationFromFreeListBackground(min_size_in_bytes, max_size_in_bytes,
+ alignment, origin);
if (result) return result;
MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -600,7 +637,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
// Retry the free list allocation.
result = TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
if (IsSweepingAllowedOnThread(local_heap)) {
@@ -619,8 +656,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
result = TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment,
- origin);
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
if (result) return result;
}
}
@@ -628,7 +664,7 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
if (heap()->ShouldExpandOldGenerationOnSlowAllocation(local_heap) &&
heap()->CanExpandOldGenerationBackground(local_heap, AreaSize())) {
- result = ExpandBackground(local_heap, max_size_in_bytes);
+ result = ExpandBackground(max_size_in_bytes);
if (result) {
DCHECK_EQ(Heap::GetFillToAlign(result->first, alignment), 0);
return result;
@@ -645,15 +681,14 @@ base::Optional<std::pair<Address, size_t>> PagedSpace::RawRefillLabBackground(
// Last try to acquire memory from free list.
return TryAllocationFromFreeListBackground(
- local_heap, min_size_in_bytes, max_size_in_bytes, alignment, origin);
+ min_size_in_bytes, max_size_in_bytes, alignment, origin);
}
return {};
}
base::Optional<std::pair<Address, size_t>>
-PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
- size_t min_size_in_bytes,
+PagedSpace::TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
size_t max_size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
@@ -694,13 +729,15 @@ PagedSpace::TryAllocationFromFreeListBackground(LocalHeap* local_heap,
}
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
+ AddRangeToActiveSystemPages(page, start, limit);
return std::make_pair(start, used_size_in_bytes);
}
bool PagedSpace::IsSweepingAllowedOnThread(LocalHeap* local_heap) {
// Code space sweeping is only allowed on main thread.
- return local_heap->is_main_thread() || identity() != CODE_SPACE;
+ return (local_heap && local_heap->is_main_thread()) ||
+ identity() != CODE_SPACE;
}
#ifdef DEBUG
@@ -742,7 +779,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
Map map = object.map(cage_base);
CHECK(map.IsMap(cage_base));
CHECK(ReadOnlyHeap::Contains(map) ||
- isolate->heap()->map_space()->Contains(map));
+ isolate->heap()->space_for_maps()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -772,6 +809,9 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
external_space_bytes[t] += external_page_bytes[t];
}
+
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
}
for (int i = 0; i < kNumTypes; i++) {
if (i == ExternalBackingStoreType::kArrayBuffer) continue;
@@ -1003,6 +1043,28 @@ AllocationResult PagedSpace::AllocateRawSlow(int size_in_bytes,
return result;
}
+void PagedSpace::AddRangeToActiveSystemPages(Page* page, Address start,
+ Address end) {
+ DCHECK_LE(page->address(), start);
+ DCHECK_LT(start, end);
+ DCHECK_LE(end, page->address() + Page::kPageSize);
+
+ const size_t added_pages = page->active_system_pages()->Add(
+ start - page->address(), end - page->address(),
+ MemoryAllocator::GetCommitPageSizeBits());
+
+ IncrementCommittedPhysicalMemory(added_pages *
+ MemoryAllocator::GetCommitPageSize());
+}
+
+void PagedSpace::ReduceActiveSystemPages(
+ Page* page, ActiveSystemPages active_system_pages) {
+ const size_t reduced_pages =
+ page->active_system_pages()->Reduce(active_system_pages);
+ DecrementCommittedPhysicalMemory(reduced_pages *
+ MemoryAllocator::GetCommitPageSize());
+}
+
// -----------------------------------------------------------------------------
// MapSpace implementation
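
ActiveSystemPages, used by the two helpers above, is defined elsewhere in V8 and not shown in this diff. Conceptually it appears to be a per-page bitmap of which commit-sized OS pages are in use, where Add() returns the number of newly set bits and Reduce() the number cleared, so both helpers can convert the delta into bytes for the committed-memory counter. A rough, hypothetical sketch of that idea (an assumption about the concept, not V8's implementation):

#include <bitset>
#include <cstddef>
#include <cstdint>

class ActiveSystemPagesSketch {
 public:
  // start/end are byte offsets within the heap page; page_size_bits is
  // log2 of the OS commit page size (e.g. 12 for 4 KiB pages).
  // Returns how many system pages became newly active.
  size_t Add(uintptr_t start, uintptr_t end, size_t page_size_bits) {
    const size_t page_size = size_t{1} << page_size_bits;
    const size_t first = start >> page_size_bits;
    const size_t last = (end + page_size - 1) >> page_size_bits;
    size_t added = 0;
    for (size_t i = first; i < last && i < kMaxPages; i++) {
      if (!bits_.test(i)) {
        bits_.set(i);
        added++;
      }
    }
    return added;
  }

  // Drops every page not present in the updated set; returns how many
  // pages were deactivated.
  size_t Reduce(const ActiveSystemPagesSketch& updated) {
    size_t removed = 0;
    for (size_t i = 0; i < kMaxPages; i++) {
      if (bits_.test(i) && !updated.bits_.test(i)) {
        bits_.reset(i);
        removed++;
      }
    }
    return removed;
  }

 private:
  static constexpr size_t kMaxPages = 64;  // e.g. a 256 KiB page / 4 KiB
  std::bitset<kMaxPages> bits_;
};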