path: root/chromium/v8/src/heap
author     Allan Sandfeld Jensen <allan.jensen@qt.io>    2019-08-30 10:22:43 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>    2019-08-30 12:36:28 +0000
commit     271a6c3487a14599023a9106329505597638d793 (patch)
tree       e040d58ffc86c1480b79ca8528020ca9ec919bf8 /chromium/v8/src/heap
parent     7b2ffa587235a47d4094787d72f38102089f402a (diff)
BASELINE: Update Chromium to 77.0.3865.59
Change-Id: I1e89a5f3b009a9519a6705102ad65c92fe736f21
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/v8/src/heap')
-rw-r--r--  chromium/v8/src/heap/OWNERS | 2
-rw-r--r--  chromium/v8/src/heap/array-buffer-tracker-inl.h | 2
-rw-r--r--  chromium/v8/src/heap/array-buffer-tracker.h | 2
-rw-r--r--  chromium/v8/src/heap/basic-memory-chunk.cc | 54
-rw-r--r--  chromium/v8/src/heap/basic-memory-chunk.h | 229
-rw-r--r--  chromium/v8/src/heap/code-stats.cc | 6
-rw-r--r--  chromium/v8/src/heap/combined-heap.cc | 10
-rw-r--r--  chromium/v8/src/heap/combined-heap.h | 20
-rw-r--r--  chromium/v8/src/heap/concurrent-marking.cc | 30
-rw-r--r--  chromium/v8/src/heap/embedder-tracing.cc | 6
-rw-r--r--  chromium/v8/src/heap/embedder-tracing.h | 21
-rw-r--r--  chromium/v8/src/heap/factory-inl.h | 9
-rw-r--r--  chromium/v8/src/heap/factory.cc | 280
-rw-r--r--  chromium/v8/src/heap/factory.h | 48
-rw-r--r--  chromium/v8/src/heap/gc-tracer.cc | 29
-rw-r--r--  chromium/v8/src/heap/gc-tracer.h | 2
-rw-r--r--  chromium/v8/src/heap/heap-controller.cc | 21
-rw-r--r--  chromium/v8/src/heap/heap-controller.h | 7
-rw-r--r--  chromium/v8/src/heap/heap-inl.h | 17
-rw-r--r--  chromium/v8/src/heap/heap-write-barrier-inl.h | 48
-rw-r--r--  chromium/v8/src/heap/heap-write-barrier.h | 2
-rw-r--r--  chromium/v8/src/heap/heap.cc | 649
-rw-r--r--  chromium/v8/src/heap/heap.h | 214
-rw-r--r--  chromium/v8/src/heap/incremental-marking.cc | 39
-rw-r--r--  chromium/v8/src/heap/incremental-marking.h | 12
-rw-r--r--  chromium/v8/src/heap/item-parallel-job.cc | 7
-rw-r--r--  chromium/v8/src/heap/item-parallel-job.h | 6
-rw-r--r--  chromium/v8/src/heap/mark-compact.cc | 194
-rw-r--r--  chromium/v8/src/heap/object-stats.cc | 2
-rw-r--r--  chromium/v8/src/heap/objects-visiting-inl.h | 28
-rw-r--r--  chromium/v8/src/heap/objects-visiting.h | 5
-rw-r--r--  chromium/v8/src/heap/read-only-heap-inl.h | 31
-rw-r--r--  chromium/v8/src/heap/read-only-heap.cc | 85
-rw-r--r--  chromium/v8/src/heap/read-only-heap.h | 23
-rw-r--r--  chromium/v8/src/heap/remembered-set.h | 4
-rw-r--r--  chromium/v8/src/heap/scavenger-inl.h | 24
-rw-r--r--  chromium/v8/src/heap/scavenger.cc | 22
-rw-r--r--  chromium/v8/src/heap/setup-heap-internal.cc | 16
-rw-r--r--  chromium/v8/src/heap/spaces-inl.h | 36
-rw-r--r--  chromium/v8/src/heap/spaces.cc | 574
-rw-r--r--  chromium/v8/src/heap/spaces.h | 1103
-rw-r--r--  chromium/v8/src/heap/store-buffer.cc | 11
-rw-r--r--  chromium/v8/src/heap/stress-marking-observer.cc | 8
-rw-r--r--  chromium/v8/src/heap/stress-marking-observer.h | 4
-rw-r--r--  chromium/v8/src/heap/stress-scavenge-observer.cc | 24
-rw-r--r--  chromium/v8/src/heap/stress-scavenge-observer.h | 4
-rw-r--r--  chromium/v8/src/heap/sweeper.cc | 12
47 files changed, 2416 insertions, 1566 deletions
diff --git a/chromium/v8/src/heap/OWNERS b/chromium/v8/src/heap/OWNERS
index 79eea3aaaba..d826296e0c9 100644
--- a/chromium/v8/src/heap/OWNERS
+++ b/chromium/v8/src/heap/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
hpayer@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
diff --git a/chromium/v8/src/heap/array-buffer-tracker-inl.h b/chromium/v8/src/heap/array-buffer-tracker-inl.h
index 61b5ba1f8cc..65d3f4a732a 100644
--- a/chromium/v8/src/heap/array-buffer-tracker-inl.h
+++ b/chromium/v8/src/heap/array-buffer-tracker-inl.h
@@ -57,8 +57,6 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer buffer) {
heap->update_external_memory(-static_cast<intptr_t>(length));
}
-Space* LocalArrayBufferTracker::space() { return page_->owner(); }
-
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
diff --git a/chromium/v8/src/heap/array-buffer-tracker.h b/chromium/v8/src/heap/array-buffer-tracker.h
index e8ca57b5430..b7950c25069 100644
--- a/chromium/v8/src/heap/array-buffer-tracker.h
+++ b/chromium/v8/src/heap/array-buffer-tracker.h
@@ -117,8 +117,6 @@ class LocalArrayBufferTracker {
// logic for updating external memory counters.
inline void AddInternal(JSArrayBuffer buffer, size_t length);
- inline Space* space();
-
Page* page_;
// The set contains raw heap pointers which are removed by the GC upon
// processing the tracker through its owning page.
diff --git a/chromium/v8/src/heap/basic-memory-chunk.cc b/chromium/v8/src/heap/basic-memory-chunk.cc
new file mode 100644
index 00000000000..307f0ec973f
--- /dev/null
+++ b/chromium/v8/src/heap/basic-memory-chunk.cc
@@ -0,0 +1,54 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/basic-memory-chunk.h"
+
+#include <cstdlib>
+
+#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/slots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Verify write barrier offsets match the real offsets.
+STATIC_ASSERT(BasicMemoryChunk::Flag::INCREMENTAL_MARKING ==
+ heap_internals::MemoryChunk::kMarkingBit);
+STATIC_ASSERT(BasicMemoryChunk::Flag::FROM_PAGE ==
+ heap_internals::MemoryChunk::kFromPageBit);
+STATIC_ASSERT(BasicMemoryChunk::Flag::TO_PAGE ==
+ heap_internals::MemoryChunk::kToPageBit);
+STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
+ heap_internals::MemoryChunk::kFlagsOffset);
+STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
+ heap_internals::MemoryChunk::kHeapOffset);
+
+BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
+ Address area_end) {
+ const Address base = reinterpret_cast<Address>(this);
+ size_ = size;
+ marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
+ header_sentinel_ = HeapObject::FromAddress(base).ptr();
+ DCHECK(HasHeaderSentinel(area_start));
+ area_start_ = area_start;
+ area_end_ = area_end;
+}
+
+// static
+bool BasicMemoryChunk::HasHeaderSentinel(Address slot_addr) {
+ Address base = BaseAddress(slot_addr);
+ if (slot_addr < base + kHeaderSize) return false;
+ return HeapObject::FromAddress(base) ==
+ ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
+}
+
+void BasicMemoryChunk::ReleaseMarkingBitmap() {
+ DCHECK_NOT_NULL(marking_bitmap_);
+ free(marking_bitmap_);
+ marking_bitmap_ = nullptr;
+}
+
+} // namespace internal
+} // namespace v8
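
For reference, a minimal standalone sketch of the base-address arithmetic behind BaseAddress() and HasHeaderSentinel() above: chunks are aligned to kAlignment, so masking the low bits of any interior address recovers the chunk header. The 256 KiB alignment (kPageSizeBits == 18) and the addresses are illustrative assumptions, not values taken from this diff.

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed chunk alignment: 1 << 18 (256 KiB); V8 derives it from kPageSizeBits.
  const uintptr_t kAlignment = uintptr_t{1} << 18;
  const uintptr_t kAlignmentMask = kAlignment - 1;
  const uintptr_t chunk_base = 0x00007f3a00040000;  // hypothetical chunk start
  const uintptr_t interior = chunk_base + 0x1234;   // some address inside the chunk
  const uintptr_t recovered = interior & ~kAlignmentMask;  // BaseAddress(interior)
  std::printf("recovered base matches: %d\n", static_cast<int>(recovered == chunk_base));
  return 0;
}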
diff --git a/chromium/v8/src/heap/basic-memory-chunk.h b/chromium/v8/src/heap/basic-memory-chunk.h
new file mode 100644
index 00000000000..65fc072bd24
--- /dev/null
+++ b/chromium/v8/src/heap/basic-memory-chunk.h
@@ -0,0 +1,229 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_BASIC_MEMORY_CHUNK_H_
+#define V8_HEAP_BASIC_MEMORY_CHUNK_H_
+
+#include <type_traits>
+
+#include "src/base/atomic-utils.h"
+#include "src/common/globals.h"
+#include "src/heap/marking.h"
+
+namespace v8 {
+namespace internal {
+
+class MemoryChunk;
+
+class BasicMemoryChunk {
+ public:
+ enum Flag {
+ NO_FLAGS = 0u,
+ IS_EXECUTABLE = 1u << 0,
+ POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
+ POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
+ // A page in the from-space or a young large page that was not scavenged
+ // yet.
+ FROM_PAGE = 1u << 3,
+ // A page in the to-space or a young large page that was scavenged.
+ TO_PAGE = 1u << 4,
+ LARGE_PAGE = 1u << 5,
+ EVACUATION_CANDIDATE = 1u << 6,
+ NEVER_EVACUATE = 1u << 7,
+
+ // Large objects can have a progress bar in their page header. These objects
+ // are scanned in increments and will be kept black while being scanned.
+ // Even if the mutator writes to them they will be kept black and a white
+ // to grey transition is performed in the value.
+ HAS_PROGRESS_BAR = 1u << 8,
+
+ // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
+ // from new to old space during evacuation.
+ PAGE_NEW_OLD_PROMOTION = 1u << 9,
+
+ // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
+ // within the new space during evacuation.
+ PAGE_NEW_NEW_PROMOTION = 1u << 10,
+
+ // This flag is intended to be used for testing. Works only when both
+ // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
+ // are set. It forces the page to become an evacuation candidate at next
+ // candidates selection cycle.
+ FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
+
+ // This flag is intended to be used for testing.
+ NEVER_ALLOCATE_ON_PAGE = 1u << 12,
+
+ // The memory chunk is already logically freed, however the actual freeing
+ // still has to be performed.
+ PRE_FREED = 1u << 13,
+
+ // |POOLED|: When actually freeing this chunk, only uncommit and do not
+ // give up the reservation as we still reuse the chunk at some point.
+ POOLED = 1u << 14,
+
+ // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
+ // has been aborted and needs special handling by the sweeper.
+ COMPACTION_WAS_ABORTED = 1u << 15,
+
+ // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
+ // on pages is sometimes aborted. The flag is used to avoid repeatedly
+ // triggering on the same page.
+ COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
+
+ // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
+ // to iterate the page.
+ SWEEP_TO_ITERATE = 1u << 17,
+
+ // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
+ // enabled.
+ INCREMENTAL_MARKING = 1u << 18,
+ NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
+
+ // The memory chunk freeing bookkeeping has been performed but the chunk has
+ // not yet been freed.
+ UNREGISTERED = 1u << 20,
+
+ // The memory chunk belongs to the read-only heap and does not participate
+ // in garbage collection. This is used instead of owner for identity
+ // checking since read-only chunks have no owner once they are detached.
+ READ_ONLY_HEAP = 1u << 21,
+ };
+
+ static const intptr_t kAlignment =
+ (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+ static const intptr_t kAlignmentMask = kAlignment - 1;
+
+ BasicMemoryChunk(size_t size, Address area_start, Address area_end);
+
+ static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
+
+ Address address() const { return reinterpret_cast<Address>(this); }
+
+ size_t size() const { return size_; }
+ void set_size(size_t size) { size_ = size; }
+
+ Address area_start() const { return area_start_; }
+
+ Address area_end() const { return area_end_; }
+ void set_area_end(Address area_end) { area_end_ = area_end; }
+
+ size_t area_size() const {
+ return static_cast<size_t>(area_end() - area_start());
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ void SetFlag(Flag flag) {
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ flags_ |= flag;
+ } else {
+ base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
+ }
+ }
+
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ bool IsFlagSet(Flag flag) const {
+ return (GetFlags<access_mode>() & flag) != 0;
+ }
+
+ void ClearFlag(Flag flag) { flags_ &= ~flag; }
+
+ // Set or clear multiple flags at a time. The flags in the mask are set to
+ // the value in "flags", the rest retain the current value in |flags_|.
+ void SetFlags(uintptr_t flags, uintptr_t mask) {
+ flags_ = (flags_ & ~mask) | (flags & mask);
+ }
+
+ // Return all current flags.
+ template <AccessMode access_mode = AccessMode::NON_ATOMIC>
+ uintptr_t GetFlags() const {
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ return flags_;
+ } else {
+ return base::AsAtomicWord::Relaxed_Load(&flags_);
+ }
+ }
+
+ bool InReadOnlySpace() const { return IsFlagSet(READ_ONLY_HEAP); }
+
+ // TODO(v8:7464): Add methods for down casting to MemoryChunk.
+
+ bool Contains(Address addr) const {
+ return addr >= area_start() && addr < area_end();
+ }
+
+ // Checks whether |addr| can be a limit of addresses in this page. It's a
+ // limit if it's in the page, or if it's just after the last byte of the page.
+ bool ContainsLimit(Address addr) const {
+ return addr >= area_start() && addr <= area_end();
+ }
+
+ V8_EXPORT_PRIVATE static bool HasHeaderSentinel(Address slot_addr);
+
+ void ReleaseMarkingBitmap();
+
+ static const intptr_t kSizeOffset = 0;
+ static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
+ static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
+ static const intptr_t kHeapOffset = kMarkBitmapOffset + kSystemPointerSize;
+ static const intptr_t kHeaderSentinelOffset =
+ kHeapOffset + kSystemPointerSize;
+
+ static const size_t kHeaderSize =
+ kSizeOffset + kSizetSize // size_t size
+ + kUIntptrSize // uintptr_t flags_
+ + kSystemPointerSize // Bitmap* marking_bitmap_
+ + kSystemPointerSize // Heap* heap_
+ + kSystemPointerSize // Address header_sentinel_
+ + kSystemPointerSize // Address area_start_
+ + kSystemPointerSize; // Address area_end_
+
+ protected:
+ // Overall size of the chunk, including the header and guards.
+ size_t size_;
+
+ uintptr_t flags_ = NO_FLAGS;
+
+ Bitmap* marking_bitmap_ = nullptr;
+
+ // TODO(v8:7464): Find a way to remove this.
+ // This goes against the spirit of the BasicMemoryChunk, but until C++14/17
+ // is the default it needs to live here because MemoryChunk is not standard
+ // layout under C++11.
+ Heap* heap_;
+
+ // This is used to distinguish the memory chunk header from the interior of a
+ // large page. The memory chunk header stores here an impossible tagged
+ // pointer: the tagged pointer of the page start. A field in a large object is
+ // guaranteed to not contain such a pointer.
+ Address header_sentinel_;
+
+ // Start and end of allocatable memory on this chunk.
+ Address area_start_;
+ Address area_end_;
+
+ friend class BasicMemoryChunkValidator;
+};
+
+STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
+
+class BasicMemoryChunkValidator {
+ // Computed offsets should match the compiler generated ones.
+ STATIC_ASSERT(BasicMemoryChunk::kSizeOffset ==
+ offsetof(BasicMemoryChunk, size_));
+ STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
+ offsetof(BasicMemoryChunk, flags_));
+ STATIC_ASSERT(BasicMemoryChunk::kMarkBitmapOffset ==
+ offsetof(BasicMemoryChunk, marking_bitmap_));
+ STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
+ offsetof(BasicMemoryChunk, heap_));
+ STATIC_ASSERT(BasicMemoryChunk::kHeaderSentinelOffset ==
+ offsetof(BasicMemoryChunk, header_sentinel_));
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_BASIC_MEMORY_CHUNK_H_
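
A hedged usage sketch of the flag accessors declared above, showing how the SetFlags(flags, mask) expression updates only the bits selected by the mask. The free function mirrors the expression from the header; it is an illustration, not the V8 implementation.

#include <cassert>
#include <cstdint>

// Mirrors BasicMemoryChunk::SetFlags(flags, mask): bits inside |mask| take the
// value from |flags|, bits outside |mask| keep their current value.
uintptr_t ApplyFlags(uintptr_t current, uintptr_t flags, uintptr_t mask) {
  return (current & ~mask) | (flags & mask);
}

int main() {
  const uintptr_t kFromPage = uintptr_t{1} << 3;   // FROM_PAGE
  const uintptr_t kToPage = uintptr_t{1} << 4;     // TO_PAGE
  const uintptr_t kLargePage = uintptr_t{1} << 5;  // LARGE_PAGE

  uintptr_t flags = kToPage | kLargePage;
  // Flip the page from to-space to from-space in one masked update; LARGE_PAGE
  // lies outside the mask and is therefore left untouched.
  flags = ApplyFlags(flags, kFromPage, kFromPage | kToPage);
  assert(flags == (kFromPage | kLargePage));
  return 0;
}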
diff --git a/chromium/v8/src/heap/code-stats.cc b/chromium/v8/src/heap/code-stats.cc
index cb34d732a4f..c6c111bc0eb 100644
--- a/chromium/v8/src/heap/code-stats.cc
+++ b/chromium/v8/src/heap/code-stats.cc
@@ -6,7 +6,7 @@
#include "src/codegen/code-comments.h"
#include "src/codegen/reloc-info.h"
-#include "src/heap/spaces-inl.h" // For HeapObjectIterator.
+#include "src/heap/spaces-inl.h" // For PagedSpaceObjectIterator.
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -61,7 +61,7 @@ void CodeStatistics::ResetCodeAndMetadataStatistics(Isolate* isolate) {
// - by code comment (only in debug mode)
void CodeStatistics::CollectCodeStatistics(PagedSpace* space,
Isolate* isolate) {
- HeapObjectIterator obj_it(space);
+ PagedSpaceObjectIterator obj_it(space);
for (HeapObject obj = obj_it.Next(); !obj.is_null(); obj = obj_it.Next()) {
RecordCodeAndMetadataStatistics(obj, isolate);
}
@@ -73,7 +73,7 @@ void CodeStatistics::CollectCodeStatistics(PagedSpace* space,
// - by code comment (only in debug mode)
void CodeStatistics::CollectCodeStatistics(LargeObjectSpace* space,
Isolate* isolate) {
- LargeObjectIterator obj_it(space);
+ LargeObjectSpaceObjectIterator obj_it(space);
for (HeapObject obj = obj_it.Next(); !obj.is_null(); obj = obj_it.Next()) {
RecordCodeAndMetadataStatistics(obj, isolate);
}
diff --git a/chromium/v8/src/heap/combined-heap.cc b/chromium/v8/src/heap/combined-heap.cc
index ed60b438cbf..0416bb62a42 100644
--- a/chromium/v8/src/heap/combined-heap.cc
+++ b/chromium/v8/src/heap/combined-heap.cc
@@ -3,16 +3,22 @@
// found in the LICENSE file.
#include "src/heap/combined-heap.h"
+#include "src/heap/heap-inl.h"
namespace v8 {
namespace internal {
-HeapObject CombinedHeapIterator::Next() {
+CombinedHeapObjectIterator::CombinedHeapObjectIterator(
+ Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
+ : heap_iterator_(heap, filtering),
+ ro_heap_iterator_(heap->isolate()->read_only_heap()) {}
+
+HeapObject CombinedHeapObjectIterator::Next() {
HeapObject object = ro_heap_iterator_.Next();
if (!object.is_null()) {
return object;
}
- return heap_iterator_.next();
+ return heap_iterator_.Next();
}
} // namespace internal
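
The renamed CombinedHeapObjectIterator::Next() above drains the read-only heap first and only then forwards to the main heap iterator. A toy, self-contained illustration of that chaining follows; the vectors stand in for the two heaps, and none of the names below are V8 API.

#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

// Stand-in for the chaining in CombinedHeapObjectIterator::Next(): exhaust the
// read-only source first, then fall back to the main source; return nullopt at
// the end (the V8 code returns a null HeapObject instead).
class CombinedIteratorSketch {
 public:
  CombinedIteratorSketch(std::vector<int> ro, std::vector<int> main)
      : ro_(std::move(ro)), main_(std::move(main)) {}

  std::optional<int> Next() {
    if (ro_index_ < ro_.size()) return ro_[ro_index_++];
    if (main_index_ < main_.size()) return main_[main_index_++];
    return std::nullopt;
  }

 private:
  std::vector<int> ro_, main_;
  std::size_t ro_index_ = 0, main_index_ = 0;
};

int main() {
  CombinedIteratorSketch it({1, 2}, {3, 4});
  for (auto obj = it.Next(); obj.has_value(); obj = it.Next()) {
    std::printf("%d\n", *obj);  // prints 1 2 3 4, read-only items first
  }
  return 0;
}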
diff --git a/chromium/v8/src/heap/combined-heap.h b/chromium/v8/src/heap/combined-heap.h
index c331d95c3d4..eaa012ec180 100644
--- a/chromium/v8/src/heap/combined-heap.h
+++ b/chromium/v8/src/heap/combined-heap.h
@@ -13,21 +13,19 @@ namespace v8 {
namespace internal {
// This class allows iteration over the entire heap (Heap and ReadOnlyHeap). It
-// uses the HeapIterator to iterate over non-read-only objects and accepts the
-// same filtering option. (Interrupting iteration while filtering unreachable
-// objects is still forbidden)
-class V8_EXPORT_PRIVATE CombinedHeapIterator final {
+// uses the HeapObjectIterator to iterate over non-read-only objects and accepts
+// the same filtering option. (Interrupting iteration while filtering
+// unreachable objects is still forbidden)
+class V8_EXPORT_PRIVATE CombinedHeapObjectIterator final {
public:
- CombinedHeapIterator(Heap* heap,
- HeapIterator::HeapObjectsFiltering filtering =
- HeapIterator::HeapObjectsFiltering::kNoFiltering)
- : heap_iterator_(heap, filtering),
- ro_heap_iterator_(heap->read_only_heap()) {}
+ CombinedHeapObjectIterator(
+ Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering =
+ HeapObjectIterator::HeapObjectsFiltering::kNoFiltering);
HeapObject Next();
private:
- HeapIterator heap_iterator_;
- ReadOnlyHeapIterator ro_heap_iterator_;
+ HeapObjectIterator heap_iterator_;
+ ReadOnlyHeapObjectIterator ro_heap_iterator_;
};
V8_WARN_UNUSED_RESULT inline bool IsValidHeapObject(Heap* heap,
diff --git a/chromium/v8/src/heap/concurrent-marking.cc b/chromium/v8/src/heap/concurrent-marking.cc
index 8ce96428e19..12bb28f1c8e 100644
--- a/chromium/v8/src/heap/concurrent-marking.cc
+++ b/chromium/v8/src/heap/concurrent-marking.cc
@@ -121,11 +121,7 @@ class ConcurrentMarkingVisitor final
void ProcessWeakHeapObject(HeapObject host, THeapObjectSlot slot,
HeapObject heap_object) {
#ifdef THREAD_SANITIZER
- // Perform a dummy acquire load to tell TSAN that there is no data race
- // in mark-bit initialization. See MemoryChunk::Initialize for the
- // corresponding release store.
- MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object.address());
- CHECK_NOT_NULL(chunk->synchronized_heap());
+ MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
#endif
if (marking_state_.IsBlackOrGrey(heap_object)) {
// Weak references with live values are directly processed here to
@@ -247,7 +243,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(weak_cell)) return 0;
int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
- VisitMapPointer(weak_cell, weak_cell.map_slot());
+ VisitMapPointer(weak_cell);
WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
if (weak_cell.target().IsHeapObject()) {
HeapObject target = HeapObject::cast(weak_cell.target());
@@ -306,13 +302,13 @@ class ConcurrentMarkingVisitor final
int VisitSeqOneByteString(Map map, SeqOneByteString object) {
if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
return SeqOneByteString::SizeFor(object.synchronized_length());
}
int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
return SeqTwoByteString::SizeFor(object.synchronized_length());
}
@@ -367,7 +363,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(shared_info)) return 0;
int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
- VisitMapPointer(shared_info, shared_info.map_slot());
+ VisitMapPointer(shared_info);
SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
this);
@@ -385,7 +381,7 @@ class ConcurrentMarkingVisitor final
int VisitBytecodeArray(Map map, BytecodeArray object) {
if (!ShouldVisit(object)) return 0;
int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
if (!is_forced_gc_) {
object.MakeOlder();
@@ -453,7 +449,7 @@ class ConcurrentMarkingVisitor final
int VisitDescriptorArray(Map map, DescriptorArray array) {
if (!ShouldVisit(array)) return 0;
- VisitMapPointer(array, array.map_slot());
+ VisitMapPointer(array);
int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
VisitPointers(array, array.GetFirstPointerSlot(),
array.GetDescriptorSlot(0));
@@ -463,7 +459,7 @@ class ConcurrentMarkingVisitor final
int VisitTransitionArray(Map map, TransitionArray array) {
if (!ShouldVisit(array)) return 0;
- VisitMapPointer(array, array.map_slot());
+ VisitMapPointer(array);
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
weak_objects_->transition_arrays.Push(task_id_, array);
@@ -528,11 +524,7 @@ class ConcurrentMarkingVisitor final
void MarkObject(HeapObject object) {
#ifdef THREAD_SANITIZER
- // Perform a dummy acquire load to tell TSAN that there is no data race
- // in mark-bit initialization. See MemoryChunk::Initialize for the
- // corresponding release store.
- MemoryChunk* chunk = MemoryChunk::FromAddress(object.address());
- CHECK_NOT_NULL(chunk->synchronized_heap());
+ MemoryChunk::FromHeapObject(object)->SynchronizedHeapLoad();
#endif
if (marking_state_.WhiteToGrey(object)) {
shared_.Push(object);
@@ -631,7 +623,7 @@ class ConcurrentMarkingVisitor final
// Left trimming marks the array black before over-writing the length.
DCHECK(length.IsSmi());
int size = T::SizeFor(Smi::ToInt(length));
- VisitMapPointer(object, object.map_slot());
+ VisitMapPointer(object);
T::BodyDescriptor::IterateBody(map, object, size, this);
return size;
}
@@ -656,7 +648,7 @@ class ConcurrentMarkingVisitor final
template <typename T, typename TBodyDescriptor>
const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
SlotSnapshottingVisitor visitor(&slot_snapshot_);
- visitor.VisitPointer(object, ObjectSlot(object.map_slot().address()));
+ visitor.VisitPointer(object, object.map_slot());
TBodyDescriptor::IterateBody(map, object, size, &visitor);
return slot_snapshot_;
}
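
The removed comments describe a dummy acquire load that pairs with a release store in MemoryChunk::Initialize; the new MemoryChunk::SynchronizedHeapLoad() wraps that same TSAN handshake. A minimal, self-contained sketch of the pattern under those assumptions (the type and member names below are illustrative, not V8's):

#include <atomic>

struct ChunkHeaderSketch {
  std::atomic<void*> heap{nullptr};

  // Chunk initialization publishes the heap pointer with a release store...
  void Publish(void* h) { heap.store(h, std::memory_order_release); }

  // ...and concurrent markers issue a dummy acquire load, so TSAN treats the
  // mark-bit initialization as happening-before the concurrent marking reads.
  void SynchronizedHeapLoad() const {
    void* h = heap.load(std::memory_order_acquire);
    (void)h;
  }
};

int main() {
  ChunkHeaderSketch chunk;
  int fake_heap = 0;
  chunk.Publish(&fake_heap);
  chunk.SynchronizedHeapLoad();
  return 0;
}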
diff --git a/chromium/v8/src/heap/embedder-tracing.cc b/chromium/v8/src/heap/embedder-tracing.cc
index c032f384b3b..ab91367bc60 100644
--- a/chromium/v8/src/heap/embedder-tracing.cc
+++ b/chromium/v8/src/heap/embedder-tracing.cc
@@ -34,7 +34,7 @@ void LocalEmbedderHeapTracer::TraceEpilogue() {
EmbedderHeapTracer::TraceSummary summary;
remote_tracer_->TraceEpilogue(&summary);
- remote_stats_.allocated_size = summary.allocated_size;
+ remote_stats_.used_size = summary.allocated_size;
// Force a check next time increased memory is reported. This allows for
// setting limits close to actual heap sizes.
remote_stats_.allocated_size_limit_for_check = 0;
@@ -118,6 +118,10 @@ void LocalEmbedderHeapTracer::StartIncrementalMarkingIfNeeded() {
heap->StartIncrementalMarkingIfAllocationLimitIsReached(
heap->GCFlagsForIncrementalMarking(),
kGCCallbackScheduleIdleGarbageCollection);
+ if (heap->AllocationLimitOvershotByLargeMargin()) {
+ heap->FinalizeIncrementalMarkingAtomically(
+ i::GarbageCollectionReason::kExternalFinalize);
+ }
}
} // namespace internal
diff --git a/chromium/v8/src/heap/embedder-tracing.h b/chromium/v8/src/heap/embedder-tracing.h
index 4309fb722ae..eae29cbf5ce 100644
--- a/chromium/v8/src/heap/embedder-tracing.h
+++ b/chromium/v8/src/heap/embedder-tracing.h
@@ -77,8 +77,8 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
}
void IncreaseAllocatedSize(size_t bytes) {
+ remote_stats_.used_size += bytes;
remote_stats_.allocated_size += bytes;
- remote_stats_.accumulated_allocated_size += bytes;
if (remote_stats_.allocated_size >
remote_stats_.allocated_size_limit_for_check) {
StartIncrementalMarkingIfNeeded();
@@ -87,12 +87,15 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
}
}
+ void DecreaseAllocatedSize(size_t bytes) {
+ DCHECK_GE(remote_stats_.used_size, bytes);
+ remote_stats_.used_size -= bytes;
+ }
+
void StartIncrementalMarkingIfNeeded();
+ size_t used_size() const { return remote_stats_.used_size; }
size_t allocated_size() const { return remote_stats_.allocated_size; }
- size_t accumulated_allocated_size() const {
- return remote_stats_.accumulated_allocated_size;
- }
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
@@ -109,16 +112,16 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
bool embedder_worklist_empty_ = false;
struct RemoteStatistics {
- // Allocated size of objects in bytes reported by the embedder. Updated via
+ // Used size of objects in bytes reported by the embedder. Updated via
// TraceSummary at the end of tracing and incrementally when the GC is not
// in progress.
+ size_t used_size = 0;
+ // Total bytes allocated by the embedder. Monotonically
+ // increasing value. Used to approximate allocation rate.
size_t allocated_size = 0;
- // Limit for |allocated_size_| in bytes to avoid checking for starting a GC
+ // Limit for |allocated_size| in bytes to avoid checking for starting a GC
// on each increment.
size_t allocated_size_limit_for_check = 0;
- // Totally accumulated bytes allocated by the embedder. Monotonically
- // increasing value. Used to approximate allocation rate.
- size_t accumulated_allocated_size = 0;
} remote_stats_;
friend class EmbedderStackStateScope;
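
The hunks above split the embedder statistics into a used_size that can shrink and a monotonically increasing allocated_size used to approximate the allocation rate. A standalone sketch of that bookkeeping, using a plain struct rather than the LocalEmbedderHeapTracer class:

#include <cassert>
#include <cstddef>

struct RemoteStatsSketch {
  std::size_t used_size = 0;       // live embedder memory; grows and shrinks
  std::size_t allocated_size = 0;  // monotonically increasing; approximates the rate
};

void IncreaseAllocatedSize(RemoteStatsSketch& s, std::size_t bytes) {
  s.used_size += bytes;
  s.allocated_size += bytes;
}

void DecreaseAllocatedSize(RemoteStatsSketch& s, std::size_t bytes) {
  assert(s.used_size >= bytes);  // mirrors the DCHECK_GE in the diff
  s.used_size -= bytes;          // allocated_size is deliberately left untouched
}

int main() {
  RemoteStatsSketch stats;
  IncreaseAllocatedSize(stats, 128);
  DecreaseAllocatedSize(stats, 32);
  assert(stats.used_size == 96 && stats.allocated_size == 128);
  return 0;
}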
diff --git a/chromium/v8/src/heap/factory-inl.h b/chromium/v8/src/heap/factory-inl.h
index 32237da8771..9aa705047c8 100644
--- a/chromium/v8/src/heap/factory-inl.h
+++ b/chromium/v8/src/heap/factory-inl.h
@@ -104,6 +104,15 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
allocation);
}
+Handle<JSObject> Factory::NewFastOrSlowJSObjectFromMap(
+ Handle<Map> map, int number_of_slow_properties, AllocationType allocation,
+ Handle<AllocationSite> allocation_site) {
+ return map->is_dictionary_map()
+ ? NewSlowJSObjectFromMap(map, number_of_slow_properties,
+ allocation, allocation_site)
+ : NewJSObjectFromMap(map, allocation, allocation_site);
+}
+
Handle<Object> Factory::NewURIError() {
return NewError(isolate()->uri_error_function(),
MessageTemplate::kURIMalformed);
diff --git a/chromium/v8/src/heap/factory.cc b/chromium/v8/src/heap/factory.cc
index 03896f78278..19c36656225 100644
--- a/chromium/v8/src/heap/factory.cc
+++ b/chromium/v8/src/heap/factory.cc
@@ -580,7 +580,7 @@ Handle<ObjectBoilerplateDescription> Factory::NewObjectBoilerplateDescription(
if (has_different_size_backing_store) {
DCHECK_IMPLIES((boilerplate == (all_properties - index_keys)),
has_seen_proto);
- description->set_backing_store_size(isolate(), backing_store_size);
+ description->set_backing_store_size(backing_store_size);
}
description->set_flags(0);
@@ -1232,8 +1232,8 @@ Handle<String> Factory::NewConsString(Handle<String> left, Handle<String> right,
result->set_hash_field(String::kEmptyHashField);
result->set_length(length);
- result->set_first(isolate(), *left, mode);
- result->set_second(isolate(), *right, mode);
+ result->set_first(*left, mode);
+ result->set_second(*right, mode);
return result;
}
@@ -1314,7 +1314,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
slice->set_hash_field(String::kEmptyHashField);
slice->set_length(length);
- slice->set_parent(isolate(), *str);
+ slice->set_parent(*str);
slice->set_offset(offset);
return slice;
}
@@ -1483,7 +1483,7 @@ Handle<ScriptContextTable> Factory::NewScriptContextTable() {
return context_table;
}
-Handle<Context> Factory::NewModuleContext(Handle<Module> module,
+Handle<Context> Factory::NewModuleContext(Handle<SourceTextModule> module,
Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
@@ -1611,17 +1611,7 @@ Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
Handle<Struct> Factory::NewStruct(InstanceType type,
AllocationType allocation) {
- Map map;
- switch (type) {
-#define MAKE_CASE(TYPE, Name, name) \
- case TYPE: \
- map = *name##_map(); \
- break;
- STRUCT_LIST(MAKE_CASE)
-#undef MAKE_CASE
- default:
- UNREACHABLE();
- }
+ Map map = Map::GetStructMap(isolate(), type);
int size = map.instance_size();
HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
Handle<Struct> str(Struct::cast(result), isolate());
@@ -1640,10 +1630,17 @@ Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
Handle<AccessorInfo> Factory::NewAccessorInfo() {
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(
NewStruct(ACCESSOR_INFO_TYPE, AllocationType::kOld));
+ DisallowHeapAllocation no_gc;
info->set_name(*empty_string());
info->set_flags(0); // Must clear the flags, it was initialized as undefined.
info->set_is_sloppy(true);
info->set_initial_property_attributes(NONE);
+
+ // Clear some other fields that should not be undefined.
+ info->set_getter(Smi::kZero);
+ info->set_setter(Smi::kZero);
+ info->set_js_getter(Smi::kZero);
+
return info;
}
@@ -1970,15 +1967,15 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
// |layout_descriptor| are set.
map.set_visitor_id(Map::GetVisitorId(map));
map.set_bit_field(0);
- map.set_bit_field2(Map::IsExtensibleBit::kMask);
+ map.set_bit_field2(Map::NewTargetIsBaseBit::encode(true));
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptorsBit::encode(true) |
- Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
+ Map::ConstructionCounterBits::encode(Map::kNoSlackTracking) |
+ Map::IsExtensibleBit::encode(true);
map.set_bit_field3(bit_field3);
DCHECK(!map.is_in_retained_map_list());
map.clear_padding();
map.set_elements_kind(elements_kind);
- map.set_new_target_is_base(true);
isolate()->counters()->maps_created()->Increment();
if (FLAG_trace_maps) LOG(isolate(), MapCreate(map));
return map;
@@ -2293,9 +2290,9 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
// as the result.
Handle<Object> no_caller;
- MaybeHandle<Object> maybe_error =
- ErrorUtils::Construct(isolate(), constructor, constructor, message,
- SKIP_NONE, no_caller, false);
+ MaybeHandle<Object> maybe_error = ErrorUtils::Construct(
+ isolate(), constructor, constructor, message, SKIP_NONE, no_caller,
+ ErrorUtils::StackTraceCollection::kDetailed);
if (maybe_error.is_null()) {
DCHECK(isolate()->has_pending_exception());
maybe_error = handle(isolate()->pending_exception(), isolate());
@@ -2341,7 +2338,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
Handle<JSFunction> function(JSFunction::cast(New(map, allocation)),
isolate());
- function->initialize_properties();
+ function->initialize_properties(isolate());
function->initialize_elements();
function->set_shared(*info);
function->set_code(info->GetCode());
@@ -2563,9 +2560,10 @@ Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
AllocationType::kOld);
}
-Handle<ModuleInfo> Factory::NewModuleInfo() {
- return NewFixedArrayWithMap<ModuleInfo>(
- RootIndex::kModuleInfoMap, ModuleInfo::kLength, AllocationType::kOld);
+Handle<SourceTextModuleInfo> Factory::NewSourceTextModuleInfo() {
+ return NewFixedArrayWithMap<SourceTextModuleInfo>(
+ RootIndex::kModuleInfoMap, SourceTextModuleInfo::kLength,
+ AllocationType::kOld);
}
Handle<PreparseData> Factory::NewPreparseData(int data_length,
@@ -2585,15 +2583,14 @@ Handle<PreparseData> Factory::NewPreparseData(int data_length,
Handle<UncompiledDataWithoutPreparseData>
Factory::NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
int32_t start_position,
- int32_t end_position,
- int32_t function_literal_id) {
+ int32_t end_position) {
Handle<UncompiledDataWithoutPreparseData> result(
UncompiledDataWithoutPreparseData::cast(New(
uncompiled_data_without_preparse_data_map(), AllocationType::kOld)),
isolate());
UncompiledData::Initialize(*result, *inferred_name, start_position,
- end_position, function_literal_id);
+ end_position);
return result;
}
@@ -2601,7 +2598,6 @@ Handle<UncompiledDataWithPreparseData>
Factory::NewUncompiledDataWithPreparseData(Handle<String> inferred_name,
int32_t start_position,
int32_t end_position,
- int32_t function_literal_id,
Handle<PreparseData> preparse_data) {
Handle<UncompiledDataWithPreparseData> result(
UncompiledDataWithPreparseData::cast(
@@ -2609,8 +2605,7 @@ Factory::NewUncompiledDataWithPreparseData(Handle<String> inferred_name,
isolate());
UncompiledDataWithPreparseData::Initialize(
- *result, *inferred_name, start_position, end_position,
- function_literal_id, *preparse_data);
+ *result, *inferred_name, start_position, end_position, *preparse_data);
return result;
}
@@ -2755,7 +2750,7 @@ Handle<JSObject> Factory::NewJSObjectWithNullProto(AllocationType allocation) {
Handle<Map> new_map = Map::Copy(
isolate(), Handle<Map>(result->map(), isolate()), "ObjectWithNullProto");
Map::SetPrototype(isolate(), new_map, null_value());
- JSObject::MigrateToMap(result, new_map);
+ JSObject::MigrateToMap(isolate(), result, new_map);
return result;
}
@@ -2886,12 +2881,14 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
return js_obj;
}
-Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
- AllocationType allocation) {
+Handle<JSObject> Factory::NewSlowJSObjectFromMap(
+ Handle<Map> map, int capacity, AllocationType allocation,
+ Handle<AllocationSite> allocation_site) {
DCHECK(map->is_dictionary_map());
Handle<NameDictionary> object_properties =
NameDictionary::New(isolate(), capacity);
- Handle<JSObject> js_object = NewJSObjectFromMap(map, allocation);
+ Handle<JSObject> js_object =
+ NewJSObjectFromMap(map, allocation, allocation_site);
js_object->set_raw_properties_or_hash(*object_properties);
return js_object;
}
@@ -2910,43 +2907,54 @@ Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
DCHECK(elements->IsNumberDictionary());
object_map =
JSObject::GetElementsTransitionMap(object, DICTIONARY_ELEMENTS);
- JSObject::MigrateToMap(object, object_map);
+ JSObject::MigrateToMap(isolate(), object, object_map);
object->set_elements(*elements);
}
return object;
}
-Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
- AllocationType allocation) {
- NativeContext native_context = isolate()->raw_native_context();
- Map map = native_context.GetInitialJSArrayMap(elements_kind);
- if (map.is_null()) {
- JSFunction array_function = native_context.array_function();
- map = array_function.initial_map();
- }
- return Handle<JSArray>::cast(
- NewJSObjectFromMap(handle(map, isolate()), allocation));
-}
-
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind, int length,
int capacity,
ArrayStorageAllocationMode mode,
AllocationType allocation) {
- Handle<JSArray> array = NewJSArray(elements_kind, allocation);
- NewJSArrayStorage(array, length, capacity, mode);
- return array;
+ DCHECK(capacity >= length);
+ if (capacity == 0) {
+ return NewJSArrayWithElements(empty_fixed_array(), elements_kind, length,
+ allocation);
+ }
+
+ HandleScope inner_scope(isolate());
+ Handle<FixedArrayBase> elms =
+ NewJSArrayStorage(elements_kind, capacity, mode);
+ return inner_scope.CloseAndEscape(NewJSArrayWithUnverifiedElements(
+ elms, elements_kind, length, allocation));
}
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
ElementsKind elements_kind,
int length,
AllocationType allocation) {
- DCHECK(length <= elements->length());
- Handle<JSArray> array = NewJSArray(elements_kind, allocation);
+ Handle<JSArray> array = NewJSArrayWithUnverifiedElements(
+ elements, elements_kind, length, allocation);
+ JSObject::ValidateElements(*array);
+ return array;
+}
+Handle<JSArray> Factory::NewJSArrayWithUnverifiedElements(
+ Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
+ AllocationType allocation) {
+ DCHECK(length <= elements->length());
+ NativeContext native_context = isolate()->raw_native_context();
+ Map map = native_context.GetInitialJSArrayMap(elements_kind);
+ if (map.is_null()) {
+ JSFunction array_function = native_context.array_function();
+ map = array_function.initial_map();
+ }
+ Handle<JSArray> array = Handle<JSArray>::cast(
+ NewJSObjectFromMap(handle(map, isolate()), allocation));
+ DisallowHeapAllocation no_gc;
array->set_elements(*elements);
array->set_length(Smi::FromInt(length));
- JSObject::ValidateElements(*array);
return array;
}
@@ -2961,8 +2969,17 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
}
HandleScope inner_scope(isolate());
+ Handle<FixedArrayBase> elms =
+ NewJSArrayStorage(array->GetElementsKind(), capacity, mode);
+
+ array->set_elements(*elms);
+ array->set_length(Smi::FromInt(length));
+}
+
+Handle<FixedArrayBase> Factory::NewJSArrayStorage(
+ ElementsKind elements_kind, int capacity, ArrayStorageAllocationMode mode) {
+ DCHECK_GT(capacity, 0);
Handle<FixedArrayBase> elms;
- ElementsKind elements_kind = array->GetElementsKind();
if (IsDoubleElementsKind(elements_kind)) {
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
elms = NewFixedDoubleArray(capacity);
@@ -2979,9 +2996,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
elms = NewFixedArrayWithHoles(capacity);
}
}
-
- array->set_elements(*elms);
- array->set_length(Smi::FromInt(length));
+ return elms;
}
Handle<JSWeakMap> Factory::NewJSWeakMap() {
@@ -3020,9 +3035,10 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
return Handle<JSGeneratorObject>::cast(NewJSObjectFromMap(map));
}
-Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
- Handle<ModuleInfo> module_info(code->scope_info().ModuleDescriptorInfo(),
- isolate());
+Handle<SourceTextModule> Factory::NewSourceTextModule(
+ Handle<SharedFunctionInfo> code) {
+ Handle<SourceTextModuleInfo> module_info(
+ code->scope_info().ModuleDescriptorInfo(), isolate());
Handle<ObjectHashTable> exports =
ObjectHashTable::New(isolate(), module_info->RegularExportCount());
Handle<FixedArray> regular_exports =
@@ -3035,8 +3051,10 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
: empty_fixed_array();
ReadOnlyRoots roots(isolate());
- Handle<Module> module =
- Handle<Module>::cast(NewStruct(MODULE_TYPE, AllocationType::kOld));
+ Handle<SourceTextModule> module(
+ SourceTextModule::cast(
+ New(source_text_module_map(), AllocationType::kOld)),
+ isolate());
module->set_code(*code);
module->set_exports(*exports);
module->set_regular_exports(*regular_exports);
@@ -3053,6 +3071,28 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
return module;
}
+Handle<SyntheticModule> Factory::NewSyntheticModule(
+ Handle<String> module_name, Handle<FixedArray> export_names,
+ v8::Module::SyntheticModuleEvaluationSteps evaluation_steps) {
+ ReadOnlyRoots roots(isolate());
+ Handle<SyntheticModule> module(
+ SyntheticModule::cast(New(synthetic_module_map(), AllocationType::kOld)),
+ isolate());
+ Handle<ObjectHashTable> exports =
+ ObjectHashTable::New(isolate(), static_cast<int>(export_names->length()));
+ Handle<Foreign> evaluation_steps_foreign =
+ NewForeign(reinterpret_cast<i::Address>(evaluation_steps));
+ module->set_exports(*exports);
+ module->set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
+ module->set_module_namespace(roots.undefined_value());
+ module->set_status(Module::kUninstantiated);
+ module->set_exception(roots.the_hole_value());
+ module->set_name(*module_name);
+ module->set_export_names(*export_names);
+ module->set_evaluation_steps(*evaluation_steps_foreign);
+ return module;
+}
+
Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
AllocationType allocation) {
Handle<JSFunction> array_buffer_fun(
@@ -3274,7 +3314,7 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
DCHECK(map->prototype().IsNull(isolate()));
Handle<JSProxy> result(JSProxy::cast(New(map, AllocationType::kYoung)),
isolate());
- result->initialize_properties();
+ result->initialize_properties(isolate());
result->set_target(*target);
result->set_handler(*handler);
return result;
@@ -3335,10 +3375,12 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
false);
TRACE_EVENT_OBJECT_CREATED_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "SharedFunctionInfo",
- TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope, shared->TraceID()));
+ TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope,
+ shared->TraceID(literal)));
TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "SharedFunctionInfo",
- TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope, shared->TraceID()),
+ TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope,
+ shared->TraceID(literal)),
shared->ToTracedValue(literal));
return shared;
}
@@ -3447,6 +3489,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
*empty_feedback_metadata(), SKIP_WRITE_BARRIER);
}
share->set_script_or_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_function_literal_id(kFunctionLiteralIdInvalid);
#if V8_SFI_HAS_UNIQUE_ID
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
@@ -3639,68 +3682,82 @@ Handle<StackTraceFrame> Factory::NewStackTraceFrame(
return frame;
}
-Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
- Handle<StackFrameInfo> stack_frame_info = Handle<StackFrameInfo>::cast(
- NewStruct(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
- stack_frame_info->set_line_number(0);
- stack_frame_info->set_column_number(0);
- stack_frame_info->set_script_id(0);
- stack_frame_info->set_promise_all_index(-1);
- stack_frame_info->set_script_name(*null_value());
- stack_frame_info->set_script_name_or_source_url(*null_value());
- stack_frame_info->set_function_name(*null_value());
- stack_frame_info->set_flag(0);
- return stack_frame_info;
-}
-
Handle<StackFrameInfo> Factory::NewStackFrameInfo(
Handle<FrameArray> frame_array, int index) {
FrameArrayIterator it(isolate(), frame_array, index);
DCHECK(it.HasFrame());
- Handle<StackFrameInfo> info = NewStackFrameInfo();
- info->set_flag(0);
-
const bool is_wasm = frame_array->IsAnyWasmFrame(index);
- info->set_is_wasm(is_wasm);
+ StackFrameBase* frame = it.Frame();
- // Line numbers are 1-based, for Wasm we need to adjust.
- int line = it.Frame()->GetLineNumber();
- if (is_wasm && line >= 0) line++;
- info->set_line_number(line);
+ int line = frame->GetLineNumber();
+ int column = frame->GetColumnNumber();
- // Column numbers are 1-based. For Wasm we use the position
- // as the iterator does not currently provide a column number.
- const int column =
- is_wasm ? it.Frame()->GetPosition() + 1 : it.Frame()->GetColumnNumber();
- info->set_column_number(column);
+ const int script_id = frame->GetScriptId();
- info->set_script_id(it.Frame()->GetScriptId());
- info->set_script_name(*it.Frame()->GetFileName());
- info->set_script_name_or_source_url(*it.Frame()->GetScriptNameOrSourceUrl());
+ Handle<Object> script_name = frame->GetFileName();
+ Handle<Object> script_or_url = frame->GetScriptNameOrSourceUrl();
// TODO(szuend): Adjust this, once it is decided what name to use in both
// "simple" and "detailed" stack traces. This code is for
  // backwards compatibility to fulfill test expectations.
- auto function_name = it.Frame()->GetFunctionName();
+ auto function_name = frame->GetFunctionName();
+ bool is_user_java_script = false;
if (!is_wasm) {
- Handle<Object> function = it.Frame()->GetFunction();
+ Handle<Object> function = frame->GetFunction();
if (function->IsJSFunction()) {
Handle<JSFunction> fun = Handle<JSFunction>::cast(function);
- function_name = JSFunction::GetDebugName(fun);
- const bool is_user_java_script = fun->shared().IsUserJavaScript();
- info->set_is_user_java_script(is_user_java_script);
+ is_user_java_script = fun->shared().IsUserJavaScript();
}
}
+
+ Handle<Object> method_name = undefined_value();
+ Handle<Object> type_name = undefined_value();
+ Handle<Object> eval_origin = frame->GetEvalOrigin();
+ Handle<Object> wasm_module_name = frame->GetWasmModuleName();
+
+ // MethodName and TypeName are expensive to look up, so they are only
+ // included when they are strictly needed by the stack trace
+ // serialization code.
+ // Note: The {is_method_call} predicate needs to be kept in sync with
+ // the corresponding predicate in the stack trace serialization code
+ // in stack-frame-info.cc.
+ const bool is_toplevel = frame->IsToplevel();
+ const bool is_constructor = frame->IsConstructor();
+ const bool is_method_call = !(is_toplevel || is_constructor);
+ if (is_method_call) {
+ method_name = frame->GetMethodName();
+ type_name = frame->GetTypeName();
+ }
+
+ Handle<StackFrameInfo> info = Handle<StackFrameInfo>::cast(
+ NewStruct(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
+
+ DisallowHeapAllocation no_gc;
+
+ info->set_flag(0);
+ info->set_is_wasm(is_wasm);
+ info->set_is_asmjs_wasm(frame_array->IsAsmJsWasmFrame(index));
+ info->set_is_user_java_script(is_user_java_script);
+ info->set_line_number(line);
+ info->set_column_number(column);
+ info->set_script_id(script_id);
+
+ info->set_script_name(*script_name);
+ info->set_script_name_or_source_url(*script_or_url);
info->set_function_name(*function_name);
- info->set_wasm_module_name(*it.Frame()->GetWasmModuleName());
- info->set_is_eval(it.Frame()->IsEval());
- info->set_is_constructor(it.Frame()->IsConstructor());
- info->set_is_toplevel(it.Frame()->IsToplevel());
- info->set_is_async(it.Frame()->IsAsync());
- info->set_is_promise_all(it.Frame()->IsPromiseAll());
- info->set_promise_all_index(it.Frame()->GetPromiseIndex());
+ info->set_method_name(*method_name);
+ info->set_type_name(*type_name);
+ info->set_eval_origin(*eval_origin);
+ info->set_wasm_module_name(*wasm_module_name);
+
+ info->set_is_eval(frame->IsEval());
+ info->set_is_constructor(is_constructor);
+ info->set_is_toplevel(is_toplevel);
+ info->set_is_async(frame->IsAsync());
+ info->set_is_promise_all(frame->IsPromiseAll());
+ info->set_promise_all_index(frame->GetPromiseIndex());
return info;
}
@@ -3785,7 +3842,8 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
return map;
}
-Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
+Handle<LoadHandler> Factory::NewLoadHandler(int data_count,
+ AllocationType allocation) {
Handle<Map> map;
switch (data_count) {
case 1:
@@ -3800,7 +3858,7 @@ Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
default:
UNREACHABLE();
}
- return handle(LoadHandler::cast(New(map, AllocationType::kOld)), isolate());
+ return handle(LoadHandler::cast(New(map, allocation)), isolate());
}
Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
diff --git a/chromium/v8/src/heap/factory.h b/chromium/v8/src/heap/factory.h
index 5af2529021a..3ccbe6856f8 100644
--- a/chromium/v8/src/heap/factory.h
+++ b/chromium/v8/src/heap/factory.h
@@ -53,16 +53,18 @@ class JSSetIterator;
class JSTypedArray;
class JSWeakMap;
class LoadHandler;
-class ModuleInfo;
class NativeContext;
class NewFunctionArgs;
class PreparseData;
class PromiseResolveThenableJobTask;
class RegExpMatchInfo;
class ScriptContextTable;
+class SourceTextModule;
+class SourceTextModuleInfo;
class StackFrameInfo;
class StackTraceFrame;
class StoreHandler;
+class SyntheticModule;
class TemplateObjectDescription;
class UncompiledDataWithoutPreparseData;
class UncompiledDataWithPreparseData;
@@ -406,7 +408,7 @@ class V8_EXPORT_PRIVATE Factory {
Handle<ScriptContextTable> NewScriptContextTable();
// Create a module context.
- Handle<Context> NewModuleContext(Handle<Module> module,
+ Handle<Context> NewModuleContext(Handle<SourceTextModule> module,
Handle<NativeContext> outer,
Handle<ScopeInfo> scope_info);
@@ -461,7 +463,6 @@ class V8_EXPORT_PRIVATE Factory {
Handle<BreakPoint> NewBreakPoint(int id, Handle<String> condition);
Handle<StackTraceFrame> NewStackTraceFrame(Handle<FrameArray> frame_array,
int index);
- Handle<StackFrameInfo> NewStackFrameInfo();
Handle<StackFrameInfo> NewStackFrameInfo(Handle<FrameArray> frame_array,
int index);
Handle<SourcePositionTableWithFrameCache>
@@ -626,10 +627,19 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSObject> NewJSObjectFromMap(
Handle<Map> map, AllocationType allocation = AllocationType::kYoung,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
+ // Like NewJSObjectFromMap, but includes allocating a properties dictionary.
Handle<JSObject> NewSlowJSObjectFromMap(
Handle<Map> map,
int number_of_slow_properties = NameDictionary::kInitialCapacity,
- AllocationType allocation = AllocationType::kYoung);
+ AllocationType allocation = AllocationType::kYoung,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
+ // Calls NewJSObjectFromMap or NewSlowJSObjectFromMap depending on whether the
+ // map is a dictionary map.
+ inline Handle<JSObject> NewFastOrSlowJSObjectFromMap(
+ Handle<Map> map,
+ int number_of_slow_properties = NameDictionary::kInitialCapacity,
+ AllocationType allocation = AllocationType::kYoung,
+ Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
// Allocates and initializes a new JavaScript object with the given
// {prototype} and {properties}. The newly created object will be
// in dictionary properties mode. The {elements} can either be the
@@ -680,7 +690,10 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSModuleNamespace> NewJSModuleNamespace();
- Handle<Module> NewModule(Handle<SharedFunctionInfo> code);
+ Handle<SourceTextModule> NewSourceTextModule(Handle<SharedFunctionInfo> code);
+ Handle<SyntheticModule> NewSyntheticModule(
+ Handle<String> module_name, Handle<FixedArray> export_names,
+ v8::Module::SyntheticModuleEvaluationSteps evaluation_steps);
Handle<JSArrayBuffer> NewJSArrayBuffer(
SharedFlag shared, AllocationType allocation = AllocationType::kYoung);
@@ -760,19 +773,18 @@ class V8_EXPORT_PRIVATE Factory {
// Create a serialized scope info.
Handle<ScopeInfo> NewScopeInfo(int length);
- Handle<ModuleInfo> NewModuleInfo();
+ Handle<SourceTextModuleInfo> NewSourceTextModuleInfo();
Handle<PreparseData> NewPreparseData(int data_length, int children_length);
Handle<UncompiledDataWithoutPreparseData>
NewUncompiledDataWithoutPreparseData(Handle<String> inferred_name,
int32_t start_position,
- int32_t end_position,
- int32_t function_literal_id);
+ int32_t end_position);
Handle<UncompiledDataWithPreparseData> NewUncompiledDataWithPreparseData(
Handle<String> inferred_name, int32_t start_position,
- int32_t end_position, int32_t function_literal_id, Handle<PreparseData>);
+ int32_t end_position, Handle<PreparseData>);
// Create an External object for V8's external API.
Handle<JSObject> NewExternal(void* value);
@@ -884,7 +896,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Map> ObjectLiteralMapFromCache(Handle<NativeContext> native_context,
int number_of_properties);
- Handle<LoadHandler> NewLoadHandler(int data_count);
+ Handle<LoadHandler> NewLoadHandler(
+ int data_count, AllocationType allocation = AllocationType::kOld);
Handle<StoreHandler> NewStoreHandler(int data_count);
Handle<RegExpMatchInfo> NewRegExpMatchInfo();
@@ -1074,11 +1087,20 @@ class V8_EXPORT_PRIVATE Factory {
Handle<String> NumberToStringCacheSet(Handle<Object> number, int hash,
const char* string, bool check_cache);
- // Create a JSArray with no elements and no length.
- Handle<JSArray> NewJSArray(
- ElementsKind elements_kind,
+ // Creates a new JSArray with the given backing storage. Performs no
+ // verification of the backing storage because it may not yet be filled.
+ Handle<JSArray> NewJSArrayWithUnverifiedElements(
+ Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
AllocationType allocation = AllocationType::kYoung);
+ // Creates the backing storage for a JSArray. This handle must be discarded
+ // before returning the JSArray reference to code outside Factory, which might
+ // decide to left-trim the backing store. To avoid unnecessary HandleScopes,
+ // this method requires capacity greater than zero.
+ Handle<FixedArrayBase> NewJSArrayStorage(
+ ElementsKind elements_kind, int capacity,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
MaybeHandle<String> name, MaybeHandle<HeapObject> maybe_function_data,
int maybe_builtin_index, FunctionKind kind = kNormalFunction);
diff --git a/chromium/v8/src/heap/gc-tracer.cc b/chromium/v8/src/heap/gc-tracer.cc
index fab663d7677..77e6b999970 100644
--- a/chromium/v8/src/heap/gc-tracer.cc
+++ b/chromium/v8/src/heap/gc-tracer.cc
@@ -18,9 +18,9 @@ namespace internal {
static size_t CountTotalHolesSize(Heap* heap) {
size_t holes_size = 0;
- PagedSpaces spaces(heap);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(heap);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
DCHECK_GE(holes_size + space->Waste() + space->Available(), holes_size);
holes_size += space->Waste() + space->Available();
}
@@ -150,9 +150,11 @@ GCTracer::GCTracer(Heap* heap)
allocation_time_ms_(0.0),
new_space_allocation_counter_bytes_(0),
old_generation_allocation_counter_bytes_(0),
+ embedder_allocation_counter_bytes_(0),
allocation_duration_since_gc_(0.0),
new_space_allocation_in_bytes_since_gc_(0),
old_generation_allocation_in_bytes_since_gc_(0),
+ embedder_allocation_in_bytes_since_gc_(0),
combined_mark_compact_speed_cache_(0.0),
start_counter_(0),
average_mutator_duration_(0),
@@ -264,6 +266,12 @@ void GCTracer::Start(GarbageCollector collector,
counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason));
} else {
counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
+
+ if (FLAG_trace_gc_freelists) {
+ PrintIsolate(heap_->isolate(),
+ "FreeLists statistics before collection:\n");
+ heap_->PrintFreeListsStats();
+ }
}
}
@@ -377,6 +385,14 @@ void GCTracer::Stop(GarbageCollector collector) {
}
}
+void GCTracer::NotifySweepingCompleted() {
+ if (FLAG_trace_gc_freelists) {
+ PrintIsolate(heap_->isolate(),
+ "FreeLists statistics after sweeping completed:\n");
+ heap_->PrintFreeListsStats();
+ }
+}
+
void GCTracer::SampleAllocation(double current_ms,
size_t new_space_counter_bytes,
size_t old_generation_counter_bytes,
@@ -948,10 +964,9 @@ double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
}
double GCTracer::EmbedderSpeedInBytesPerMillisecond() const {
- if (recorded_embedder_speed_ != 0.0) {
- return recorded_embedder_speed_;
- }
- return kConservativeSpeedInBytesPerMillisecond;
+ // Note: Returning 0 is ok here as callers check for whether embedder speeds
+ // have been recorded at all.
+ return recorded_embedder_speed_;
}
double GCTracer::ScavengeSpeedInBytesPerMillisecond(
diff --git a/chromium/v8/src/heap/gc-tracer.h b/chromium/v8/src/heap/gc-tracer.h
index 4ddd0ef1c20..ec54b6c1ab6 100644
--- a/chromium/v8/src/heap/gc-tracer.h
+++ b/chromium/v8/src/heap/gc-tracer.h
@@ -216,6 +216,8 @@ class V8_EXPORT_PRIVATE GCTracer {
// Stop collecting data and print results.
void Stop(GarbageCollector collector);
+ void NotifySweepingCompleted();
+
void NotifyYoungGenerationHandling(
YoungGenerationHandling young_generation_handling);
diff --git a/chromium/v8/src/heap/heap-controller.cc b/chromium/v8/src/heap/heap-controller.cc
index 77e4870913a..d59f8abe9f5 100644
--- a/chromium/v8/src/heap/heap-controller.cc
+++ b/chromium/v8/src/heap/heap-controller.cc
@@ -33,20 +33,20 @@ double MemoryController<Trait>::MaxGrowingFactor(size_t max_heap_size) {
constexpr double kMaxSmallFactor = 2.0;
constexpr double kHighFactor = 4.0;
- size_t max_size_in_mb = max_heap_size / MB;
- max_size_in_mb = Max(max_size_in_mb, Trait::kMinSize);
+ size_t max_size = max_heap_size;
+ max_size = Max(max_size, Trait::kMinSize);
// If we are on a device with lots of memory, we allow a high heap
// growing factor.
- if (max_size_in_mb >= Trait::kMaxSize) {
+ if (max_size >= Trait::kMaxSize) {
return kHighFactor;
}
- DCHECK_GE(max_size_in_mb, Trait::kMinSize);
- DCHECK_LT(max_size_in_mb, Trait::kMaxSize);
+ DCHECK_GE(max_size, Trait::kMinSize);
+ DCHECK_LT(max_size, Trait::kMaxSize);
// On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
- double factor = (max_size_in_mb - Trait::kMinSize) *
+ double factor = (max_size - Trait::kMinSize) *
(kMaxSmallFactor - kMinSmallFactor) /
(Trait::kMaxSize - Trait::kMinSize) +
kMinSmallFactor;
@@ -126,8 +126,9 @@ size_t MemoryController<Trait>::MinimumAllocationLimitGrowingStep(
template <typename Trait>
size_t MemoryController<Trait>::CalculateAllocationLimit(
- Heap* heap, size_t current_size, size_t max_size, size_t new_space_capacity,
- double factor, Heap::HeapGrowingMode growing_mode) {
+ Heap* heap, size_t current_size, size_t min_size, size_t max_size,
+ size_t new_space_capacity, double factor,
+ Heap::HeapGrowingMode growing_mode) {
switch (growing_mode) {
case Heap::HeapGrowingMode::kConservative:
case Heap::HeapGrowingMode::kSlow:
@@ -155,9 +156,11 @@ size_t MemoryController<Trait>::CalculateAllocationLimit(
static_cast<uint64_t>(current_size) +
MinimumAllocationLimitGrowingStep(growing_mode)) +
new_space_capacity;
+ const uint64_t limit_above_min_size = Max<uint64_t>(limit, min_size);
const uint64_t halfway_to_the_max =
(static_cast<uint64_t>(current_size) + max_size) / 2;
- const size_t result = static_cast<size_t>(Min(limit, halfway_to_the_max));
+ const size_t result =
+ static_cast<size_t>(Min(limit_above_min_size, halfway_to_the_max));
if (FLAG_trace_gc_verbose) {
Isolate::FromHeap(heap)->PrintWithTimestamp(
"[%s] Limit: old size: %zu KB, new limit: %zu KB (%.1f)\n",
diff --git a/chromium/v8/src/heap/heap-controller.h b/chromium/v8/src/heap/heap-controller.h
index bba1588669b..d4a3534cd7d 100644
--- a/chromium/v8/src/heap/heap-controller.h
+++ b/chromium/v8/src/heap/heap-controller.h
@@ -14,9 +14,8 @@ namespace v8 {
namespace internal {
struct BaseControllerTrait {
- // Sizes are in MB.
- static constexpr size_t kMinSize = 128 * Heap::kPointerMultiplier;
- static constexpr size_t kMaxSize = 1024 * Heap::kPointerMultiplier;
+ static constexpr size_t kMinSize = 128u * Heap::kPointerMultiplier * MB;
+ static constexpr size_t kMaxSize = 1024u * Heap::kPointerMultiplier * MB;
static constexpr double kMinGrowingFactor = 1.1;
static constexpr double kMaxGrowingFactor = 4.0;
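
The trait bounds are now absolute byte values rather than MB counts, so MaxGrowingFactor above compares max_heap_size against them directly. For orientation, assuming kPointerMultiplier == 2 (a 64-bit build without pointer compression):

#include <cstddef>

constexpr size_t kMB = size_t{1} << 20;
constexpr size_t kAssumedPointerMultiplier = 2;                           // assumption
constexpr size_t kMinSizeBytes = 128 * kAssumedPointerMultiplier * kMB;   // 256 MB
constexpr size_t kMaxSizeBytes = 1024 * kAssumedPointerMultiplier * kMB;  // 2 GB
static_assert(kMaxSizeBytes == size_t{2048} * kMB,
              "heaps configured at or above 2 GB get the high growing factor");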
@@ -43,7 +42,7 @@ class V8_EXPORT_PRIVATE MemoryController : public AllStatic {
double mutator_speed);
static size_t CalculateAllocationLimit(Heap* heap, size_t current_size,
- size_t max_size,
+ size_t min_size, size_t max_size,
size_t new_space_capacity,
double factor,
Heap::HeapGrowingMode growing_mode);
diff --git a/chromium/v8/src/heap/heap-inl.h b/chromium/v8/src/heap/heap-inl.h
index 4ce35bd9619..f2f7a7f6920 100644
--- a/chromium/v8/src/heap/heap-inl.h
+++ b/chromium/v8/src/heap/heap-inl.h
@@ -263,15 +263,13 @@ void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
}
bool Heap::CanAllocateInReadOnlySpace() {
- return !deserialization_complete_ &&
- (isolate()->serializer_enabled() ||
- !isolate()->initialized_from_snapshot());
+ return read_only_space()->writable();
}
void Heap::UpdateAllocationsHash(HeapObject object) {
Address object_address = object.address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
- AllocationSpace allocation_space = memory_chunk->owner()->identity();
+ AllocationSpace allocation_space = memory_chunk->owner_identity();
STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
uint32_t value =
@@ -374,13 +372,12 @@ bool Heap::InToPage(HeapObject heap_object) {
bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
// static
-Heap* Heap::FromWritableHeapObject(const HeapObject obj) {
+Heap* Heap::FromWritableHeapObject(HeapObject obj) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
- SLOW_DCHECK(chunk->owner()->identity() != RO_SPACE ||
- static_cast<ReadOnlySpace*>(chunk->owner())->writable());
+ SLOW_DCHECK(chunk->IsWritable());
Heap* heap = chunk->heap();
SLOW_DCHECK(heap != nullptr);
return heap;
@@ -408,7 +405,7 @@ AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
return AllocationMemento();
}
HeapObject candidate = HeapObject::FromAddress(memento_address);
- MapWordSlot candidate_map_slot = candidate.map_slot();
+ ObjectSlot candidate_map_slot = candidate.map_slot();
// This fast check may peek at an uninitialized word. However, the slow check
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
@@ -614,8 +611,8 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
scope_active_(chunk_->heap()->write_protect_code_memory() &&
chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
if (scope_active_) {
- DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
- (chunk_->owner()->identity() == CODE_LO_SPACE));
+ DCHECK(chunk_->owner_identity() == CODE_SPACE ||
+ (chunk_->owner_identity() == CODE_LO_SPACE));
chunk_->SetReadAndWritable();
}
}
diff --git a/chromium/v8/src/heap/heap-write-barrier-inl.h b/chromium/v8/src/heap/heap-write-barrier-inl.h
index 6c5f20ac72e..5687284b1e8 100644
--- a/chromium/v8/src/heap/heap-write-barrier-inl.h
+++ b/chromium/v8/src/heap/heap-write-barrier-inl.h
@@ -11,9 +11,6 @@
#include "src/heap/heap-write-barrier.h"
#include "src/common/globals.h"
-// TODO(jkummerow): Get rid of this by moving GetIsolateFromWritableObject
-// elsewhere.
-#include "src/execution/isolate.h"
#include "src/objects/code.h"
#include "src/objects/compressed-slots-inl.h"
#include "src/objects/fixed-array.h"
@@ -42,27 +39,21 @@ V8_EXPORT_PRIVATE void Heap_MarkingBarrierForDescriptorArraySlow(
Heap* heap, HeapObject host, HeapObject descriptor_array,
int number_of_own_descriptors);
+V8_EXPORT_PRIVATE void Heap_GenerationalEphemeronKeyBarrierSlow(
+ Heap* heap, EphemeronHashTable table, Address slot);
+
// Do not use these internal details anywhere outside of this file. These
// internals are only intended to shortcut write barrier checks.
namespace heap_internals {
-struct Space {
- static constexpr uintptr_t kIdOffset = 9 * kSystemPointerSize;
- V8_INLINE AllocationSpace identity() {
- return *reinterpret_cast<AllocationSpace*>(reinterpret_cast<Address>(this) +
- kIdOffset);
- }
-};
-
struct MemoryChunk {
- static constexpr uintptr_t kFlagsOffset = sizeof(size_t);
+ static constexpr uintptr_t kFlagsOffset = kSizetSize;
static constexpr uintptr_t kHeapOffset =
- kFlagsOffset + kUIntptrSize + 4 * kSystemPointerSize;
- static constexpr uintptr_t kOwnerOffset =
- kHeapOffset + 2 * kSystemPointerSize;
+ kSizetSize + kUIntptrSize + kSystemPointerSize;
static constexpr uintptr_t kMarkingBit = uintptr_t{1} << 18;
static constexpr uintptr_t kFromPageBit = uintptr_t{1} << 3;
static constexpr uintptr_t kToPageBit = uintptr_t{1} << 4;
+ static constexpr uintptr_t kReadOnlySpaceBit = uintptr_t{1} << 21;
V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
HeapObject object) {
@@ -84,13 +75,12 @@ struct MemoryChunk {
V8_INLINE Heap* GetHeap() {
Heap* heap = *reinterpret_cast<Heap**>(reinterpret_cast<Address>(this) +
kHeapOffset);
- SLOW_DCHECK(heap != nullptr);
+ DCHECK_NOT_NULL(heap);
return heap;
}
- V8_INLINE Space* GetOwner() {
- return *reinterpret_cast<Space**>(reinterpret_cast<Address>(this) +
- kOwnerOffset);
+ V8_INLINE bool InReadOnlySpace() const {
+ return GetFlags() & kReadOnlySpaceBit;
}
};
@@ -122,8 +112,7 @@ inline void GenerationalEphemeronKeyBarrierInternal(EphemeronHashTable table,
return;
}
- Heap* heap = GetHeapFromWritableObject(table);
- heap->RecordEphemeronKeyWrite(table, slot);
+ Heap_GenerationalEphemeronKeyBarrierSlow(table_chunk->GetHeap(), table, slot);
}
inline void MarkingBarrierInternal(HeapObject object, Address slot,
@@ -231,27 +220,16 @@ inline WriteBarrierMode GetWriteBarrierModeForObject(
return UPDATE_WRITE_BARRIER;
}
-inline bool ObjectInYoungGeneration(const Object object) {
+inline bool ObjectInYoungGeneration(Object object) {
if (object.IsSmi()) return false;
return heap_internals::MemoryChunk::FromHeapObject(HeapObject::cast(object))
->InYoungGeneration();
}
-inline Heap* GetHeapFromWritableObject(const HeapObject object) {
+inline bool IsReadOnlyHeapObject(HeapObject object) {
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
- return chunk->GetHeap();
-}
-
-inline bool GetIsolateFromWritableObject(HeapObject obj, Isolate** isolate) {
- heap_internals::MemoryChunk* chunk =
- heap_internals::MemoryChunk::FromHeapObject(obj);
- if (chunk->GetOwner()->identity() == RO_SPACE) {
- *isolate = nullptr;
- return false;
- }
- *isolate = Isolate::FromHeap(chunk->GetHeap());
- return true;
+ return chunk->InReadOnlySpace();
}
} // namespace internal
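
The read-only check is now a single page-flag load instead of chasing the owner Space through the chunk header. A minimal caller sketch (the helper name is hypothetical):

// Write barriers and isolate lookups can bail out early on immutable objects.
inline bool NeedsHeapBookkeeping(HeapObject host) {  // hypothetical helper
  return !IsReadOnlyHeapObject(host);
}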
diff --git a/chromium/v8/src/heap/heap-write-barrier.h b/chromium/v8/src/heap/heap-write-barrier.h
index ead17f93969..1126fd6f4be 100644
--- a/chromium/v8/src/heap/heap-write-barrier.h
+++ b/chromium/v8/src/heap/heap-write-barrier.h
@@ -41,7 +41,7 @@ void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
HeapObject descriptor_array,
int number_of_own_descriptors);
-Heap* GetHeapFromWritableObject(const HeapObject object);
+inline bool IsReadOnlyHeapObject(HeapObject object);
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index 52387b5bc16..7feb1c11ba9 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -5,6 +5,7 @@
#include "src/heap/heap.h"
#include <cinttypes>
+#include <iomanip>
#include <unordered_map>
#include <unordered_set>
@@ -63,7 +64,7 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
@@ -118,6 +119,12 @@ void Heap_MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
number_of_own_descriptors);
}
+void Heap_GenerationalEphemeronKeyBarrierSlow(Heap* heap,
+ EphemeronHashTable table,
+ Address slot) {
+ heap->RecordEphemeronKeyWrite(table, slot);
+}
+
void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
@@ -164,29 +171,21 @@ struct Heap::StrongRootsList {
class IdleScavengeObserver : public AllocationObserver {
public:
- IdleScavengeObserver(Heap& heap, intptr_t step_size)
+ IdleScavengeObserver(Heap* heap, intptr_t step_size)
: AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
- heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
+ heap_->ScheduleIdleScavengeIfNeeded(bytes_allocated);
}
private:
- Heap& heap_;
+ Heap* heap_;
};
Heap::Heap()
: isolate_(isolate()),
- initial_max_old_generation_size_(max_old_generation_size_),
- initial_max_old_generation_size_threshold_(0),
- initial_old_generation_size_(
- Min(max_old_generation_size_, kMaxInitialOldGenerationSize)),
memory_pressure_level_(MemoryPressureLevel::kNone),
- old_generation_allocation_limit_(initial_old_generation_size_),
- global_allocation_limit_(initial_old_generation_size_),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
- current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
- is_current_gc_forced_(false),
external_string_table_(this) {
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
@@ -207,23 +206,87 @@ size_t Heap::MaxReserved() {
max_old_generation_size_);
}
-size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
- const size_t old_space_physical_memory_factor = 4;
- size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
- old_space_physical_memory_factor *
- kPointerMultiplier);
- size_t max_size_in_mb = V8HeapTrait::kMaxSize;
+size_t Heap::YoungGenerationSizeFromOldGenerationSize(size_t old_generation) {
+ // Compute the semi space size and cap it.
+ size_t ratio = old_generation <= kOldGenerationLowMemory
+ ? kOldGenerationToSemiSpaceRatioLowMemory
+ : kOldGenerationToSemiSpaceRatio;
+ size_t semi_space = old_generation / ratio;
+ semi_space = Min<size_t>(semi_space, kMaxSemiSpaceSize);
+ semi_space = Max<size_t>(semi_space, kMinSemiSpaceSize);
+ semi_space = RoundUp(semi_space, Page::kPageSize);
+ return YoungGenerationSizeFromSemiSpaceSize(semi_space);
+}
+
+size_t Heap::HeapSizeFromPhysicalMemory(uint64_t physical_memory) {
+ // Compute the old generation size and cap it.
+ uint64_t old_generation = physical_memory /
+ kPhysicalMemoryToOldGenerationRatio *
+ kPointerMultiplier;
+ old_generation =
+ Min<uint64_t>(old_generation, MaxOldGenerationSize(physical_memory));
+ old_generation = Max<uint64_t>(old_generation, V8HeapTrait::kMinSize);
+ old_generation = RoundUp(old_generation, Page::kPageSize);
+
+ size_t young_generation = YoungGenerationSizeFromOldGenerationSize(
+ static_cast<size_t>(old_generation));
+ return static_cast<size_t>(old_generation) + young_generation;
+}
+
+void Heap::GenerationSizesFromHeapSize(size_t heap_size,
+ size_t* young_generation_size,
+ size_t* old_generation_size) {
+ // Initialize values for the case when the given heap size is too small.
+ *young_generation_size = 0;
+ *old_generation_size = 0;
+ // Binary search for the largest old generation size that fits to the given
+ // heap limit considering the correspondingly sized young generation.
+ size_t lower = 0, upper = heap_size;
+ while (lower + 1 < upper) {
+ size_t old_generation = lower + (upper - lower) / 2;
+ size_t young_generation =
+ YoungGenerationSizeFromOldGenerationSize(old_generation);
+ if (old_generation + young_generation <= heap_size) {
+ // This size configuration fits into the given heap limit.
+ *young_generation_size = young_generation;
+ *old_generation_size = old_generation;
+ lower = old_generation;
+ } else {
+ upper = old_generation;
+ }
+ }
+}
+size_t Heap::MinYoungGenerationSize() {
+ return YoungGenerationSizeFromSemiSpaceSize(kMinSemiSpaceSize);
+}
+
+size_t Heap::MinOldGenerationSize() {
+ size_t paged_space_count =
+ LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
+ return paged_space_count * Page::kPageSize;
+}
+
+size_t Heap::MaxOldGenerationSize(uint64_t physical_memory) {
+ size_t max_size = V8HeapTrait::kMaxSize;
// Finch experiment: Increase the heap size from 2GB to 4GB for 64-bit
// systems with physical memory bigger than 16GB.
constexpr bool x64_bit = Heap::kPointerMultiplier >= 2;
if (FLAG_huge_max_old_generation_size && x64_bit &&
physical_memory / GB > 16) {
- DCHECK_LE(max_size_in_mb, 4096);
- max_size_in_mb = 4096; // 4GB
+ DCHECK_EQ(max_size / GB, 2);
+ max_size *= 2;
}
+ return max_size;
+}
- return Max(Min(computed_size, max_size_in_mb), V8HeapTrait::kMinSize);
+size_t Heap::YoungGenerationSizeFromSemiSpaceSize(size_t semi_space_size) {
+ return semi_space_size * (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
+}
+
+size_t Heap::SemiSpaceSizeFromYoungGenerationSize(
+ size_t young_generation_size) {
+ return young_generation_size / (2 + kNewLargeObjectSpaceToSemiSpaceRatio);
}
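
Taken together, these helpers derive the default heap layout from physical memory. A worked sketch, assuming kPointerMultiplier == 2 (so the old-generation cap is 2 GB and the semi-space cap is 16 MB) and ignoring page-size rounding:

#include <algorithm>
#include <cstdint>

constexpr uint64_t kMB = uint64_t{1} << 20;
constexpr uint64_t kGB = uint64_t{1} << 30;

constexpr uint64_t HeapSizeSketch(uint64_t physical_memory) {
  // Old generation: physical / 4 * kPointerMultiplier, clamped to [256 MB, 2 GB].
  uint64_t old_gen = physical_memory / 4 * 2;
  old_gen = std::min(old_gen, 2 * kGB);
  old_gen = std::max(old_gen, 256 * kMB);
  // Semi-space: old_gen / 128 (the non-low-memory ratio), clamped to
  // [1 MB, 16 MB]; the young generation is two semi-spaces plus an
  // equal-sized new large object space, i.e. 3 * semi_space.
  uint64_t semi = std::min(old_gen / 128, 16 * kMB);
  semi = std::max(semi, 1 * kMB);
  return old_gen + 3 * semi;
}

static_assert(HeapSizeSketch(8 * kGB) == 2 * kGB + 48 * kMB,
              "8 GB of RAM maps to a 2 GB old generation plus a 48 MB young generation");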
size_t Heap::Capacity() {
@@ -234,10 +297,10 @@ size_t Heap::Capacity() {
size_t Heap::OldGenerationCapacity() {
if (!HasBeenSetUp()) return 0;
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
size_t total = 0;
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
total += space->Capacity();
}
return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects();
@@ -246,10 +309,10 @@ size_t Heap::OldGenerationCapacity() {
size_t Heap::CommittedOldGenerationMemory() {
if (!HasBeenSetUp()) return 0;
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
size_t total = 0;
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
total += space->CommittedMemory();
}
return total + lo_space_->Size() + code_lo_space_->Size();
@@ -273,8 +336,8 @@ size_t Heap::CommittedPhysicalMemory() {
if (!HasBeenSetUp()) return 0;
size_t total = 0;
- for (SpaceIterator it(this); it.has_next();) {
- total += it.next()->CommittedPhysicalMemory();
+ for (SpaceIterator it(this); it.HasNext();) {
+ total += it.Next()->CommittedPhysicalMemory();
}
return total;
@@ -301,8 +364,8 @@ size_t Heap::Available() {
size_t total = 0;
- for (SpaceIterator it(this); it.has_next();) {
- total += it.next()->Available();
+ for (SpaceIterator it(this); it.HasNext();) {
+ total += it.Next()->Available();
}
total += memory_allocator()->Available();
@@ -311,7 +374,7 @@ size_t Heap::Available() {
bool Heap::CanExpandOldGeneration(size_t size) {
if (force_oom_) return false;
- if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
+ if (OldGenerationCapacity() + size > max_old_generation_size_) return false;
// The OldGenerationCapacity does not account for compaction spaces used
// during evacuation. Ensure that expanding the old generation does not push
// the total allocated memory size over the maximum heap size.
@@ -443,6 +506,81 @@ void Heap::PrintShortHeapStatistics() {
total_gc_time_ms_);
}
+void Heap::PrintFreeListsStats() {
+ DCHECK(FLAG_trace_gc_freelists);
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ PrintIsolate(isolate_,
+ "Freelists statistics per Page: "
+ "[category: length || total free bytes]\n");
+ }
+
+ std::vector<int> categories_lengths(
+ old_space()->free_list()->number_of_categories(), 0);
+ std::vector<size_t> categories_sums(
+ old_space()->free_list()->number_of_categories(), 0);
+ unsigned int pageCnt = 0;
+
+ // This loop computes free-list lengths and sums.
+ // If FLAG_trace_gc_freelists_verbose is enabled, it also prints
+ // the stats of each FreeListCategory of each Page.
+ for (Page* page : *old_space()) {
+ std::ostringstream out_str;
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ out_str << "Page " << std::setw(4) << pageCnt;
+ }
+
+ for (int cat = kFirstCategory;
+ cat <= old_space()->free_list()->last_category(); cat++) {
+ FreeListCategory* free_list =
+ page->free_list_category(static_cast<FreeListCategoryType>(cat));
+ int length = free_list->FreeListLength();
+ size_t sum = free_list->SumFreeList();
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ out_str << "[" << cat << ": " << std::setw(4) << length << " || "
+ << std::setw(6) << sum << " ]"
+ << (cat == old_space()->free_list()->last_category() ? "\n"
+ : ", ");
+ }
+ categories_lengths[cat] += length;
+ categories_sums[cat] += sum;
+ }
+
+ if (FLAG_trace_gc_freelists_verbose) {
+ PrintIsolate(isolate_, "%s", out_str.str().c_str());
+ }
+
+ pageCnt++;
+ }
+
+ // Print statistics about old_space (pages, free/wasted/used memory...).
+ PrintIsolate(
+ isolate_,
+ "%d pages. Free space: %.1f MB (waste: %.2f). "
+ "Usage: %.1f/%.1f (MB) -> %.2f%%.\n",
+ pageCnt, static_cast<double>(old_space_->Available()) / MB,
+ static_cast<double>(old_space_->Waste()) / MB,
+ static_cast<double>(old_space_->Size()) / MB,
+ static_cast<double>(old_space_->Capacity()) / MB,
+ static_cast<double>(old_space_->Size()) / old_space_->Capacity() * 100);
+
+ // Print global statistics of each FreeListCategory (length & sum).
+ PrintIsolate(isolate_,
+ "FreeLists global statistics: "
+ "[category: length || total free KB]\n");
+ std::ostringstream out_str;
+ for (int cat = kFirstCategory;
+ cat <= old_space()->free_list()->last_category(); cat++) {
+ out_str << "[" << cat << ": " << categories_lengths[cat] << " || "
+ << std::fixed << std::setprecision(2)
+ << static_cast<double>(categories_sums[cat]) / KB << " KB]"
+ << (cat == old_space()->free_list()->last_category() ? "\n" : ", ");
+ }
+ PrintIsolate(isolate_, "%s", out_str.str().c_str());
+}
+
void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
HeapStatistics stats;
reinterpret_cast<v8::Isolate*>(isolate())->GetHeapStatistics(&stats);
@@ -483,7 +621,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
MEMBER("malloced_memory") << stats.malloced_memory() << ","
MEMBER("external_memory") << stats.external_memory() << ","
MEMBER("peak_malloced_memory") << stats.peak_malloced_memory() << ","
- MEMBER("pages") << LIST(
+ MEMBER("spaces") << LIST(
SpaceStatistics(RO_SPACE) << "," <<
SpaceStatistics(NEW_SPACE) << "," <<
SpaceStatistics(OLD_SPACE) << "," <<
@@ -693,8 +831,8 @@ void Heap::GarbageCollectionPrologue() {
size_t Heap::SizeOfObjects() {
size_t total = 0;
- for (SpaceIterator it(this); it.has_next();) {
- total += it.next()->SizeOfObjects();
+ for (SpaceIterator it(this); it.HasNext();) {
+ total += it.Next()->SizeOfObjects();
}
return total;
}
@@ -750,8 +888,8 @@ void Heap::AddAllocationObserversToAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
- for (SpaceIterator it(this); it.has_next();) {
- Space* space = it.next();
+ for (SpaceIterator it(this); it.HasNext();) {
+ Space* space = it.Next();
if (space == new_space()) {
space->AddAllocationObserver(new_space_observer);
} else {
@@ -764,8 +902,8 @@ void Heap::RemoveAllocationObserversFromAllSpaces(
AllocationObserver* observer, AllocationObserver* new_space_observer) {
DCHECK(observer && new_space_observer);
- for (SpaceIterator it(this); it.has_next();) {
- Space* space = it.next();
+ for (SpaceIterator it(this); it.HasNext();) {
+ Space* space = it.Next();
if (space == new_space()) {
space->RemoveAllocationObserver(new_space_observer);
} else {
@@ -1194,27 +1332,27 @@ intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
return 0;
}
-void ReportDuplicates(int size, std::vector<HeapObject>& objects) {
- if (objects.size() == 0) return;
+void ReportDuplicates(int size, std::vector<HeapObject>* objects) {
+ if (objects->size() == 0) return;
- sort(objects.begin(), objects.end(), [size](HeapObject a, HeapObject b) {
+ sort(objects->begin(), objects->end(), [size](HeapObject a, HeapObject b) {
intptr_t c = CompareWords(size, a, b);
if (c != 0) return c < 0;
return a < b;
});
std::vector<std::pair<int, HeapObject>> duplicates;
- HeapObject current = objects[0];
+ HeapObject current = (*objects)[0];
int count = 1;
- for (size_t i = 1; i < objects.size(); i++) {
- if (CompareWords(size, current, objects[i]) == 0) {
+ for (size_t i = 1; i < objects->size(); i++) {
+ if (CompareWords(size, current, (*objects)[i]) == 0) {
count++;
} else {
if (count > 1) {
duplicates.push_back(std::make_pair(count - 1, current));
}
count = 1;
- current = objects[i];
+ current = (*objects)[i];
}
}
if (count > 1) {
@@ -1274,29 +1412,30 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
set_current_gc_flags(kNoGCFlags);
new_space_->Shrink();
- new_lo_space_->SetCapacity(new_space_->Capacity());
+ new_lo_space_->SetCapacity(new_space_->Capacity() *
+ kNewLargeObjectSpaceToSemiSpaceRatio);
UncommitFromSpace();
EagerlyFreeExternalMemory();
if (FLAG_trace_duplicate_threshold_kb) {
std::map<int, std::vector<HeapObject>> objects_by_size;
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
- HeapObjectIterator it(space);
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
+ PagedSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
objects_by_size[obj.Size()].push_back(obj);
}
}
{
- LargeObjectIterator it(lo_space());
+ LargeObjectSpaceObjectIterator it(lo_space());
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
objects_by_size[obj.Size()].push_back(obj);
}
}
for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
++it) {
- ReportDuplicates(it->first, it->second);
+ ReportDuplicates(it->first, &it->second);
}
}
}
@@ -1669,7 +1808,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
for (int space = FIRST_SPACE;
- space < SerializerDeserializer::kNumberOfSpaces; space++) {
+ space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces);
+ space++) {
Reservation* reservation = &reservations[space];
DCHECK_LE(1, reservation->size());
if (reservation->at(0).size == 0) {
@@ -1727,8 +1867,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
Address free_space_address = free_space.address();
CreateFillerObjectAt(free_space_address, size,
ClearRecordedSlots::kNo);
- DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
- space);
+ DCHECK(IsPreAllocatedSpace(static_cast<SnapshotSpace>(space)));
chunk.start = free_space_address;
chunk.end = free_space_address + size;
} else {
@@ -1993,14 +2132,16 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
old_generation_allocation_limit_ =
MemoryController<V8HeapTrait>::CalculateAllocationLimit(
- this, old_gen_size, max_old_generation_size_, new_space_capacity,
- v8_growing_factor, mode);
+ this, old_gen_size, min_old_generation_size_,
+ max_old_generation_size_, new_space_capacity, v8_growing_factor,
+ mode);
if (UseGlobalMemoryScheduling()) {
DCHECK_GT(global_growing_factor, 0);
global_allocation_limit_ =
MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
- this, GlobalSizeOfObjects(), max_global_memory_size_,
- new_space_capacity, global_growing_factor, mode);
+ this, GlobalSizeOfObjects(), min_global_memory_size_,
+ max_global_memory_size_, new_space_capacity,
+ global_growing_factor, mode);
}
CheckIneffectiveMarkCompact(
old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
@@ -2008,8 +2149,9 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
old_generation_size_configured_) {
size_t new_old_generation_limit =
MemoryController<V8HeapTrait>::CalculateAllocationLimit(
- this, old_gen_size, max_old_generation_size_, new_space_capacity,
- v8_growing_factor, mode);
+ this, old_gen_size, min_old_generation_size_,
+ max_old_generation_size_, new_space_capacity, v8_growing_factor,
+ mode);
if (new_old_generation_limit < old_generation_allocation_limit_) {
old_generation_allocation_limit_ = new_old_generation_limit;
}
@@ -2017,8 +2159,9 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
DCHECK_GT(global_growing_factor, 0);
size_t new_global_limit =
MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
- this, GlobalSizeOfObjects(), max_global_memory_size_,
- new_space_capacity, global_growing_factor, mode);
+ this, GlobalSizeOfObjects(), min_global_memory_size_,
+ max_global_memory_size_, new_space_capacity,
+ global_growing_factor, mode);
if (new_global_limit < global_allocation_limit_) {
global_allocation_limit_ = new_global_limit;
}
@@ -2433,8 +2576,8 @@ void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) {
if (!young_strings_.empty()) {
v->VisitRootPointers(
Root::kExternalStringsTable, nullptr,
- FullObjectSlot(&young_strings_[0]),
- FullObjectSlot(&young_strings_[young_strings_.size()]));
+ FullObjectSlot(young_strings_.data()),
+ FullObjectSlot(young_strings_.data() + young_strings_.size()));
}
}
@@ -2596,6 +2739,7 @@ STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
#endif
#ifdef V8_HOST_ARCH_32_BIT
+// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize);
#endif
@@ -2981,7 +3125,7 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
// We do not create a filler for objects in a large object space.
if (!IsLargeObject(object)) {
HeapObject filler =
- CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
+ CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kNo);
DCHECK(!filler.is_null());
// Clear the mark bits of the black area that belongs now to the filler.
// This is an optimization. The sweeper will release black fillers anyway.
@@ -3229,7 +3373,8 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// object space for side effects.
IncrementalMarking::MarkingState* marking_state =
incremental_marking()->marking_state();
- for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
+ for (int i = OLD_SPACE;
+ i < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces); i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
@@ -3634,8 +3779,8 @@ void Heap::Print() {
if (!HasBeenSetUp()) return;
isolate()->PrintStack(stdout);
- for (SpaceIterator it(this); it.has_next();) {
- it.next()->Print();
+ for (SpaceIterator it(this); it.HasNext();) {
+ it.Next()->Print();
}
}
@@ -3704,6 +3849,9 @@ const char* Heap::GarbageCollectionReasonToString(
}
bool Heap::Contains(HeapObject value) {
+ if (ReadOnlyHeap::Contains(value)) {
+ return false;
+ }
if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
@@ -3736,7 +3884,7 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) {
case NEW_LO_SPACE:
return new_lo_space_->Contains(value);
case RO_SPACE:
- return read_only_space_->Contains(value);
+ return ReadOnlyHeap::Contains(value);
}
UNREACHABLE();
}
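
Heap::Contains now deliberately excludes read-only objects, which may live in a heap shared across Isolates, so code that needs "any valid heap object" has to consult both heaps (the heap.h comment later in this patch points at IsValidHeapObject for that). A minimal sketch with a hypothetical helper name:

bool InAnyHeap(Heap* heap, HeapObject object) {  // hypothetical helper
  return ReadOnlyHeap::Contains(object) || heap->Contains(object);
}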
@@ -3842,9 +3990,9 @@ void Heap::Verify() {
void Heap::VerifyReadOnlyHeap() {
CHECK(!read_only_space_->writable());
// TODO(v8:7464): Always verify read-only space once PagedSpace::Verify
- // supports verifying shared read-only space. Currently HeapObjectIterator is
- // explicitly disabled for read-only space when sharing is enabled, because it
- // relies on PagedSpace::heap_ being non-null.
+ // supports verifying shared read-only space. Currently
+ // PagedSpaceObjectIterator is explicitly disabled for read-only space when
+ // sharing is enabled, because it relies on PagedSpace::heap_ being non-null.
#ifndef V8_SHARED_RO_HEAP
VerifyReadOnlyPointersVisitor read_only_visitor(this);
read_only_space_->Verify(isolate(), &read_only_visitor);
@@ -3997,17 +4145,17 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
#ifdef DEBUG
void Heap::VerifyCountersAfterSweeping() {
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->VerifyCountersAfterSweeping();
}
}
void Heap::VerifyCountersBeforeConcurrentSweeping() {
- PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->VerifyCountersBeforeConcurrentSweeping();
}
}
@@ -4259,89 +4407,139 @@ void Heap::IterateBuiltins(RootVisitor* v) {
#endif // V8_EMBEDDED_BUILTINS
}
-// TODO(1236194): Since the heap size is configurable on the command line
-// and through the API, we should gracefully handle the case that the heap
-// size is not big enough to fit all the initial objects.
-void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
- size_t max_old_generation_size_in_mb,
- size_t code_range_size_in_mb) {
- // Overwrite default configuration.
- if (max_semi_space_size_in_kb != 0) {
+namespace {
+size_t GlobalMemorySizeFromV8Size(size_t v8_size) {
+ const size_t kGlobalMemoryToV8Ratio = 2;
+ return Min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
+ static_cast<uint64_t>(v8_size) * kGlobalMemoryToV8Ratio);
+}
+} // anonymous namespace
+
+void Heap::ConfigureHeap(const v8::ResourceConstraints& constraints) {
+ // Initialize max_semi_space_size_.
+ {
+ max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
+ if (constraints.max_young_generation_size_in_bytes() > 0) {
+ max_semi_space_size_ = SemiSpaceSizeFromYoungGenerationSize(
+ constraints.max_young_generation_size_in_bytes());
+ }
+ if (FLAG_max_semi_space_size > 0) {
+ max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
+ } else if (FLAG_max_heap_size > 0) {
+ size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
+ size_t young_generation_size, old_generation_size;
+ if (FLAG_max_old_space_size > 0) {
+ old_generation_size = static_cast<size_t>(FLAG_max_old_space_size) * MB;
+ young_generation_size = max_heap_size > old_generation_size
+ ? max_heap_size - old_generation_size
+ : 0;
+ } else {
+ GenerationSizesFromHeapSize(max_heap_size, &young_generation_size,
+ &old_generation_size);
+ }
+ max_semi_space_size_ =
+ SemiSpaceSizeFromYoungGenerationSize(young_generation_size);
+ }
+ if (FLAG_stress_compaction) {
+ // This will cause more frequent GCs when stressing.
+ max_semi_space_size_ = MB;
+ }
+ // The new space size must be a power of two to support single-bit testing
+ // for containment.
+ // TODO(ulan): Rounding to a power of 2 is no longer needed. Remove it.
max_semi_space_size_ =
- RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
- }
- if (max_old_generation_size_in_mb != 0) {
- max_old_generation_size_ = max_old_generation_size_in_mb * MB;
+ static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
+ static_cast<uint64_t>(max_semi_space_size_)));
+ max_semi_space_size_ = Max(max_semi_space_size_, kMinSemiSpaceSize);
+ max_semi_space_size_ = RoundDown<Page::kPageSize>(max_semi_space_size_);
}
- // If max space size flags are specified overwrite the configuration.
- if (FLAG_max_semi_space_size > 0) {
- max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
- }
- if (FLAG_max_old_space_size > 0) {
+ // Initialize max_old_generation_size_ and max_global_memory_.
+ {
+ max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
+ if (constraints.max_old_generation_size_in_bytes() > 0) {
+ max_old_generation_size_ = constraints.max_old_generation_size_in_bytes();
+ }
+ if (FLAG_max_old_space_size > 0) {
+ max_old_generation_size_ =
+ static_cast<size_t>(FLAG_max_old_space_size) * MB;
+ } else if (FLAG_max_heap_size > 0) {
+ size_t max_heap_size = static_cast<size_t>(FLAG_max_heap_size) * MB;
+ size_t young_generation_size =
+ YoungGenerationSizeFromSemiSpaceSize(max_semi_space_size_);
+ max_old_generation_size_ = max_heap_size > young_generation_size
+ ? max_heap_size - young_generation_size
+ : 0;
+ }
max_old_generation_size_ =
- static_cast<size_t>(FLAG_max_old_space_size) * MB;
- }
-
- if (Page::kPageSize > MB) {
- max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
+ Max(max_old_generation_size_, MinOldGenerationSize());
max_old_generation_size_ =
- RoundUp<Page::kPageSize>(max_old_generation_size_);
- }
+ RoundDown<Page::kPageSize>(max_old_generation_size_);
- if (FLAG_stress_compaction) {
- // This will cause more frequent GCs when stressing.
- max_semi_space_size_ = MB;
+ max_global_memory_size_ =
+ GlobalMemorySizeFromV8Size(max_old_generation_size_);
}
- // The new space size must be a power of two to support single-bit testing
- // for containment.
- max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
- static_cast<uint64_t>(max_semi_space_size_)));
+ CHECK_IMPLIES(FLAG_max_heap_size > 0,
+ FLAG_max_semi_space_size == 0 || FLAG_max_old_space_size == 0);
- if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
- // Start with at least 1*MB semi-space on machines with a lot of memory.
- initial_semispace_size_ =
- Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
- }
-
- if (FLAG_min_semi_space_size > 0) {
- size_t initial_semispace_size =
- static_cast<size_t>(FLAG_min_semi_space_size) * MB;
- if (initial_semispace_size > max_semi_space_size_) {
- initial_semispace_size_ = max_semi_space_size_;
- if (FLAG_trace_gc) {
- PrintIsolate(isolate_,
- "Min semi-space size cannot be more than the maximum "
- "semi-space size of %zu MB\n",
- max_semi_space_size_ / MB);
- }
- } else {
+ // Initialize initial_semispace_size_.
+ {
+ initial_semispace_size_ = kMinSemiSpaceSize;
+ if (max_semi_space_size_ == kMaxSemiSpaceSize) {
+ // Start with at least 1*MB semi-space on machines with a lot of memory.
+ initial_semispace_size_ =
+ Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
+ }
+ if (constraints.initial_young_generation_size_in_bytes() > 0) {
+ initial_semispace_size_ = SemiSpaceSizeFromYoungGenerationSize(
+ constraints.initial_young_generation_size_in_bytes());
+ }
+ if (FLAG_min_semi_space_size > 0) {
initial_semispace_size_ =
- RoundUp<Page::kPageSize>(initial_semispace_size);
+ static_cast<size_t>(FLAG_min_semi_space_size) * MB;
+ }
+ initial_semispace_size_ =
+ Min(initial_semispace_size_, max_semi_space_size_);
+ initial_semispace_size_ =
+ RoundDown<Page::kPageSize>(initial_semispace_size_);
+ }
+
+ // Initialize initial_old_space_size_.
+ {
+ initial_old_generation_size_ = kMaxInitialOldGenerationSize;
+ if (constraints.initial_old_generation_size_in_bytes() > 0) {
+ initial_old_generation_size_ =
+ constraints.initial_old_generation_size_in_bytes();
+ old_generation_size_configured_ = true;
+ }
+ if (FLAG_initial_old_space_size > 0) {
+ initial_old_generation_size_ =
+ static_cast<size_t>(FLAG_initial_old_space_size) * MB;
+ old_generation_size_configured_ = true;
}
+ initial_old_generation_size_ =
+ Min(initial_old_generation_size_, max_old_generation_size_ / 2);
+ initial_old_generation_size_ =
+ RoundDown<Page::kPageSize>(initial_old_generation_size_);
}
- initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
+ if (old_generation_size_configured_) {
+ // If the embedder pre-configures the initial old generation size,
+ // then allow V8 to skip full GCs below that threshold.
+ min_old_generation_size_ = initial_old_generation_size_;
+ min_global_memory_size_ =
+ GlobalMemorySizeFromV8Size(min_old_generation_size_);
+ }
if (FLAG_semi_space_growth_factor < 2) {
FLAG_semi_space_growth_factor = 2;
}
- // The old generation is paged and needs at least one page for each space.
- int paged_space_count =
- LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
- initial_max_old_generation_size_ = max_old_generation_size_ =
- Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
- max_old_generation_size_);
-
- if (FLAG_initial_old_space_size > 0) {
- initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
- } else {
- initial_old_generation_size_ =
- Min(max_old_generation_size_, kMaxInitialOldGenerationSize);
- }
old_generation_allocation_limit_ = initial_old_generation_size_;
+ global_allocation_limit_ =
+ GlobalMemorySizeFromV8Size(old_generation_allocation_limit_);
+ initial_max_old_generation_size_ = max_old_generation_size_;
// We rely on being able to allocate new arrays in paged spaces.
DCHECK(kMaxRegularHeapObjectSize >=
@@ -4349,12 +4547,11 @@ void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
- code_range_size_ = code_range_size_in_mb * MB;
+ code_range_size_ = constraints.code_range_size_in_bytes();
configured_ = true;
}
-
void Heap::AddToRingBuffer(const char* string) {
size_t first_part =
Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
@@ -4378,7 +4575,10 @@ void Heap::GetFromRingBuffer(char* buffer) {
memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
}
-void Heap::ConfigureHeapDefault() { ConfigureHeap(0, 0, 0); }
+void Heap::ConfigureHeapDefault() {
+ v8::ResourceConstraints constraints;
+ ConfigureHeap(constraints);
+}
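
ConfigureHeap now consumes the embedder-facing v8::ResourceConstraints directly instead of three pre-scaled numbers. A minimal embedder-side sketch, assuming the byte-based setters that pair with the getters read above (set_max_old_generation_size_in_bytes and friends); error handling omitted:

#include "v8.h"

v8::Isolate* NewIsolateWithSizedHeap(v8::ArrayBuffer::Allocator* allocator) {
  v8::Isolate::CreateParams params;
  params.array_buffer_allocator = allocator;
  // Derive defaults from the machine, then override selected limits.
  params.constraints.ConfigureDefaults(
      /*physical_memory=*/8ull * 1024 * 1024 * 1024,
      /*virtual_memory_limit=*/0);
  params.constraints.set_max_old_generation_size_in_bytes(512ull << 20);
  params.constraints.set_max_young_generation_size_in_bytes(32ull << 20);
  return v8::Isolate::New(params);
}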
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->start_marker = HeapStats::kStartMarker;
@@ -4403,9 +4603,9 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
*stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
if (take_snapshot) {
- HeapIterator iterator(this);
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
+ HeapObjectIterator iterator(this);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
InstanceType type = obj.map().instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
stats->objects_per_type[type]++;
@@ -4426,10 +4626,10 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
size_t Heap::OldGenerationSizeOfObjects() {
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
size_t total = 0;
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
total += space->SizeOfObjects();
}
return total + lo_space_->SizeOfObjects();
@@ -4437,10 +4637,9 @@ size_t Heap::OldGenerationSizeOfObjects() {
size_t Heap::GlobalSizeOfObjects() {
const size_t on_heap_size = OldGenerationSizeOfObjects();
- const size_t embedder_size =
- local_embedder_heap_tracer()
- ? local_embedder_heap_tracer()->allocated_size()
- : 0;
+ const size_t embedder_size = local_embedder_heap_tracer()
+ ? local_embedder_heap_tracer()->used_size()
+ : 0;
return on_heap_size + embedder_size;
}
@@ -4455,6 +4654,40 @@ uint64_t Heap::PromotedExternalMemorySize() {
isolate_data->external_memory_at_last_mark_compact_);
}
+bool Heap::AllocationLimitOvershotByLargeMargin() {
+ // This guards against too eager finalization in small heaps.
+ // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
+ constexpr size_t kMarginForSmallHeaps = 32u * MB;
+
+ const size_t v8_overshoot =
+ old_generation_allocation_limit_ <
+ OldGenerationObjectsAndPromotedExternalMemorySize()
+ ? OldGenerationObjectsAndPromotedExternalMemorySize() -
+ old_generation_allocation_limit_
+ : 0;
+ const size_t global_overshoot =
+ global_allocation_limit_ < GlobalSizeOfObjects()
+ ? GlobalSizeOfObjects() - global_allocation_limit_
+ : 0;
+
+ // Bail out if the V8 and global sizes are still below their respective
+ // limits.
+ if (v8_overshoot == 0 && global_overshoot == 0) {
+ return false;
+ }
+
+ // Overshoot margin is 50% of allocation limit or half-way to the max heap
+ // with special handling of small heaps.
+ const size_t v8_margin =
+ Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
+ (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
+ const size_t global_margin =
+ Min(Max(global_allocation_limit_ / 2, kMarginForSmallHeaps),
+ (max_global_memory_size_ - global_allocation_limit_) / 2);
+
+ return v8_overshoot >= v8_margin || global_overshoot >= global_margin;
+}
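
The margin is the smaller of "half the current limit, but at least 32 MB" and "half the remaining headroom to the hard maximum". Worked numbers under assumed inputs:

#include <algorithm>
#include <cstddef>

constexpr size_t kMB = size_t{1} << 20;
constexpr size_t kLimit = 40 * kMB;    // old_generation_allocation_limit_ (assumed)
constexpr size_t kMax = 256 * kMB;     // max_old_generation_size_ (assumed)
constexpr size_t kUsage = 80 * kMB;    // objects + promoted external memory (assumed)
constexpr size_t kOvershoot = kUsage - kLimit;                          // 40 MB
constexpr size_t kMargin = std::min(std::max(kLimit / 2, 32 * kMB),     // 32 MB floor
                                    (kMax - kLimit) / 2);               // vs 108 MB headroom half
static_assert(kOvershoot >= kMargin,
              "this configuration counts as overshooting by a large margin");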
+
bool Heap::ShouldOptimizeForLoadTime() {
return isolate()->rail_mode() == PERFORMANCE_LOAD &&
!AllocationLimitOvershotByLargeMargin() &&
@@ -4508,7 +4741,7 @@ size_t Heap::GlobalMemoryAvailable() {
? GlobalSizeOfObjects() < global_allocation_limit_
? global_allocation_limit_ - GlobalSizeOfObjects()
: 0
- : 1;
+ : new_space_->Capacity() + 1;
}
// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
@@ -4526,8 +4759,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (FLAG_stress_incremental_marking) {
return IncrementalMarkingLimit::kHardLimit;
}
- if (OldGenerationSizeOfObjects() <=
- IncrementalMarking::kActivationThreshold) {
+ if (incremental_marking()->IsBelowActivationThresholds()) {
// Incremental marking is disabled or it is too early to start.
return IncrementalMarkingLimit::kNoLimit;
}
@@ -4574,7 +4806,7 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
const size_t global_memory_available = GlobalMemoryAvailable();
if (old_generation_space_available > new_space_->Capacity() &&
- (global_memory_available > 0)) {
+ (global_memory_available > new_space_->Capacity())) {
return IncrementalMarkingLimit::kNoLimit;
}
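
These two changes are coupled: when no global memory scheduling is active, GlobalMemoryAvailable() now returns the new-space capacity plus one, so the strengthened comparison above stays neutral instead of forcing a marking limit. Illustrative values:

#include <cstddef>

constexpr size_t kNewSpaceCapacity = 16u * 1024 * 1024;   // assumed
constexpr size_t kSentinel = kNewSpaceCapacity + 1;       // GlobalMemoryAvailable()
static_assert(kSentinel > kNewSpaceCapacity,
              "the 'no global limit' case no longer triggers the limit check");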
if (ShouldOptimizeForMemoryUsage()) {
@@ -4609,10 +4841,10 @@ void Heap::DisableInlineAllocation() {
new_space()->UpdateInlineAllocationLimit(0);
// Update inline allocation limit for old spaces.
- PagedSpaces spaces(this);
+ PagedSpaceIterator spaces(this);
CodeSpaceMemoryModificationScope modification_scope(this);
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->FreeLinearAllocationArea();
}
}
@@ -4769,7 +5001,6 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) {
DCHECK_NOT_NULL(ro_heap);
DCHECK_IMPLIES(read_only_space_ != nullptr,
read_only_space_ == ro_heap->read_only_space());
- read_only_heap_ = ro_heap;
space_[RO_SPACE] = read_only_space_ = ro_heap->read_only_space();
}
@@ -4822,7 +5053,7 @@ void Heap::SetUpSpaces() {
if (FLAG_idle_time_scavenge) {
scavenge_job_.reset(new ScavengeJob());
idle_scavenge_observer_.reset(new IdleScavengeObserver(
- *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
+ this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
}
@@ -4831,12 +5062,12 @@ void Heap::SetUpSpaces() {
if (FLAG_stress_marking > 0) {
stress_marking_percentage_ = NextStressMarkingLimit();
- stress_marking_observer_ = new StressMarkingObserver(*this);
+ stress_marking_observer_ = new StressMarkingObserver(this);
AddAllocationObserversToAllSpaces(stress_marking_observer_,
stress_marking_observer_);
}
if (FLAG_stress_scavenge > 0) {
- stress_scavenge_observer_ = new StressScavengeObserver(*this);
+ stress_scavenge_observer_ = new StressScavengeObserver(this);
new_space()->AddAllocationObserver(stress_scavenge_observer_);
}
@@ -4908,8 +5139,8 @@ int Heap::NextStressMarkingLimit() {
}
void Heap::NotifyDeserializationComplete() {
- PagedSpaces spaces(this);
- for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
+ PagedSpaceIterator spaces(this);
+ for (PagedSpace* s = spaces.Next(); s != nullptr; s = spaces.Next()) {
if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
#ifdef DEBUG
// All pages right after bootstrapping must be marked as never-evacuate.
@@ -5055,7 +5286,7 @@ void Heap::TearDown() {
tracer_.reset();
- read_only_heap_->OnHeapTearDown();
+ isolate()->read_only_heap()->OnHeapTearDown();
space_[RO_SPACE] = read_only_space_ = nullptr;
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
@@ -5158,8 +5389,8 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) {
// Find known PrototypeUsers and compact them.
std::vector<Handle<PrototypeInfo>> prototype_infos;
{
- HeapIterator iterator(this);
- for (HeapObject o = iterator.next(); !o.is_null(); o = iterator.next()) {
+ HeapObjectIterator iterator(this);
+ for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
if (o.IsPrototypeInfo()) {
PrototypeInfo prototype_info = PrototypeInfo::cast(o);
if (prototype_info.prototype_users().IsWeakArrayList()) {
@@ -5309,7 +5540,7 @@ void Heap::ClearRecordedSlot(HeapObject object, ObjectSlot slot) {
DCHECK(!IsLargeObject(object));
Page* page = Page::FromAddress(slot.address());
if (!page->InYoungGeneration()) {
- DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->DeleteEntry(slot.address());
}
}
@@ -5319,7 +5550,7 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
DCHECK(!IsLargeObject(object));
if (InYoungGeneration(object)) return;
Page* page = Page::FromAddress(slot.address());
- DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->MoveAllEntriesToRememberedSet();
CHECK(!RememberedSet<OLD_TO_NEW>::Contains(page, slot.address()));
// Old to old slots are filtered with invalidated slots.
@@ -5332,17 +5563,16 @@ void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start);
DCHECK(!page->IsLargePage());
if (!page->InYoungGeneration()) {
- DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ DCHECK_EQ(page->owner_identity(), OLD_SPACE);
store_buffer()->DeleteEntry(start, end);
}
}
-PagedSpace* PagedSpaces::next() {
+PagedSpace* PagedSpaceIterator::Next() {
switch (counter_++) {
case RO_SPACE:
- // skip NEW_SPACE
- counter_++;
- return heap_->read_only_space();
+ case NEW_SPACE:
+ UNREACHABLE();
case OLD_SPACE:
return heap_->old_space();
case CODE_SPACE:
@@ -5359,17 +5589,16 @@ SpaceIterator::SpaceIterator(Heap* heap)
SpaceIterator::~SpaceIterator() = default;
-bool SpaceIterator::has_next() {
+bool SpaceIterator::HasNext() {
// Iterate until no more spaces.
return current_space_ != LAST_SPACE;
}
-Space* SpaceIterator::next() {
- DCHECK(has_next());
+Space* SpaceIterator::Next() {
+ DCHECK(HasNext());
return heap_->space(++current_space_);
}
-
class HeapObjectsFilter {
public:
virtual ~HeapObjectsFilter() = default;
@@ -5486,8 +5715,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
reachable_;
};
-HeapIterator::HeapIterator(Heap* heap,
- HeapIterator::HeapObjectsFiltering filtering)
+HeapObjectIterator::HeapObjectIterator(
+ Heap* heap, HeapObjectIterator::HeapObjectsFiltering filtering)
: heap_(heap),
filtering_(filtering),
filter_(nullptr),
@@ -5503,11 +5732,10 @@ HeapIterator::HeapIterator(Heap* heap,
default:
break;
}
- object_iterator_ = space_iterator_->next()->GetObjectIterator();
+ object_iterator_ = space_iterator_->Next()->GetObjectIterator();
}
-
-HeapIterator::~HeapIterator() {
+HeapObjectIterator::~HeapObjectIterator() {
#ifdef DEBUG
// Assert that in filtering mode we have iterated through all
// objects. Otherwise, heap will be left in an inconsistent state.
@@ -5519,7 +5747,7 @@ HeapIterator::~HeapIterator() {
delete filter_;
}
-HeapObject HeapIterator::next() {
+HeapObject HeapObjectIterator::Next() {
if (filter_ == nullptr) return NextObject();
HeapObject obj = NextObject();
@@ -5527,7 +5755,7 @@ HeapObject HeapIterator::next() {
return obj;
}
-HeapObject HeapIterator::NextObject() {
+HeapObject HeapObjectIterator::NextObject() {
// No iterator means we are done.
if (object_iterator_.get() == nullptr) return HeapObject();
@@ -5537,8 +5765,8 @@ HeapObject HeapIterator::NextObject() {
return obj;
} else {
// Go though the spaces looking for one that has objects.
- while (space_iterator_->has_next()) {
- object_iterator_ = space_iterator_->next()->GetObjectIterator();
+ while (space_iterator_->HasNext()) {
+ object_iterator_ = space_iterator_->Next()->GetObjectIterator();
obj = object_iterator_.get()->Next();
if (!obj.is_null()) {
return obj;
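
The renamed HeapObjectIterator keeps the Next()-until-null protocol used throughout this patch (RecordStats, CompactWeakArrayLists). A minimal sketch (the counting body is illustrative):

size_t CountHeapObjects(Heap* heap) {
  size_t count = 0;
  HeapObjectIterator iterator(heap);
  for (HeapObject obj = iterator.Next(); !obj.is_null();
       obj = iterator.Next()) {
    ++count;
  }
  return count;
}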
@@ -5686,7 +5914,7 @@ void Heap::AddDirtyJSFinalizationGroup(
// for the root pointing to the first JSFinalizationGroup.
}
-void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
+void Heap::KeepDuringJob(Handle<JSReceiver> target) {
DCHECK(FLAG_harmony_weak_refs);
DCHECK(weak_refs_keep_during_job().IsUndefined() ||
weak_refs_keep_during_job().IsOrderedHashSet());
@@ -5701,7 +5929,7 @@ void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
set_weak_refs_keep_during_job(*table);
}
-void Heap::ClearKeepDuringJobSet() {
+void Heap::ClearKeptObjects() {
set_weak_refs_keep_during_job(ReadOnlyRoots(isolate()).undefined_value());
}
@@ -5844,7 +6072,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
if (map == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
InstanceType type = map.instance_type();
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
- AllocationSpace src = chunk->owner()->identity();
+ AllocationSpace src = chunk->owner_identity();
switch (src) {
case NEW_SPACE:
return dst == NEW_SPACE || dst == OLD_SPACE;
@@ -5864,7 +6092,7 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
size_t Heap::EmbedderAllocationCounter() const {
return local_embedder_heap_tracer()
- ? local_embedder_heap_tracer()->accumulated_allocated_size()
+ ? local_embedder_heap_tracer()->allocated_size()
: 0;
}
@@ -6133,16 +6361,16 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
slim_chunk->IsMarking());
- Space* chunk_owner = chunk->owner();
- AllocationSpace identity = chunk_owner->identity();
+ AllocationSpace identity = chunk->owner_identity();
// Generation consistency.
CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
slim_chunk->InYoungGeneration());
+ // Read-only consistency.
+ CHECK_EQ(chunk->InReadOnlySpace(), slim_chunk->InReadOnlySpace());
// Marking consistency.
- if (identity != RO_SPACE ||
- static_cast<ReadOnlySpace*>(chunk->owner())->writable()) {
+ if (chunk->IsWritable()) {
// RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
// find a heap. The exception is when the ReadOnlySpace is writeable, during
// bootstrapping, so explicitly allow this case.
@@ -6155,25 +6383,6 @@ bool Heap::PageFlagsAreConsistent(HeapObject object) {
return true;
}
-static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
- heap_internals::MemoryChunk::kMarkingBit,
- "Incremental marking flag inconsistent");
-static_assert(MemoryChunk::Flag::FROM_PAGE ==
- heap_internals::MemoryChunk::kFromPageBit,
- "From page flag inconsistent");
-static_assert(MemoryChunk::Flag::TO_PAGE ==
- heap_internals::MemoryChunk::kToPageBit,
- "To page flag inconsistent");
-static_assert(MemoryChunk::kFlagsOffset ==
- heap_internals::MemoryChunk::kFlagsOffset,
- "Flag offset inconsistent");
-static_assert(MemoryChunk::kHeapOffset ==
- heap_internals::MemoryChunk::kHeapOffset,
- "Heap offset inconsistent");
-static_assert(MemoryChunk::kOwnerOffset ==
- heap_internals::MemoryChunk::kOwnerOffset,
- "Owner offset inconsistent");
-
void Heap::SetEmbedderStackStateForNextFinalizaton(
EmbedderHeapTracer::EmbedderStackState stack_state) {
local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
diff --git a/chromium/v8/src/heap/heap.h b/chromium/v8/src/heap/heap.h
index a242bd80d12..81f2b0dd8c3 100644
--- a/chromium/v8/src/heap/heap.h
+++ b/chromium/v8/src/heap/heap.h
@@ -44,29 +44,20 @@ class HeapTester;
class TestMemoryAllocatorScope;
} // namespace heap
-class ObjectBoilerplateDescription;
-class BytecodeArray;
-class CodeDataContainer;
-class DeoptimizationData;
-class HandlerTable;
class IncrementalMarking;
class JSArrayBuffer;
-class ExternalString;
using v8::MemoryPressureLevel;
class AllocationObserver;
class ArrayBufferCollector;
-class ArrayBufferTracker;
class CodeLargeObjectSpace;
class ConcurrentMarking;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
-class HeapController;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
-class HistogramTimer;
class Isolate;
class JSFinalizationGroup;
class LocalEmbedderHeapTracer;
@@ -86,7 +77,6 @@ class Space;
class StoreBuffer;
class StressScavengeObserver;
class TimedHistogram;
-class TracePossibleWrapperReporter;
class WeakObjectRetainer;
enum ArrayStorageAllocationMode {
@@ -243,19 +233,24 @@ class Heap {
// should instead adapt its heap size based on available physical memory.
static const int kPointerMultiplier = 1;
#else
- // TODO(ishell): kSystePointerMultiplier?
- static const int kPointerMultiplier = i::kSystemPointerSize / 4;
+ static const int kPointerMultiplier = i::kTaggedSize / 4;
#endif
static const size_t kMaxInitialOldGenerationSize =
256 * MB * kPointerMultiplier;
- // Semi-space size needs to be a multiple of page size.
- static const size_t kMinSemiSpaceSizeInKB = 512 * kPointerMultiplier;
- static const size_t kMaxSemiSpaceSizeInKB = 8192 * kPointerMultiplier;
+ // These constants control heap configuration based on the physical memory.
+ static constexpr size_t kPhysicalMemoryToOldGenerationRatio = 4;
+ static constexpr size_t kOldGenerationToSemiSpaceRatio = 128;
+ static constexpr size_t kOldGenerationToSemiSpaceRatioLowMemory = 256;
+ static constexpr size_t kOldGenerationLowMemory =
+ 128 * MB * kPointerMultiplier;
+ static constexpr size_t kNewLargeObjectSpaceToSemiSpaceRatio = 1;
+ static constexpr size_t kMinSemiSpaceSize = 512 * KB * kPointerMultiplier;
+ static constexpr size_t kMaxSemiSpaceSize = 8192 * KB * kPointerMultiplier;
- STATIC_ASSERT(kMinSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);
- STATIC_ASSERT(kMaxSemiSpaceSizeInKB* KB % (1 << kPageSizeBits) == 0);
+ STATIC_ASSERT(kMinSemiSpaceSize % (1 << kPageSizeBits) == 0);
+ STATIC_ASSERT(kMaxSemiSpaceSize % (1 << kPageSizeBits) == 0);
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
@@ -365,8 +360,8 @@ class Heap {
V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
Address slot,
HeapObject value);
- V8_EXPORT_PRIVATE void RecordEphemeronKeyWrite(EphemeronHashTable table,
- Address key_slot);
+ V8_EXPORT_PRIVATE inline void RecordEphemeronKeyWrite(
+ EphemeronHashTable table, Address key_slot);
V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
Address raw_object, Address address, Isolate* isolate);
V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
@@ -477,6 +472,12 @@ class Heap {
// Print short heap statistics.
void PrintShortHeapStatistics();
+ // Print statistics of freelists of old_space:
+ // with FLAG_trace_gc_freelists: summary of each FreeListCategory.
+ // with FLAG_trace_gc_freelists_verbose: also prints the statistics of each
+ // FreeListCategory of each page.
+ void PrintFreeListsStats();
+
// Dump heap statistics in JSON format.
void DumpJSONHeapStatistics(std::stringstream& stream);
@@ -571,7 +572,7 @@ class Heap {
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
- int64_t external_memory_hard_limit() { return MaxOldGenerationSize() / 2; }
+ int64_t external_memory_hard_limit() { return max_old_generation_size_ / 2; }
V8_INLINE int64_t external_memory();
V8_INLINE void update_external_memory(int64_t delta);
@@ -619,13 +620,7 @@ class Heap {
// Initialization. ===========================================================
// ===========================================================================
- // Configure heap sizes
- // max_semi_space_size_in_kb: maximum semi-space size in KB
- // max_old_generation_size_in_mb: maximum old generation size in MB
- // code_range_size_in_mb: code range size in MB
- void ConfigureHeap(size_t max_semi_space_size_in_kb,
- size_t max_old_generation_size_in_mb,
- size_t code_range_size_in_mb);
+ void ConfigureHeap(const v8::ResourceConstraints& constraints);
void ConfigureHeapDefault();
// Prepares the heap, setting up for deserialization.
@@ -681,8 +676,6 @@ class Heap {
// Getters to other components. ==============================================
// ===========================================================================
- ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
-
GCTracer* tracer() { return tracer_.get(); }
MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
@@ -748,8 +741,8 @@ class Heap {
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot);
- V8_EXPORT_PRIVATE void AddKeepDuringJobTarget(Handle<JSReceiver> target);
- void ClearKeepDuringJobSet();
+ V8_EXPORT_PRIVATE void KeepDuringJob(Handle<JSReceiver> target);
+ void ClearKeptObjects();
// ===========================================================================
// Inline allocation. ========================================================
@@ -986,8 +979,9 @@ class Heap {
// Returns whether the object resides in old space.
inline bool InOldSpace(Object object);
- // Checks whether an address/object in the heap (including auxiliary
- // area and unused area).
+ // Checks whether an address/object is in the non-read-only heap (including
+ // auxiliary area and unused area). Use IsValidHeapObject if checking both
+ // heaps is required.
V8_EXPORT_PRIVATE bool Contains(HeapObject value);
// Checks whether an address/object in a space.
@@ -998,7 +992,7 @@ class Heap {
// with off-heap Addresses.
bool InSpaceSlow(Address addr, AllocationSpace space);
- static inline Heap* FromWritableHeapObject(const HeapObject obj);
+ static inline Heap* FromWritableHeapObject(HeapObject obj);
// ===========================================================================
// Object statistics tracking. ===============================================
@@ -1042,23 +1036,21 @@ class Heap {
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
- V8_EXPORT_PRIVATE static size_t ComputeMaxOldGenerationSize(
+ V8_EXPORT_PRIVATE static size_t HeapSizeFromPhysicalMemory(
+ uint64_t physical_memory);
+ V8_EXPORT_PRIVATE static void GenerationSizesFromHeapSize(
+ size_t heap_size, size_t* young_generation_size,
+ size_t* old_generation_size);
+ V8_EXPORT_PRIVATE static size_t YoungGenerationSizeFromOldGenerationSize(
+ size_t old_generation_size);
+ V8_EXPORT_PRIVATE static size_t YoungGenerationSizeFromSemiSpaceSize(
+ size_t semi_space_size);
+ V8_EXPORT_PRIVATE static size_t SemiSpaceSizeFromYoungGenerationSize(
+ size_t young_generation_size);
+ V8_EXPORT_PRIVATE static size_t MinYoungGenerationSize();
+ V8_EXPORT_PRIVATE static size_t MinOldGenerationSize();
+ V8_EXPORT_PRIVATE static size_t MaxOldGenerationSize(
uint64_t physical_memory);
-
- static size_t ComputeMaxSemiSpaceSize(uint64_t physical_memory) {
- const uint64_t min_physical_memory = 512 * MB;
- const uint64_t max_physical_memory = 3 * static_cast<uint64_t>(GB);
-
- uint64_t capped_physical_memory =
- Max(Min(physical_memory, max_physical_memory), min_physical_memory);
- // linearly scale max semi-space size: (X-A)/(B-A)*(D-C)+C
- size_t semi_space_size_in_kb =
- static_cast<size_t>(((capped_physical_memory - min_physical_memory) *
- (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
- (max_physical_memory - min_physical_memory) +
- kMinSemiSpaceSizeInKB);
- return RoundUp(semi_space_size_in_kb, (1 << kPageSizeBits) / KB);
- }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
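The removed ComputeMaxSemiSpaceSize above encoded a linear interpolation of the semi-space size between its minimum and maximum as physical memory grows from 512 MB to 3 GB. A standalone restatement of that arithmetic, assuming 256 KB pages (kPageSizeBits = 18) and the KB-denominated constants deleted earlier in this file's hunks:

#include <algorithm>
#include <cstddef>
#include <cstdint>

namespace sketch {
constexpr uint64_t KB = 1024;
constexpr uint64_t MB = 1024 * KB;
constexpr uint64_t GB = 1024 * MB;
constexpr size_t kPageSizeBits = 18;  // assumption: 256 KB pages
constexpr uint64_t kMinSemiSpaceSizeInKB = 512;
constexpr uint64_t kMaxSemiSpaceSizeInKB = 8192;

// Restatement of the removed helper: linearly scale the semi-space size,
// i.e. (X-A)/(B-A)*(D-C)+C, then round up to a multiple of the page size.
inline size_t ComputeMaxSemiSpaceSizeInKB(uint64_t physical_memory) {
  const uint64_t min_physical_memory = 512 * MB;
  const uint64_t max_physical_memory = 3 * GB;
  const uint64_t capped = std::max(
      std::min(physical_memory, max_physical_memory), min_physical_memory);
  size_t semi_space_size_in_kb = static_cast<size_t>(
      ((capped - min_physical_memory) *
       (kMaxSemiSpaceSizeInKB - kMinSemiSpaceSizeInKB)) /
          (max_physical_memory - min_physical_memory) +
      kMinSemiSpaceSizeInKB);
  const size_t page_size_in_kb = (size_t{1} << kPageSizeBits) / KB;
  return ((semi_space_size_in_kb + page_size_in_kb - 1) / page_size_in_kb) *
         page_size_in_kb;
}
}  // namespace sketch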
@@ -1185,6 +1177,11 @@ class Heap {
V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects();
+ // We allow incremental marking to overshoot the V8 and global allocation
+ // limit for performance reasons. If the overshoot is too large then we are
+ // more eager to finalize incremental marking.
+ bool AllocationLimitOvershotByLargeMargin();
+
// ===========================================================================
// Prologue/epilogue callback methods.========================================
// ===========================================================================
@@ -1655,26 +1652,6 @@ class Heap {
OldGenerationObjectsAndPromotedExternalMemorySize());
}
- // We allow incremental marking to overshoot the allocation limit for
- // performace reasons. If the overshoot is too large then we are more
- // eager to finalize incremental marking.
- inline bool AllocationLimitOvershotByLargeMargin() {
- // This guards against too eager finalization in small heaps.
- // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
- size_t kMarginForSmallHeaps = 32u * MB;
- if (old_generation_allocation_limit_ >=
- OldGenerationObjectsAndPromotedExternalMemorySize())
- return false;
- uint64_t overshoot = OldGenerationObjectsAndPromotedExternalMemorySize() -
- old_generation_allocation_limit_;
- // Overshoot margin is 50% of allocation limit or half-way to the max heap
- // with special handling of small heaps.
- uint64_t margin =
- Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
- (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
- return overshoot >= margin;
- }
-
void UpdateTotalGCTime(double duration);
bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
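AllocationLimitOvershotByLargeMargin is now declared out-of-line and, per its new comment, also considers the global allocation limit; its new body is not in this diff. The removed inline version above, restated as a free function for reference (parameter names are placeholders):

#include <algorithm>
#include <cstddef>
#include <cstdint>

namespace sketch {
constexpr size_t MB = 1024 * 1024;

// The overshoot past the old-generation allocation limit must exceed a margin
// before incremental marking is finalized eagerly; small heaps get a floor of
// 32 MB, and the margin is capped at half-way to the maximum heap size.
inline bool AllocationLimitOvershotByLargeMargin(
    size_t size_of_objects, size_t allocation_limit,
    size_t max_old_generation_size) {
  const size_t kMarginForSmallHeaps = 32u * MB;
  if (allocation_limit >= size_of_objects) return false;
  const uint64_t overshoot = size_of_objects - allocation_limit;
  const uint64_t margin =
      std::min(std::max(allocation_limit / 2, kMarginForSmallHeaps),
               (max_old_generation_size - allocation_limit) / 2);
  return overshoot >= margin;
}
}  // namespace sketch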
@@ -1708,6 +1685,8 @@ class Heap {
return old_generation_allocation_limit_;
}
+ size_t global_allocation_limit() const { return global_allocation_limit_; }
+
bool always_allocate() { return always_allocate_scope_count_ != 0; }
V8_EXPORT_PRIVATE bool CanExpandOldGeneration(size_t size);
@@ -1816,18 +1795,25 @@ class Heap {
// more expedient to get at the isolate directly from within Heap methods.
Isolate* isolate_ = nullptr;
+ // These limits are initialized in Heap::ConfigureHeap based on the resource
+ // constraints and flags.
size_t code_range_size_ = 0;
- size_t max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
- size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
- size_t max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
- // TODO(mlippautz): Clarify whether this should be take some embedder
+ size_t max_semi_space_size_ = 0;
+ size_t initial_semispace_size_ = 0;
+ // Full garbage collections can be skipped if the old generation size
+ // is below this threshold.
+ size_t min_old_generation_size_ = 0;
+ // If the old generation size exceeds this limit, then V8 will
+ // crash with out-of-memory error.
+ size_t max_old_generation_size_ = 0;
+ // TODO(mlippautz): Clarify whether this should take some embedder
// configurable limit into account.
- size_t max_global_memory_size_ =
- Min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
- static_cast<uint64_t>(max_old_generation_size_) * 2);
- size_t initial_max_old_generation_size_;
- size_t initial_max_old_generation_size_threshold_;
- size_t initial_old_generation_size_;
+ size_t min_global_memory_size_ = 0;
+ size_t max_global_memory_size_ = 0;
+
+ size_t initial_max_old_generation_size_ = 0;
+ size_t initial_max_old_generation_size_threshold_ = 0;
+ size_t initial_old_generation_size_ = 0;
bool old_generation_size_configured_ = false;
size_t maximum_committed_ = 0;
size_t old_generation_capacity_after_bootstrap_ = 0;
@@ -1861,8 +1847,6 @@ class Heap {
// and after context disposal.
int number_of_disposed_maps_ = 0;
- ReadOnlyHeap* read_only_heap_ = nullptr;
-
NewSpace* new_space_ = nullptr;
OldSpace* old_space_ = nullptr;
CodeSpace* code_space_ = nullptr;
@@ -1932,8 +1916,8 @@ class Heap {
// is checked when we have already decided to do a GC to help determine
// which collector to invoke, before expanding a paged space in the old
// generation and on every allocation in large object space.
- size_t old_generation_allocation_limit_;
- size_t global_allocation_limit_;
+ size_t old_generation_allocation_limit_ = 0;
+ size_t global_allocation_limit_ = 0;
// Indicates that inline bump-pointer allocation has been globally disabled
// for all spaces. This is used to disable allocations in generated code.
@@ -2034,9 +2018,10 @@ class Heap {
// Currently set GC callback flags that are used to pass information between
// the embedder and V8's GC.
- GCCallbackFlags current_gc_callback_flags_;
+ GCCallbackFlags current_gc_callback_flags_ =
+ GCCallbackFlags::kNoGCCallbackFlags;
- bool is_current_gc_forced_;
+ bool is_current_gc_forced_ = false;
ExternalStringTable external_string_table_;
@@ -2082,7 +2067,7 @@ class Heap {
friend class ConcurrentMarking;
friend class GCCallbacksScope;
friend class GCTracer;
- friend class HeapIterator;
+ friend class HeapObjectIterator;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
@@ -2115,9 +2100,6 @@ class Heap {
// Used in cctest.
friend class heap::HeapTester;
- FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
- FRIEND_TEST(HeapTest, ExternalLimitDefault);
- FRIEND_TEST(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling);
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2245,56 +2227,56 @@ class VerifySmisVisitor : public RootVisitor {
};
// Space iterator for iterating over all the paged spaces of the heap: Map
-// space, old space, code space and optionally read only space. Returns each
-// space in turn, and null when it is done.
-class V8_EXPORT_PRIVATE PagedSpaces {
+// space, old space and code space. Returns each space in turn, and null when it
+// is done.
+class V8_EXPORT_PRIVATE PagedSpaceIterator {
public:
- explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
- PagedSpace* next();
+ explicit PagedSpaceIterator(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
+ PagedSpace* Next();
private:
Heap* heap_;
int counter_;
};
-
-class SpaceIterator : public Malloced {
+class V8_EXPORT_PRIVATE SpaceIterator : public Malloced {
public:
explicit SpaceIterator(Heap* heap);
virtual ~SpaceIterator();
- bool has_next();
- Space* next();
+ bool HasNext();
+ Space* Next();
private:
Heap* heap_;
int current_space_; // from enum AllocationSpace.
};
-// A HeapIterator provides iteration over the entire non-read-only heap. It
-// aggregates the specific iterators for the different spaces as these can only
-// iterate over one space only.
+// A HeapObjectIterator provides iteration over the entire non-read-only heap.
+// It aggregates the specific iterators for the different spaces as these can
+// only iterate over one space.
//
-// HeapIterator ensures there is no allocation during its lifetime (using an
-// embedded DisallowHeapAllocation instance).
+// HeapObjectIterator ensures there is no allocation during its lifetime (using
+// an embedded DisallowHeapAllocation instance).
//
-// HeapIterator can skip free list nodes (that is, de-allocated heap objects
-// that still remain in the heap). As implementation of free nodes filtering
-// uses GC marks, it can't be used during MS/MC GC phases. Also, it is forbidden
-// to interrupt iteration in this mode, as this will leave heap objects marked
-// (and thus, unusable).
+// HeapObjectIterator can skip free list nodes (that is, de-allocated heap
+// objects that still remain in the heap). As the implementation of free-node
+// filtering uses GC marks, it can't be used during MS/MC GC phases. Also, it is
+// forbidden to interrupt iteration in this mode, as this will leave heap
+// objects marked (and thus, unusable).
//
-// See ReadOnlyHeapIterator if you need to iterate over read-only space objects,
-// or CombinedHeapIterator if you need to iterate over both heaps.
-class V8_EXPORT_PRIVATE HeapIterator {
+// See ReadOnlyHeapObjectIterator if you need to iterate over read-only space
+// objects, or CombinedHeapObjectIterator if you need to iterate over both
+// heaps.
+class V8_EXPORT_PRIVATE HeapObjectIterator {
public:
enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
- explicit HeapIterator(Heap* heap,
- HeapObjectsFiltering filtering = kNoFiltering);
- ~HeapIterator();
+ explicit HeapObjectIterator(Heap* heap,
+ HeapObjectsFiltering filtering = kNoFiltering);
+ ~HeapObjectIterator();
- HeapObject next();
+ HeapObject Next();
private:
HeapObject NextObject();
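The loop shape for the renamed HeapObjectIterator matches the other object iterators touched by this patch (for example CombinedHeapObjectIterator in object-stats.cc). A short sketch; the counting function is a placeholder:

// Count all objects in the non-read-only heap without filtering.
void CountLiveObjects(Heap* heap) {
  HeapObjectIterator iterator(heap);  // kNoFiltering by default
  size_t count = 0;
  for (HeapObject obj = iterator.Next(); !obj.is_null();
       obj = iterator.Next()) {
    count++;
  }
  PrintF("non-read-only heap objects: %zu\n", count);
}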
diff --git a/chromium/v8/src/heap/incremental-marking.cc b/chromium/v8/src/heap/incremental-marking.cc
index 4a901dc17a9..2980bdc8d43 100644
--- a/chromium/v8/src/heap/incremental-marking.cc
+++ b/chromium/v8/src/heap/incremental-marking.cc
@@ -37,14 +37,14 @@ using IncrementalMarkingMarkingVisitor =
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
size_t size) {
- Heap* heap = incremental_marking_.heap();
+ Heap* heap = incremental_marking_->heap();
VMState<GC> state(heap->isolate());
RuntimeCallTimerScope runtime_timer(
heap->isolate(),
RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
- incremental_marking_.AdvanceOnAllocation();
+ incremental_marking_->AdvanceOnAllocation();
// AdvanceIncrementalMarkingOnAllocation can start incremental marking.
- incremental_marking_.EnsureBlackAllocated(addr, size);
+ incremental_marking_->EnsureBlackAllocated(addr, size);
}
IncrementalMarking::IncrementalMarking(
@@ -64,8 +64,8 @@ IncrementalMarking::IncrementalMarking(
black_allocation_(false),
finalize_marking_completed_(false),
request_type_(NONE),
- new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
- old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
+ new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
+ old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
DCHECK_NOT_NULL(marking_worklist_);
SetState(STOPPED);
}
@@ -246,6 +246,10 @@ bool IncrementalMarking::CanBeActivated() {
!heap_->isolate()->serializer_enabled();
}
+bool IncrementalMarking::IsBelowActivationThresholds() const {
+ return heap_->OldGenerationSizeOfObjects() <= kV8ActivationThreshold &&
+ heap_->GlobalSizeOfObjects() <= kGlobalActivationThreshold;
+}
void IncrementalMarking::Deactivate() {
DeactivateIncrementalWriteBarrier();
@@ -253,16 +257,23 @@ void IncrementalMarking::Deactivate() {
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
if (FLAG_trace_incremental_marking) {
- int old_generation_size_mb =
- static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
- int old_generation_limit_mb =
- static_cast<int>(heap()->old_generation_allocation_limit() / MB);
+ const size_t old_generation_size_mb =
+ heap()->OldGenerationSizeOfObjects() / MB;
+ const size_t old_generation_limit_mb =
+ heap()->old_generation_allocation_limit() / MB;
+ const size_t global_size_mb = heap()->GlobalSizeOfObjects() / MB;
+ const size_t global_limit_mb = heap()->global_allocation_limit() / MB;
heap()->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
- "slack %dMB\n",
+ "[IncrementalMarking] Start (%s): (size/limit/slack) v8: %zuMB / %zuMB "
+ "/ %zuMB global: %zuMB / %zuMB / %zuMB\n",
Heap::GarbageCollectionReasonToString(gc_reason),
old_generation_size_mb, old_generation_limit_mb,
- Max(0, old_generation_limit_mb - old_generation_size_mb));
+ old_generation_size_mb > old_generation_limit_mb
+ ? 0
+ : old_generation_limit_mb - old_generation_size_mb,
+ global_size_mb, global_limit_mb,
+ global_size_mb > global_limit_mb ? 0
+ : global_limit_mb - global_size_mb);
}
DCHECK(FLAG_incremental_marking);
DCHECK(state_ == STOPPED);
@@ -827,8 +838,8 @@ void IncrementalMarking::Stop() {
}
SpaceIterator it(heap_);
- while (it.has_next()) {
- Space* space = it.next();
+ while (it.HasNext()) {
+ Space* space = it.Next();
if (space == heap_->new_space()) {
space->RemoveAllocationObserver(&new_generation_observer_);
} else {
diff --git a/chromium/v8/src/heap/incremental-marking.h b/chromium/v8/src/heap/incremental-marking.h
index 72840341916..74bb7cfd5a0 100644
--- a/chromium/v8/src/heap/incremental-marking.h
+++ b/chromium/v8/src/heap/incremental-marking.h
@@ -79,9 +79,11 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static constexpr double kMaxStepSizeInMs = 5;
#ifndef DEBUG
- static const intptr_t kActivationThreshold = 8 * MB;
+ static constexpr size_t kV8ActivationThreshold = 8 * MB;
+ static constexpr size_t kGlobalActivationThreshold = 16 * MB;
#else
- static const intptr_t kActivationThreshold = 0;
+ static constexpr size_t kV8ActivationThreshold = 0;
+ static constexpr size_t kGlobalActivationThreshold = 0;
#endif
#ifdef V8_CONCURRENT_MARKING
@@ -248,17 +250,19 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// generation.
void EnsureBlackAllocated(Address allocated, size_t size);
+ bool IsBelowActivationThresholds() const;
+
private:
class Observer : public AllocationObserver {
public:
- Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
+ Observer(IncrementalMarking* incremental_marking, intptr_t step_size)
: AllocationObserver(step_size),
incremental_marking_(incremental_marking) {}
void Step(int bytes_allocated, Address, size_t) override;
private:
- IncrementalMarking& incremental_marking_;
+ IncrementalMarking* incremental_marking_;
};
void StartMarking();
diff --git a/chromium/v8/src/heap/item-parallel-job.cc b/chromium/v8/src/heap/item-parallel-job.cc
index 1945e3275af..001f40193ac 100644
--- a/chromium/v8/src/heap/item-parallel-job.cc
+++ b/chromium/v8/src/heap/item-parallel-job.cc
@@ -26,8 +26,12 @@ void ItemParallelJob::Task::SetupInternal(base::Semaphore* on_finish,
}
}
+void ItemParallelJob::Task::WillRunOnForeground() {
+ runner_ = Runner::kForeground;
+}
+
void ItemParallelJob::Task::RunInternal() {
- RunInParallel();
+ RunInParallel(runner_);
on_finish_->Signal();
}
@@ -95,6 +99,7 @@ void ItemParallelJob::Run() {
// Contribute on main thread.
DCHECK(main_task);
+ main_task->WillRunOnForeground();
main_task->Run();
// Wait for background tasks.
diff --git a/chromium/v8/src/heap/item-parallel-job.h b/chromium/v8/src/heap/item-parallel-job.h
index 54f09b87b55..0b739f8987a 100644
--- a/chromium/v8/src/heap/item-parallel-job.h
+++ b/chromium/v8/src/heap/item-parallel-job.h
@@ -65,10 +65,11 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
class V8_EXPORT_PRIVATE Task : public CancelableTask {
public:
+ enum class Runner { kForeground, kBackground };
explicit Task(Isolate* isolate);
~Task() override = default;
- virtual void RunInParallel() = 0;
+ virtual void RunInParallel(Runner runner) = 0;
protected:
// Retrieves a new item that needs to be processed. Returns |nullptr| if
@@ -99,13 +100,14 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
// processing, e.g. scavenging).
void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
size_t start_index);
-
+ void WillRunOnForeground();
// We don't allow overriding this method any further.
void RunInternal() final;
std::vector<Item*>* items_ = nullptr;
size_t cur_index_ = 0;
size_t items_considered_ = 0;
+ Runner runner_ = Runner::kBackground;
base::Semaphore* on_finish_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(Task);
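A sketch of a Task subclass under the new interface: RunInParallel() now receives a Runner, and ItemParallelJob::Run() marks the main-thread task with WillRunOnForeground() before running it. The class below is a placeholder that follows the same shape as the evacuation, pointer-updating, and scavenging tasks changed later in this patch:

class ExampleTask : public ItemParallelJob::Task {  // placeholder name
 public:
  explicit ExampleTask(Isolate* isolate) : ItemParallelJob::Task(isolate) {}

  void RunInParallel(Runner runner) override {
    if (runner == Runner::kForeground) {
      // Main-thread contribution: select a foreground trace scope here.
      ProcessItems();
    } else {
      // Worker-thread contribution: select a background trace scope here.
      ProcessItems();
    }
  }

 private:
  void ProcessItems() {
    // Drain items via GetItem<...>() and MarkFinished(), as the tasks in
    // mark-compact.cc and scavenger.cc do.
  }
};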
diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc
index 03be1100b1a..3cd6620083b 100644
--- a/chromium/v8/src/heap/mark-compact.cc
+++ b/chromium/v8/src/heap/mark-compact.cc
@@ -156,7 +156,7 @@ void MarkingVerifier::VerifyMarking(PagedSpace* space) {
}
void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
- LargeObjectIterator it(lo_space);
+ LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
obj.Iterate(this);
@@ -456,6 +456,14 @@ void MarkCompactCollector::TearDown() {
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
+
+ if (FLAG_trace_evacuation_candidates) {
+ PrintIsolate(
+ isolate(),
+ "Evacuation candidate: Free bytes: %6zu. Free Lists length: %4d.\n",
+ p->area_size() - p->allocated_bytes(), p->FreeListsLength());
+ }
+
p->MarkEvacuationCandidate();
evacuation_candidates_.push_back(p);
}
@@ -473,6 +481,9 @@ bool MarkCompactCollector::StartCompaction() {
if (!compacting_) {
DCHECK(evacuation_candidates_.empty());
+ if (FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())
+ return false;
+
CollectEvacuationCandidates(heap()->old_space());
if (FLAG_compact_code_space) {
@@ -513,7 +524,7 @@ void MarkCompactCollector::CollectGarbage() {
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
- ReadOnlyHeapIterator iterator(space);
+ ReadOnlyHeapObjectIterator iterator(space);
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
CHECK(non_atomic_marking_state()->IsBlack(object));
@@ -536,7 +547,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
}
void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
- LargeObjectIterator it(space);
+ LargeObjectSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
CHECK(non_atomic_marking_state()->IsWhite(obj));
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
@@ -567,6 +578,8 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
heap()->code_space()->RefillFreeList();
heap()->map_space()->RefillFreeList();
+ heap()->tracer()->NotifySweepingCompleted();
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
FullEvacuationVerifier verifier(heap());
@@ -629,6 +642,27 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
int number_of_pages = space->CountTotalPages();
size_t area_size = space->AreaSize();
+ const bool in_standard_path =
+ !(FLAG_manual_evacuation_candidates_selection ||
+ FLAG_stress_compaction_random || FLAG_stress_compaction ||
+ FLAG_always_compact);
+ // These variables are only initialized if |in_standard_path| and are not
+ // used otherwise.
+ size_t max_evacuated_bytes;
+ int target_fragmentation_percent;
+ size_t free_bytes_threshold;
+ if (in_standard_path) {
+ // We use two conditions to decide whether a page qualifies as an evacuation
+ // candidate, or not:
+ // * Target fragmentation: How fragmented is a page, i.e., what is the ratio
+ // between live bytes and the capacity of this page (= area).
+ // * Evacuation quota: A global quota determining how much bytes should be
+ // compacted.
+ ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
+ &max_evacuated_bytes);
+ free_bytes_threshold = target_fragmentation_percent * (area_size / 100);
+ }
+
// Pairs of (live_bytes_in_page, page).
using LiveBytesPagePair = std::pair<size_t, Page*>;
std::vector<LiveBytesPagePair> pages;
@@ -652,7 +686,15 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
CHECK(p->SweepingDone());
DCHECK(p->area_size() == area_size);
- pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ if (in_standard_path) {
+ // Only pages with at least |free_bytes_threshold| free bytes are
+ // considered for evacuation.
+ if (area_size - p->allocated_bytes() >= free_bytes_threshold) {
+ pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ }
+ } else {
+ pages.push_back(std::make_pair(p->allocated_bytes(), p));
+ }
}
int candidate_count = 0;
@@ -691,25 +733,6 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
} else {
// The following approach determines the pages that should be evacuated.
//
- // We use two conditions to decide whether a page qualifies as an evacuation
- // candidate, or not:
- // * Target fragmentation: How fragmented is a page, i.e., how is the ratio
- // between live bytes and capacity of this page (= area).
- // * Evacuation quota: A global quota determining how much bytes should be
- // compacted.
- //
- // The algorithm sorts all pages by live bytes and then iterates through
- // them starting with the page with the most free memory, adding them to the
- // set of evacuation candidates as long as both conditions (fragmentation
- // and quota) hold.
- size_t max_evacuated_bytes;
- int target_fragmentation_percent;
- ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
- &max_evacuated_bytes);
-
- const size_t free_bytes_threshold =
- target_fragmentation_percent * (area_size / 100);
-
// Sort pages from the most free to the least free, then select
// the first n pages for evacuation such that:
// - the total size of evacuated objects does not exceed the specified
@@ -722,10 +745,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
for (size_t i = 0; i < pages.size(); i++) {
size_t live_bytes = pages[i].first;
DCHECK_GE(area_size, live_bytes);
- size_t free_bytes = area_size - live_bytes;
if (FLAG_always_compact ||
- ((free_bytes >= free_bytes_threshold) &&
- ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
+ ((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
candidate_count++;
total_live_bytes += live_bytes;
}
@@ -735,9 +756,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
"fragmentation_limit_kb=%zu "
"fragmentation_limit_percent=%d sum_compaction_kb=%zu "
"compaction_limit_kb=%zu\n",
- space->name(), free_bytes / KB, free_bytes_threshold / KB,
- target_fragmentation_percent, total_live_bytes / KB,
- max_evacuated_bytes / KB);
+ space->name(), (area_size - live_bytes) / KB,
+ free_bytes_threshold / KB, target_fragmentation_percent,
+ total_live_bytes / KB, max_evacuated_bytes / KB);
}
}
// How many pages we will allocate for the evacuated objects
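A worked example of the new pre-filter in the standard path: with an area of 500 KB and a target fragmentation of 70 percent, the threshold is 70 * (500 KB / 100) = 350 KB, so only pages with at least 350 KB of free space enter the candidate list at all. The numbers and the helper below are illustrative, not values produced by ComputeEvacuationHeuristics:

#include <cstddef>

// Restates the qualifying condition introduced in this hunk.
inline bool QualifiesAsEvacuationCandidate(size_t area_size,
                                           size_t allocated_bytes,
                                           int target_fragmentation_percent) {
  const size_t free_bytes_threshold =
      target_fragmentation_percent * (area_size / 100);
  return area_size - allocated_bytes >= free_bytes_threshold;
}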
@@ -807,9 +828,9 @@ void MarkCompactCollector::Prepare() {
StartCompaction();
}
- PagedSpaces spaces(heap());
- for (PagedSpace* space = spaces.next(); space != nullptr;
- space = spaces.next()) {
+ PagedSpaceIterator spaces(heap());
+ for (PagedSpace* space = spaces.Next(); space != nullptr;
+ space = spaces.Next()) {
space->PrepareForMarkCompact();
}
heap()->account_external_memory_concurrently_freed();
@@ -1364,8 +1385,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
if (map.visitor_id() == kVisitThinString) {
HeapObject actual = ThinString::cast(object).unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
- object.map_slot().Relaxed_Store(
- MapWord::FromForwardingAddress(actual).ToMap());
+ object.set_map_word(MapWord::FromForwardingAddress(actual));
return true;
}
// TODO(mlippautz): Handle ConsString.
@@ -1463,7 +1483,7 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
inline bool Visit(HeapObject object, int size) override {
HeapObject target_object;
- if (TryEvacuateObject(Page::FromHeapObject(object)->owner()->identity(),
+ if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
object, size, &target_object)) {
DCHECK(object.map_word().IsForwardingAddress());
return true;
@@ -2084,7 +2104,6 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
UncompiledData::Initialize(
uncompiled_data, inferred_name, start_position, end_position,
- kFunctionLiteralIdInvalid,
[](HeapObject object, ObjectSlot slot, HeapObject target) {
RecordSlot(object, slot, target);
});
@@ -2731,6 +2750,7 @@ class Evacuator : public Malloced {
inline void Finalize();
virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
+ virtual GCTracer::Scope::ScopeId GetTracingScope() = 0;
protected:
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
@@ -2819,6 +2839,10 @@ class FullEvacuator : public Evacuator {
return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
}
+ GCTracer::Scope::ScopeId GetTracingScope() override {
+ return GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL;
+ }
+
inline void Finalize() {
Evacuator::Finalize();
@@ -2909,16 +2933,24 @@ class PageEvacuationTask : public ItemParallelJob::Task {
evacuator_(evacuator),
tracer_(isolate->heap()->tracer()) {}
- void RunInParallel() override {
- TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
+ void RunInParallel(Runner runner) override {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(tracer_, evacuator_->GetTracingScope());
+ ProcessItems();
+ } else {
+ TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
+ ProcessItems();
+ }
+ }
+
+ private:
+ void ProcessItems() {
EvacuationItem* item = nullptr;
while ((item = GetItem<EvacuationItem>()) != nullptr) {
evacuator_->EvacuatePage(item->chunk());
item->MarkFinished();
}
}
-
- private:
Evacuator* evacuator_;
GCTracer* tracer_;
};
@@ -3183,7 +3215,7 @@ void MarkCompactCollector::Evacuate() {
sweeper()->AddPageForIterability(p);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- DCHECK_EQ(OLD_SPACE, p->owner()->identity());
+ DCHECK_EQ(OLD_SPACE, p->owner_identity());
sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
}
}
@@ -3191,7 +3223,7 @@ void MarkCompactCollector::Evacuate() {
for (Page* p : old_space_evacuation_pages_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
- sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
+ sweeper()->AddPage(p->owner_identity(), p, Sweeper::REGULAR);
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
}
}
@@ -3218,24 +3250,35 @@ class UpdatingItem : public ItemParallelJob::Item {
class PointersUpdatingTask : public ItemParallelJob::Task {
public:
- explicit PointersUpdatingTask(Isolate* isolate,
- GCTracer::BackgroundScope::ScopeId scope)
+ explicit PointersUpdatingTask(
+ Isolate* isolate, GCTracer::Scope::ScopeId scope,
+ GCTracer::BackgroundScope::ScopeId background_scope)
: ItemParallelJob::Task(isolate),
tracer_(isolate->heap()->tracer()),
- scope_(scope) {}
+ scope_(scope),
+ background_scope_(background_scope) {}
- void RunInParallel() override {
- TRACE_BACKGROUND_GC(tracer_, scope_);
+ void RunInParallel(Runner runner) override {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(tracer_, scope_);
+ UpdatePointers();
+ } else {
+ TRACE_BACKGROUND_GC(tracer_, background_scope_);
+ UpdatePointers();
+ }
+ }
+
+ private:
+ void UpdatePointers() {
UpdatingItem* item = nullptr;
while ((item = GetItem<UpdatingItem>()) != nullptr) {
item->Process();
item->MarkFinished();
}
}
-
- private:
GCTracer* tracer_;
- GCTracer::BackgroundScope::ScopeId scope_;
+ GCTracer::Scope::ScopeId scope_;
+ GCTracer::BackgroundScope::ScopeId background_scope_;
};
template <typename MarkingState>
@@ -3651,7 +3694,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
remembered_set_tasks + num_ephemeron_table_updating_tasks);
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpdatingTask(
- isolate(),
+ isolate(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.AddItem(new EphemeronTableUpdatingItem(heap()));
@@ -3684,7 +3727,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
if (num_tasks > 0) {
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpdatingTask(
- isolate(),
+ isolate(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run();
@@ -4194,8 +4237,9 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
for (int i = 0; i < num_tasks; i++) {
updating_job.AddTask(new PointersUpdatingTask(
- isolate(), GCTracer::BackgroundScope::
- MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
+ isolate(), GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
+ GCTracer::BackgroundScope::
+ MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
{
@@ -4498,9 +4542,30 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
Page::kPageSize);
}
- void RunInParallel() override {
- TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
- GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
+ void RunInParallel(Runner runner) override {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(collector_->heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
+ ProcessItems();
+ } else {
+ TRACE_BACKGROUND_GC(
+ collector_->heap()->tracer(),
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
+ ProcessItems();
+ }
+ }
+
+ void MarkObject(Object object) {
+ if (!Heap::InYoungGeneration(object)) return;
+ HeapObject heap_object = HeapObject::cast(object);
+ if (marking_state_->WhiteToGrey(heap_object)) {
+ const int size = visitor_.Visit(heap_object);
+ IncrementLiveBytes(heap_object, size);
+ }
+ }
+
+ private:
+ void ProcessItems() {
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
@@ -4519,17 +4584,6 @@ class YoungGenerationMarkingTask : public ItemParallelJob::Task {
static_cast<void*>(this), marking_time);
}
}
-
- void MarkObject(Object object) {
- if (!Heap::InYoungGeneration(object)) return;
- HeapObject heap_object = HeapObject::cast(object);
- if (marking_state_->WhiteToGrey(heap_object)) {
- const int size = visitor_.Visit(heap_object);
- IncrementLiveBytes(heap_object, size);
- }
- }
-
- private:
void EmptyLocalMarkingWorklist() {
HeapObject object;
while (marking_worklist_.Pop(&object)) {
@@ -4761,6 +4815,10 @@ class YoungGenerationEvacuator : public Evacuator {
return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
}
+ GCTracer::Scope::ScopeId GetTracingScope() override {
+ return GCTracer::Scope::MINOR_MC_EVACUATE_COPY_PARALLEL;
+ }
+
protected:
void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
diff --git a/chromium/v8/src/heap/object-stats.cc b/chromium/v8/src/heap/object-stats.cc
index 033f4fc6e92..2a63896242a 100644
--- a/chromium/v8/src/heap/object-stats.cc
+++ b/chromium/v8/src/heap/object-stats.cc
@@ -1079,7 +1079,7 @@ class ObjectStatsVisitor {
namespace {
void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
- CombinedHeapIterator iterator(heap);
+ CombinedHeapObjectIterator iterator(heap);
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
visitor->Visit(obj, obj.Size());
diff --git a/chromium/v8/src/heap/objects-visiting-inl.h b/chromium/v8/src/heap/objects-visiting-inl.h
index d96cded09a3..ba0bfa2415b 100644
--- a/chromium/v8/src/heap/objects-visiting-inl.h
+++ b/chromium/v8/src/heap/objects-visiting-inl.h
@@ -12,6 +12,7 @@
#include "src/heap/mark-compact.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/module-inl.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
@@ -71,9 +72,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
template <typename ResultType, typename ConcreteVisitor>
void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
- HeapObject host, MapWordSlot map_slot) {
+ HeapObject host) {
DCHECK(!host.map_word().IsForwardingAddress());
- static_cast<ConcreteVisitor*>(this)->VisitPointer(host, ObjectSlot(map_slot));
+ static_cast<ConcreteVisitor*>(this)->VisitPointer(host, host.map_slot());
}
#define VISIT(TypeName, Type) \
@@ -88,8 +89,9 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
"concurrent marker"); \
} \
int size = TypeName::BodyDescriptor::SizeOf(map, object); \
- if (visitor->ShouldVisitMapPointer()) \
- visitor->VisitMapPointer(object, object.map_slot()); \
+ if (visitor->ShouldVisitMapPointer()) { \
+ visitor->VisitMapPointer(object); \
+ } \
TypeName::BodyDescriptor::IterateBody(map, object, size, visitor); \
return static_cast<ResultType>(size); \
}
@@ -109,7 +111,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map.instance_size();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
return static_cast<ResultType>(size);
}
@@ -120,8 +122,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer())
- visitor->VisitMapPointer(object, object.map_slot());
+ if (visitor->ShouldVisitMapPointer()) {
+ visitor->VisitMapPointer(object);
+ }
JSObject::FastBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -132,8 +135,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::BodyDescriptor::SizeOf(map, object);
- if (visitor->ShouldVisitMapPointer())
- visitor->VisitMapPointer(object, object.map_slot());
+ if (visitor->ShouldVisitMapPointer()) {
+ visitor->VisitMapPointer(object);
+ }
JSObject::BodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -145,7 +149,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = map.instance_size();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
StructBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
@@ -157,7 +161,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
return static_cast<ResultType>(object.size());
}
@@ -169,7 +173,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitWeakArray(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = WeakArrayBodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object.map_slot());
+ visitor->VisitMapPointer(object);
}
WeakArrayBodyDescriptor::IterateBody(map, object, size, visitor);
return size;
diff --git a/chromium/v8/src/heap/objects-visiting.h b/chromium/v8/src/heap/objects-visiting.h
index 9ebd94427ee..a5c291458f5 100644
--- a/chromium/v8/src/heap/objects-visiting.h
+++ b/chromium/v8/src/heap/objects-visiting.h
@@ -54,12 +54,15 @@ namespace internal {
V(SmallOrderedHashMap, SmallOrderedHashMap) \
V(SmallOrderedHashSet, SmallOrderedHashSet) \
V(SmallOrderedNameDictionary, SmallOrderedNameDictionary) \
+ V(SourceTextModule, SourceTextModule) \
V(Symbol, Symbol) \
+ V(SyntheticModule, SyntheticModule) \
V(ThinString, ThinString) \
V(TransitionArray, TransitionArray) \
V(UncompiledDataWithoutPreparseData, UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData, UncompiledDataWithPreparseData) \
V(WasmCapiFunctionData, WasmCapiFunctionData) \
+ V(WasmIndirectFunctionTable, WasmIndirectFunctionTable) \
V(WasmInstanceObject, WasmInstanceObject)
#define FORWARD_DECLARE(TypeName, Type) class Type;
@@ -91,7 +94,7 @@ class HeapVisitor : public ObjectVisitor {
// Guard predicate for visiting the objects map pointer separately.
V8_INLINE bool ShouldVisitMapPointer() { return true; }
// A callback for visiting the map pointer in the object header.
- V8_INLINE void VisitMapPointer(HeapObject host, MapWordSlot map_slot);
+ V8_INLINE void VisitMapPointer(HeapObject host);
// If this predicate returns false, then the heap visitor will fail
// in the default Visit implementation for subclasses of JSObject.
V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
diff --git a/chromium/v8/src/heap/read-only-heap-inl.h b/chromium/v8/src/heap/read-only-heap-inl.h
new file mode 100644
index 00000000000..c725b4bca86
--- /dev/null
+++ b/chromium/v8/src/heap/read-only-heap-inl.h
@@ -0,0 +1,31 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_READ_ONLY_HEAP_INL_H_
+#define V8_HEAP_READ_ONLY_HEAP_INL_H_
+
+#include "src/heap/read-only-heap.h"
+
+#include "src/execution/isolate-utils-inl.h"
+#include "src/roots/roots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
+#ifdef V8_SHARED_RO_HEAP
+ // This fails if we are creating heap objects and the roots haven't yet been
+ // copied into the read-only heap or it has been cleared for testing.
+ if (shared_ro_heap_ != nullptr && shared_ro_heap_->init_complete_) {
+ return ReadOnlyRoots(shared_ro_heap_->read_only_roots_);
+ }
+#endif
+ return ReadOnlyRoots(GetHeapFromWritableObject(object));
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_READ_ONLY_HEAP_INL_H_
diff --git a/chromium/v8/src/heap/read-only-heap.cc b/chromium/v8/src/heap/read-only-heap.cc
index 1021bc147f2..c325aea7e6b 100644
--- a/chromium/v8/src/heap/read-only-heap.cc
+++ b/chromium/v8/src/heap/read-only-heap.cc
@@ -6,6 +6,7 @@
#include <cstring>
+#include "src/base/lsan.h"
#include "src/base/once.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -20,29 +21,53 @@ namespace internal {
#ifdef V8_SHARED_RO_HEAP
V8_DECLARE_ONCE(setup_ro_heap_once);
-ReadOnlyHeap* shared_ro_heap = nullptr;
+ReadOnlyHeap* ReadOnlyHeap::shared_ro_heap_ = nullptr;
#endif
// static
void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
DCHECK_NOT_NULL(isolate);
#ifdef V8_SHARED_RO_HEAP
- // Make sure we are only sharing read-only space when deserializing. Otherwise
- // we would be trying to create heap objects inside an already initialized
- // read-only space. Use ClearSharedHeapForTest if you need a new read-only
- // space.
- DCHECK_IMPLIES(shared_ro_heap != nullptr, des != nullptr);
-
- base::CallOnce(&setup_ro_heap_once, [isolate, des]() {
- shared_ro_heap = CreateAndAttachToIsolate(isolate);
- if (des != nullptr) shared_ro_heap->DeseralizeIntoIsolate(isolate, des);
- });
-
- isolate->heap()->SetUpFromReadOnlyHeap(shared_ro_heap);
+ bool call_once_ran = false;
+ base::Optional<Checksum> des_checksum;
+#ifdef DEBUG
+ if (des != nullptr) des_checksum = des->GetChecksum();
+#endif // DEBUG
+
+ base::CallOnce(&setup_ro_heap_once,
+ [isolate, des, des_checksum, &call_once_ran]() {
+ USE(des_checksum);
+ shared_ro_heap_ = CreateAndAttachToIsolate(isolate);
+ if (des != nullptr) {
+#ifdef DEBUG
+ shared_ro_heap_->read_only_blob_checksum_ = des_checksum;
+#endif // DEBUG
+ shared_ro_heap_->DeseralizeIntoIsolate(isolate, des);
+ }
+ call_once_ran = true;
+ });
+
+ USE(call_once_ran);
+ USE(des_checksum);
+#ifdef DEBUG
+ const base::Optional<Checksum> last_checksum =
+ shared_ro_heap_->read_only_blob_checksum_;
+ if (last_checksum || des_checksum) {
+ // The read-only heap was set up from a snapshot. Make sure it's always
+ // the same snapshot.
+ CHECK_EQ(last_checksum, des_checksum);
+ } else {
+ // The read-only heap objects were created. Make sure this happens only
+ // once, during this call.
+ CHECK(call_once_ran);
+ }
+#endif // DEBUG
+
+ isolate->SetUpFromReadOnlyHeap(shared_ro_heap_);
if (des != nullptr) {
void* const isolate_ro_roots = reinterpret_cast<void*>(
isolate->roots_table().read_only_roots_begin().address());
- std::memcpy(isolate_ro_roots, shared_ro_heap->read_only_roots_,
+ std::memcpy(isolate_ro_roots, shared_ro_heap_->read_only_roots_,
kEntriesCount * sizeof(Address));
}
#else
@@ -66,7 +91,7 @@ void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) {
// static
ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(Isolate* isolate) {
auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
- isolate->heap()->SetUpFromReadOnlyHeap(ro_heap);
+ isolate->SetUpFromReadOnlyHeap(ro_heap);
return ro_heap;
}
@@ -77,6 +102,9 @@ void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
isolate->roots_table().read_only_roots_begin().address());
std::memcpy(read_only_roots_, isolate_ro_roots,
kEntriesCount * sizeof(Address));
+ // N.B. Since pages are manually allocated with mmap, Lsan doesn't track
+ // their pointers. Seal explicitly ignores the necessary objects.
+ LSAN_IGNORE_OBJECT(this);
read_only_space_->Seal(ReadOnlySpace::SealMode::kDetachFromHeapAndForget);
#else
read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
@@ -94,30 +122,17 @@ void ReadOnlyHeap::OnHeapTearDown() {
// static
void ReadOnlyHeap::ClearSharedHeapForTest() {
#ifdef V8_SHARED_RO_HEAP
- DCHECK_NOT_NULL(shared_ro_heap);
+ DCHECK_NOT_NULL(shared_ro_heap_);
// TODO(v8:7464): Just leak read-only space for now. The paged-space heap
// is null so there isn't a nice way to do this.
- delete shared_ro_heap;
- shared_ro_heap = nullptr;
+ shared_ro_heap_ = nullptr;
setup_ro_heap_once = 0;
#endif
}
// static
bool ReadOnlyHeap::Contains(HeapObject object) {
- return Page::FromAddress(object.ptr())->owner()->identity() == RO_SPACE;
-}
-
-// static
-ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
-#ifdef V8_SHARED_RO_HEAP
- // This fails if we are creating heap objects and the roots haven't yet been
- // copied into the read-only heap or it has been cleared for testing.
- if (shared_ro_heap != nullptr && shared_ro_heap->init_complete_) {
- return ReadOnlyRoots(shared_ro_heap->read_only_roots_);
- }
-#endif
- return ReadOnlyRoots(GetHeapFromWritableObject(object));
+ return MemoryChunk::FromHeapObject(object)->InReadOnlySpace();
}
Object* ReadOnlyHeap::ExtendReadOnlyObjectCache() {
@@ -134,15 +149,15 @@ bool ReadOnlyHeap::read_only_object_cache_is_initialized() const {
return read_only_object_cache_.size() > 0;
}
-ReadOnlyHeapIterator::ReadOnlyHeapIterator(ReadOnlyHeap* ro_heap)
- : ReadOnlyHeapIterator(ro_heap->read_only_space()) {}
+ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap)
+ : ReadOnlyHeapObjectIterator(ro_heap->read_only_space()) {}
-ReadOnlyHeapIterator::ReadOnlyHeapIterator(ReadOnlySpace* ro_space)
+ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space)
: ro_space_(ro_space),
current_page_(ro_space->first_page()),
current_addr_(current_page_->area_start()) {}
-HeapObject ReadOnlyHeapIterator::Next() {
+HeapObject ReadOnlyHeapObjectIterator::Next() {
if (current_page_ == nullptr) {
return HeapObject();
}
diff --git a/chromium/v8/src/heap/read-only-heap.h b/chromium/v8/src/heap/read-only-heap.h
index 697c9e26efd..4c1da62a157 100644
--- a/chromium/v8/src/heap/read-only-heap.h
+++ b/chromium/v8/src/heap/read-only-heap.h
@@ -5,7 +5,10 @@
#ifndef V8_HEAP_READ_ONLY_HEAP_H_
#define V8_HEAP_READ_ONLY_HEAP_H_
+#include <utility>
+
#include "src/base/macros.h"
+#include "src/base/optional.h"
#include "src/objects/heap-object.h"
#include "src/objects/objects.h"
#include "src/roots/roots.h"
@@ -44,7 +47,8 @@ class ReadOnlyHeap final {
// Gets read-only roots from an appropriate root list: shared read-only root
// list if the shared read-only heap has been initialized or the isolate
// specific roots table.
- V8_EXPORT_PRIVATE static ReadOnlyRoots GetReadOnlyRoots(HeapObject object);
+ V8_EXPORT_PRIVATE inline static ReadOnlyRoots GetReadOnlyRoots(
+ HeapObject object);
// Clears any shared read-only heap artifacts for testing, forcing read-only
// heap to be re-created on next set up.
@@ -60,6 +64,8 @@ class ReadOnlyHeap final {
ReadOnlySpace* read_only_space() const { return read_only_space_; }
private:
+ using Checksum = std::pair<uint32_t, uint32_t>;
+
// Creates a new read-only heap and attaches it to the provided isolate.
static ReadOnlyHeap* CreateAndAttachToIsolate(Isolate* isolate);
// Runs the read-only deserializer and calls InitFromIsolate to complete
@@ -76,18 +82,25 @@ class ReadOnlyHeap final {
std::vector<Object> read_only_object_cache_;
#ifdef V8_SHARED_RO_HEAP
+#ifdef DEBUG
+ // The checksum of the blob the read-only heap was deserialized from, if any.
+ base::Optional<Checksum> read_only_blob_checksum_;
+#endif // DEBUG
+
Address read_only_roots_[kEntriesCount];
-#endif
+
+ V8_EXPORT_PRIVATE static ReadOnlyHeap* shared_ro_heap_;
+#endif // V8_SHARED_RO_HEAP
explicit ReadOnlyHeap(ReadOnlySpace* ro_space) : read_only_space_(ro_space) {}
DISALLOW_COPY_AND_ASSIGN(ReadOnlyHeap);
};
// This class enables iterating over all read-only heap objects.
-class V8_EXPORT_PRIVATE ReadOnlyHeapIterator {
+class V8_EXPORT_PRIVATE ReadOnlyHeapObjectIterator {
public:
- explicit ReadOnlyHeapIterator(ReadOnlyHeap* ro_heap);
- explicit ReadOnlyHeapIterator(ReadOnlySpace* ro_space);
+ explicit ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap);
+ explicit ReadOnlyHeapObjectIterator(ReadOnlySpace* ro_space);
HeapObject Next();
diff --git a/chromium/v8/src/heap/remembered-set.h b/chromium/v8/src/heap/remembered-set.h
index cd2344b3499..ea7fe0149ba 100644
--- a/chromium/v8/src/heap/remembered-set.h
+++ b/chromium/v8/src/heap/remembered-set.h
@@ -5,8 +5,8 @@
#ifndef V8_HEAP_REMEMBERED_SET_H_
#define V8_HEAP_REMEMBERED_SET_H_
+#include "src/base/memory.h"
#include "src/codegen/reloc-info.h"
-#include "src/common/v8memory.h"
#include "src/heap/heap.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
@@ -309,7 +309,7 @@ class UpdateTypedSlotHelper {
SlotCallbackResult result = callback(FullMaybeObjectSlot(&code));
DCHECK(!HasWeakHeapObjectTag(code));
if (code != old_code) {
- Memory<Address>(entry_address) = code.entry();
+ base::Memory<Address>(entry_address) = code.entry();
}
return result;
}
diff --git a/chromium/v8/src/heap/scavenger-inl.h b/chromium/v8/src/heap/scavenger-inl.h
index 50dc5f25c9c..9c605f70893 100644
--- a/chromium/v8/src/heap/scavenger-inl.h
+++ b/chromium/v8/src/heap/scavenger-inl.h
@@ -97,8 +97,7 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
// with page initialization.
HeapObject heap_object;
if (object->GetHeapObject(&heap_object)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object.address());
- CHECK_NOT_NULL(chunk->synchronized_heap());
+ MemoryChunk::FromHeapObject(heap_object)->SynchronizedHeapLoad();
}
#endif
}
@@ -110,9 +109,8 @@ bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
heap()->CopyBlock(target.address() + kTaggedSize,
source.address() + kTaggedSize, size - kTaggedSize);
- Object old = source.map_slot().Release_CompareAndSwap(
- map, MapWord::FromForwardingAddress(target).ToMap());
- if (old != map) {
+ if (!source.synchronized_compare_and_swap_map_word(
+ MapWord::FromMap(map), MapWord::FromForwardingAddress(target))) {
// Other task migrated the object.
return false;
}
@@ -215,9 +213,9 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
FLAG_young_generation_large_objects &&
MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
- MemoryChunk::FromHeapObject(object)->owner()->identity());
- if (object.map_slot().Release_CompareAndSwap(
- map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
+ MemoryChunk::FromHeapObject(object)->owner_identity());
+ if (object.synchronized_compare_and_swap_map_word(
+ MapWord::FromMap(map), MapWord::FromForwardingAddress(object))) {
surviving_new_large_objects_.insert({object, map});
promoted_size_ += object_size;
if (object_fields == ObjectFields::kMaybePointers) {
@@ -314,8 +312,7 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
HeapObjectReference::Update(slot, first);
if (!Heap::InYoungGeneration(first)) {
- object.map_slot().Release_Store(
- MapWord::FromForwardingAddress(first).ToMap());
+ object.synchronized_set_map_word(MapWord::FromForwardingAddress(first));
return REMOVE_SLOT;
}
@@ -324,16 +321,15 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
HeapObject target = first_word.ToForwardingAddress();
HeapObjectReference::Update(slot, target);
- object.map_slot().Release_Store(
- MapWord::FromForwardingAddress(target).ToMap());
+ object.synchronized_set_map_word(MapWord::FromForwardingAddress(target));
return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
Map map = first_word.ToMap();
SlotCallbackResult result =
EvacuateObjectDefault(map, slot, first, first.SizeFromMap(map),
Map::ObjectFieldsFrom(map.visitor_id()));
- object.map_slot().Release_Store(
- MapWord::FromForwardingAddress(slot.ToHeapObject()).ToMap());
+ object.synchronized_set_map_word(
+ MapWord::FromForwardingAddress(slot.ToHeapObject()));
return result;
}
DCHECK_EQ(ObjectFields::kMaybePointers,
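A sketch of the claiming protocol the scavenger now expresses through HeapObject::synchronized_compare_and_swap_map_word(), as used in the hunks above: whichever task swaps the original map word for a forwarding address owns the migration, and any loser observes the winner's forwarding address instead. The helper name is a placeholder:

bool TryClaimForMigration(Map map, HeapObject source, HeapObject target) {
  if (source.synchronized_compare_and_swap_map_word(
          MapWord::FromMap(map), MapWord::FromForwardingAddress(target))) {
    return true;  // This task won the race and migrates the object.
  }
  // Another task won; source's map word now holds a forwarding address.
  return false;
}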
diff --git a/chromium/v8/src/heap/scavenger.cc b/chromium/v8/src/heap/scavenger.cc
index c7666b7da71..70b514142fe 100644
--- a/chromium/v8/src/heap/scavenger.cc
+++ b/chromium/v8/src/heap/scavenger.cc
@@ -41,10 +41,20 @@ class ScavengingTask final : public ItemParallelJob::Task {
scavenger_(scavenger),
barrier_(barrier) {}
- void RunInParallel() final {
- TRACE_BACKGROUND_GC(
- heap_->tracer(),
- GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
+ void RunInParallel(Runner runner) final {
+ if (runner == Runner::kForeground) {
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
+ ProcessItems();
+ } else {
+ TRACE_BACKGROUND_GC(
+ heap_->tracer(),
+ GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
+ ProcessItems();
+ }
+ }
+
+ private:
+ void ProcessItems() {
double scavenging_time = 0.0;
{
barrier_->Start();
@@ -66,8 +76,6 @@ class ScavengingTask final : public ItemParallelJob::Task {
scavenger_->bytes_copied(), scavenger_->bytes_promoted());
}
}
-
- private:
Heap* const heap_;
Scavenger* const scavenger_;
OneshotBarrier* const barrier_;
@@ -413,7 +421,7 @@ void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
}
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
- AllocationSpace space = page->owner()->identity();
+ AllocationSpace space = page->owner_identity();
if ((space == OLD_SPACE) && !page->SweepingDone()) {
heap()->mark_compact_collector()->sweeper()->AddPage(
space, reinterpret_cast<Page*>(page),
diff --git a/chromium/v8/src/heap/setup-heap-internal.cc b/chromium/v8/src/heap/setup-heap-internal.cc
index 458fd819aef..a936521a7e4 100644
--- a/chromium/v8/src/heap/setup-heap-internal.cc
+++ b/chromium/v8/src/heap/setup-heap-internal.cc
@@ -29,7 +29,6 @@
#include "src/objects/lookup-cache.h"
#include "src/objects/map.h"
#include "src/objects/microtask.h"
-#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table.h"
@@ -37,11 +36,15 @@
#include "src/objects/script.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/smi.h"
+#include "src/objects/source-text-module.h"
#include "src/objects/stack-frame-info.h"
#include "src/objects/string.h"
+#include "src/objects/synthetic-module.h"
#include "src/objects/template-objects-inl.h"
-#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp.h"
#include "src/wasm/wasm-objects.h"
+#include "torque-generated/class-definitions-tq.h"
+#include "torque-generated/internal-class-definitions-tq-inl.h"
namespace v8 {
namespace internal {
@@ -485,7 +488,10 @@ bool Heap::CreateInitialMaps() {
uncompiled_data_with_preparse_data)
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
-
+ ALLOCATE_MAP(SOURCE_TEXT_MODULE_TYPE, SourceTextModule::kSize,
+ source_text_module)
+ ALLOCATE_MAP(SYNTHETIC_MODULE_TYPE, SyntheticModule::kSize,
+ synthetic_module)
ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
code_data_container)
@@ -870,10 +876,6 @@ void Heap::CreateInitialObjects() {
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_regexp_species_protector(*cell);
-
- cell = factory->NewPropertyCell(factory->empty_string());
- cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_string_iterator_protector(*cell);
Handle<Cell> string_length_overflow_cell = factory->NewCell(
diff --git a/chromium/v8/src/heap/spaces-inl.h b/chromium/v8/src/heap/spaces-inl.h
index 308d4f51b16..3b4ed8d30ad 100644
--- a/chromium/v8/src/heap/spaces-inl.h
+++ b/chromium/v8/src/heap/spaces-inl.h
@@ -42,9 +42,9 @@ PageRange::PageRange(Address start, Address limit)
}
// -----------------------------------------------------------------------------
-// SemiSpaceIterator
+// SemiSpaceObjectIterator
-HeapObject SemiSpaceIterator::Next() {
+HeapObject SemiSpaceObjectIterator::Next() {
while (current_ != limit_) {
if (Page::IsAlignedToPageSize(current_)) {
Page* page = Page::FromAllocationAreaAddress(current_);
@@ -63,9 +63,9 @@ HeapObject SemiSpaceIterator::Next() {
}
// -----------------------------------------------------------------------------
-// HeapObjectIterator
+// PagedSpaceObjectIterator
-HeapObject HeapObjectIterator::Next() {
+HeapObject PagedSpaceObjectIterator::Next() {
do {
HeapObject next_obj = FromCurrentPage();
if (!next_obj.is_null()) return next_obj;
@@ -73,7 +73,7 @@ HeapObject HeapObjectIterator::Next() {
return HeapObject();
}
-HeapObject HeapObjectIterator::FromCurrentPage() {
+HeapObject PagedSpaceObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
cur_addr_ = space_->limit();
@@ -182,7 +182,7 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
DCHECK_EQ(this, page->owner());
size_t added = 0;
page->ForAllFreeListCategories([this, &added](FreeListCategory* category) {
- category->set_free_list(&free_list_);
+ category->set_free_list(free_list());
added += category->available();
category->Relink();
});
@@ -204,13 +204,6 @@ bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
return false;
}
-bool MemoryChunk::HasHeaderSentinel(Address slot_addr) {
- Address base = BaseAddress(slot_addr);
- if (slot_addr < base + kHeaderSize) return false;
- return HeapObject::FromAddress(base) ==
- ObjectSlot(base + kHeaderSentinelOffset).Relaxed_Load();
-}
-
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
while (!HasHeaderSentinel(addr)) {
addr = BaseAddress(addr) - 1;
@@ -234,14 +227,21 @@ void MemoryChunk::MoveExternalBackingStoreBytes(ExternalBackingStoreType type,
MemoryChunk* from,
MemoryChunk* to,
size_t amount) {
+ DCHECK_NOT_NULL(from->owner());
+ DCHECK_NOT_NULL(to->owner());
base::CheckedDecrement(&(from->external_backing_store_bytes_[type]), amount);
base::CheckedIncrement(&(to->external_backing_store_bytes_[type]), amount);
Space::MoveExternalBackingStoreBytes(type, from->owner(), to->owner(),
amount);
}
+AllocationSpace MemoryChunk::owner_identity() const {
+ if (InReadOnlySpace()) return RO_SPACE;
+ return owner()->identity();
+}
+
void Page::MarkNeverAllocateForTesting() {
- DCHECK(this->owner()->identity() != NEW_SPACE);
+ DCHECK(this->owner_identity() != NEW_SPACE);
DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
SetFlag(NEVER_ALLOCATE_ON_PAGE);
SetFlag(NEVER_EVACUATE);
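
[Editor's note] owner_identity() exists because detached read-only pages null out their owner_ later in this patch (see ReadOnlyPage::MakeHeaderRelocatable), so the old owner()->identity() pattern would dereference a null owner for them; the helper short-circuits to RO_SPACE first. A minimal model of that null-owner guard, with illustrative types:

enum AllocationSpace { RO_SPACE, NEW_SPACE, OLD_SPACE /* ... */ };

struct Space {
  AllocationSpace id;
  AllocationSpace identity() const { return id; }
};

struct Chunk {
  bool in_read_only_space;  // Stand-in for the READ_ONLY_HEAP page flag.
  Space* owner;             // Null once a read-only page has been detached.

  AllocationSpace owner_identity() const {
    if (in_read_only_space) return RO_SPACE;  // Never touch owner here.
    return owner->identity();
  }
};
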
@@ -315,10 +315,6 @@ MemoryChunk* OldGenerationMemoryChunkIterator::next() {
UNREACHABLE();
}
-Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
- return top(type) ? top(type)->page() : nullptr;
-}
-
FreeList* FreeListCategory::owner() { return free_list_; }
bool FreeListCategory::is_linked() {
@@ -376,7 +372,7 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
}
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
- DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
+ DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
if (!EnsureLinearAllocationArea(size_in_bytes)) {
return AllocationResult::Retry(identity());
}
@@ -389,7 +385,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
- DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
+ DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
int allocation_size = size_in_bytes;
HeapObject object = TryAllocateLinearlyAligned(&allocation_size, alignment);
if (object.is_null()) {
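
[Editor's note] The RO_SPACE allocation DCHECKs above now key off Space::IsDetached(), which this patch defines further down in spaces.h as "heap_ == nullptr": once the read-only space has been detached from its heap, allocating into it is a bug. A tiny model of that state check:

struct Heap {};

struct Space {
  Heap* heap_ = nullptr;

  bool IsDetached() const { return heap_ == nullptr; }
  void DetachFromHeap() { heap_ = nullptr; }
};

// Usage pattern mirrored by the DCHECKs above (pseudocode):
//   DCHECK_IMPLIES(identity() == RO_SPACE, !IsDetached());
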
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index 2c8cbdfc32c..438308a346d 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -8,6 +8,7 @@
#include <utility>
#include "src/base/bits.h"
+#include "src/base/lsan.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
@@ -44,9 +45,9 @@ STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);
// ----------------------------------------------------------------------------
-// HeapObjectIterator
+// PagedSpaceObjectIterator
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(PagedSpace* space)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(space),
@@ -57,28 +58,28 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
#endif
}
-HeapObjectIterator::HeapObjectIterator(Page* page)
+PagedSpaceObjectIterator::PagedSpaceObjectIterator(Page* page)
: cur_addr_(kNullAddress),
cur_end_(kNullAddress),
space_(reinterpret_cast<PagedSpace*>(page->owner())),
page_range_(page),
current_page_(page_range_.begin()) {
-#ifdef DEBUG
- Space* owner = page->owner();
+#ifdef V8_SHARED_RO_HEAP
// TODO(v8:7464): Always enforce this once PagedSpace::Verify is no longer
// used to verify read-only space for non-shared builds.
-#ifdef V8_SHARED_RO_HEAP
- DCHECK_NE(owner->identity(), RO_SPACE);
-#endif
- // Do not access the heap of the read-only space.
- DCHECK(owner->identity() == RO_SPACE || owner->identity() == OLD_SPACE ||
- owner->identity() == MAP_SPACE || owner->identity() == CODE_SPACE);
+ DCHECK(!page->InReadOnlySpace());
+#endif // V8_SHARED_RO_HEAP
+
+#ifdef DEBUG
+ AllocationSpace owner = page->owner_identity();
+ DCHECK(owner == RO_SPACE || owner == OLD_SPACE || owner == MAP_SPACE ||
+ owner == CODE_SPACE);
#endif // DEBUG
}
// We have hit the end of the page and should advance to the next block of
// objects. This happens at the end of the page.
-bool HeapObjectIterator::AdvanceToNextPage() {
+bool PagedSpaceObjectIterator::AdvanceToNextPage() {
DCHECK_EQ(cur_addr_, cur_end_);
if (current_page_ == page_range_.end()) return false;
Page* cur_page = *(current_page_++);
@@ -105,14 +106,14 @@ PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
: heap_(heap) {
DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);
- for (SpaceIterator it(heap_); it.has_next();) {
- it.next()->PauseAllocationObservers();
+ for (SpaceIterator it(heap_); it.HasNext();) {
+ it.Next()->PauseAllocationObservers();
}
}
PauseAllocationObserversScope::~PauseAllocationObserversScope() {
- for (SpaceIterator it(heap_); it.has_next();) {
- it.next()->ResumeAllocationObservers();
+ for (SpaceIterator it(heap_); it.HasNext();) {
+ it.Next()->ResumeAllocationObservers();
}
}
@@ -539,10 +540,13 @@ size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
return AllocatableMemoryInDataPage();
}
-Heap* MemoryChunk::synchronized_heap() {
- return reinterpret_cast<Heap*>(
- base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
+#ifdef THREAD_SANITIZER
+void MemoryChunk::SynchronizedHeapLoad() {
+ CHECK(reinterpret_cast<Heap*>(base::Acquire_Load(
+ reinterpret_cast<base::AtomicWord*>(&heap_))) != nullptr ||
+ InReadOnlySpace());
}
+#endif
void MemoryChunk::InitializationMemoryFence() {
base::SeqCst_MemoryFence();
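
[Editor's note] The old synchronized_heap() accessor becomes a ThreadSanitizer-only check: under TSAN the heap_ field is re-read with an acquire load purely so the sanitizer sees a happens-before edge with the release-style publication done during chunk initialization; the regular fast path is unchanged. A standalone sketch of that acquire/release pairing using std::atomic in place of V8's base atomics:

#include <atomic>
#include <cassert>

struct Heap {};

struct Chunk {
  std::atomic<Heap*> heap{nullptr};

  // Publisher side: all prior initialization becomes visible before heap_.
  void Publish(Heap* h) { heap.store(h, std::memory_order_release); }

  // TSAN-style check: the acquire load synchronizes with Publish(), so the
  // sanitizer records an ordering instead of reporting a data race.
  void SynchronizedHeapLoadCheck(bool in_read_only_space) {
    assert(heap.load(std::memory_order_acquire) != nullptr ||
           in_read_only_space);
  }
};
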
@@ -561,8 +565,7 @@ void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
DCHECK(permission == PageAllocator::kRead ||
permission == PageAllocator::kReadExecute);
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner()->identity() == CODE_SPACE ||
- owner()->identity() == CODE_LO_SPACE);
+ DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Decrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
@@ -596,8 +599,7 @@ void MemoryChunk::SetReadAndExecutable() {
void MemoryChunk::SetReadAndWritable() {
DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
- DCHECK(owner()->identity() == CODE_SPACE ||
- owner()->identity() == CODE_LO_SPACE);
+ DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
// Incrementing the write_unprotect_counter_ and changing the page
// protection mode has to be atomic.
base::MutexGuard guard(page_protection_change_mutex_);
@@ -688,16 +690,11 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Executability executable, Space* owner,
VirtualMemory reservation) {
MemoryChunk* chunk = FromAddress(base);
-
DCHECK_EQ(base, chunk->address());
+ new (chunk) BasicMemoryChunk(size, area_start, area_end);
+ DCHECK(HasHeaderSentinel(area_start));
chunk->heap_ = heap;
- chunk->size_ = size;
- chunk->header_sentinel_ = HeapObject::FromAddress(base).ptr();
- DCHECK(HasHeaderSentinel(area_start));
- chunk->area_start_ = area_start;
- chunk->area_end_ = area_end;
- chunk->flags_ = Flags(NO_FLAGS);
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
@@ -716,7 +713,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->allocated_bytes_ = chunk->area_size();
chunk->wasted_memory_ = 0;
chunk->young_generation_bitmap_ = nullptr;
- chunk->marking_bitmap_ = nullptr;
chunk->local_tracker_ = nullptr;
chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
@@ -724,25 +720,18 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->external_backing_store_bytes_
[ExternalBackingStoreType::kExternalString] = 0;
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- chunk->categories_[i] = nullptr;
- }
+ chunk->categories_ = nullptr;
- chunk->AllocateMarkingBitmap();
+ heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
+ 0);
if (owner->identity() == RO_SPACE) {
heap->incremental_marking()
->non_atomic_marking_state()
->bitmap(chunk)
->MarkAllBits();
- } else {
- heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
- 0);
+ chunk->SetFlag(READ_ONLY_HEAP);
}
- DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
- DCHECK_EQ(kHeapOffset, OFFSET_OF(MemoryChunk, heap_));
- DCHECK_EQ(kOwnerOffset, OFFSET_OF(MemoryChunk, owner_));
-
if (executable == EXECUTABLE) {
chunk->SetFlag(IS_EXECUTABLE);
if (heap->write_protect_code_memory()) {
@@ -768,11 +757,11 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
return chunk;
}
-Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
+Page* PagedSpace::InitializePage(MemoryChunk* chunk) {
Page* page = static_cast<Page*>(chunk);
- DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
- page->owner()->identity()),
- page->area_size());
+ DCHECK_EQ(
+ MemoryChunkLayout::AllocatableMemoryInMemoryChunk(page->owner_identity()),
+ page->area_size());
// Make sure that categories are initialized before freeing the area.
page->ResetAllocationStatistics();
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
@@ -783,8 +772,7 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
return page;
}
-Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
- DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
+Page* SemiSpace::InitializePage(MemoryChunk* chunk) {
bool in_to_space = (id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE);
Page* page = static_cast<Page*>(chunk);
@@ -829,24 +817,31 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
}
void Page::AllocateFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ DCHECK_NULL(categories_);
+ categories_ = new FreeListCategory*[free_list()->number_of_categories()]();
+ for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
+ DCHECK_NULL(categories_[i]);
categories_[i] = new FreeListCategory(
reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
}
}
void Page::InitializeFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
}
}
void Page::ReleaseFreeListCategories() {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- if (categories_[i] != nullptr) {
- delete categories_[i];
- categories_[i] = nullptr;
+ if (categories_ != nullptr) {
+ for (int i = kFirstCategory; i <= free_list()->last_category(); i++) {
+ if (categories_[i] != nullptr) {
+ delete categories_[i];
+ categories_[i] = nullptr;
+ }
}
+ delete[] categories_;
+ categories_ = nullptr;
}
}
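
[Editor's note] categories_ is no longer a fixed-size array on every page; it becomes a heap-allocated array of category pointers sized by the owning free list (number_of_categories()), and ReleaseFreeListCategories now tears down both the categories and the array itself. A generic sketch of that allocate/initialize/release lifecycle with placeholder types:

#include <cstddef>

struct FreeListCategory {
  int type = -1;
  void Initialize(int t) { type = t; }
};

struct PageCategories {
  FreeListCategory** categories = nullptr;
  int count = 0;

  void Allocate(int number_of_categories) {
    // Value-initialized array of pointers, one slot per category.
    categories = new FreeListCategory*[number_of_categories]();
    count = number_of_categories;
    for (int i = 0; i < count; i++) categories[i] = new FreeListCategory();
  }

  void Initialize() {
    for (int i = 0; i < count; i++) categories[i]->Initialize(i);
  }

  void Release() {
    if (categories == nullptr) return;
    for (int i = 0; i < count; i++) delete categories[i];
    delete[] categories;  // Matches the new[] in Allocate().
    categories = nullptr;
  }
};
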
@@ -856,23 +851,21 @@ Page* Page::ConvertNewToOld(Page* old_page) {
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
old_page->SetFlags(0, static_cast<uintptr_t>(~0));
- Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
+ Page* new_page = old_space->InitializePage(old_page);
old_space->AddPage(new_page);
return new_page;
}
size_t MemoryChunk::CommittedPhysicalMemory() {
- if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
+ if (!base::OS::HasLazyCommits() || owner_identity() == LO_SPACE)
return size();
return high_water_mark_;
}
-bool MemoryChunk::InOldSpace() const {
- return owner()->identity() == OLD_SPACE;
-}
+bool MemoryChunk::InOldSpace() const { return owner_identity() == OLD_SPACE; }
bool MemoryChunk::InLargeObjectSpace() const {
- return owner()->identity() == LO_SPACE;
+ return owner_identity() == LO_SPACE;
}
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
@@ -1131,15 +1124,15 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
Address new_area_end) {
VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
- chunk->size_ -= bytes_to_free;
- chunk->area_end_ = new_area_end;
+ chunk->set_size(chunk->size() - bytes_to_free);
+ chunk->set_area_end(new_area_end);
if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
// Add guard page at the end.
size_t page_size = GetCommitPageSize();
- DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
+ DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
DCHECK_EQ(chunk->address() + chunk->size(),
chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
- reservation->SetPermissions(chunk->area_end_, page_size,
+ reservation->SetPermissions(chunk->area_end(), page_size,
PageAllocator::kNoAccess);
}
// On e.g. Windows, a reservation may be larger than a page and releasing
@@ -1181,7 +1174,7 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
- chunk->ReleaseAllocatedMemory();
+ chunk->ReleaseAllAllocatedMemory();
VirtualMemory* reservation = chunk->reserved_memory();
if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
@@ -1191,7 +1184,7 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
reservation->Free();
} else {
// Only read-only pages can have non-initialized reservation object.
- DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
+ DCHECK_EQ(RO_SPACE, chunk->owner_identity());
FreeMemory(page_allocator(chunk->executable()), chunk->address(),
chunk->size());
}
@@ -1251,7 +1244,7 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
chunk = AllocateChunk(size, size, executable, owner);
}
if (chunk == nullptr) return nullptr;
- return owner->InitializePage(chunk, executable);
+ return owner->InitializePage(chunk);
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
@@ -1368,7 +1361,7 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
// -----------------------------------------------------------------------------
// MemoryChunk implementation
-void MemoryChunk::ReleaseAllocatedMemory() {
+void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() {
if (mutex_ != nullptr) {
delete mutex_;
mutex_ = nullptr;
@@ -1377,20 +1370,29 @@ void MemoryChunk::ReleaseAllocatedMemory() {
delete page_protection_change_mutex_;
page_protection_change_mutex_ = nullptr;
}
+ if (code_object_registry_ != nullptr) {
+ delete code_object_registry_;
+ code_object_registry_ = nullptr;
+ }
+
ReleaseSlotSet<OLD_TO_NEW>();
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
ReleaseInvalidatedSlots();
+
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
- if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
- if (code_object_registry_ != nullptr) delete code_object_registry_;
+}
+void MemoryChunk::ReleaseAllAllocatedMemory() {
if (!IsLargePage()) {
Page* page = static_cast<Page*>(this);
page->ReleaseFreeListCategories();
}
+
+ ReleaseAllocatedMemoryNeededForWritableChunk();
+ if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
}
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
@@ -1408,7 +1410,7 @@ template V8_EXPORT_PRIVATE SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();
template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
- SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
+ SlotSet* slot_set = AllocateAndInitializeSlotSet(size(), address());
SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
&slot_set_[type], nullptr, slot_set);
if (old_slot_set != nullptr) {
@@ -1527,23 +1529,10 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
young_generation_bitmap_ = nullptr;
}
-void MemoryChunk::AllocateMarkingBitmap() {
- DCHECK_NULL(marking_bitmap_);
- marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
-}
-
-void MemoryChunk::ReleaseMarkingBitmap() {
- DCHECK_NOT_NULL(marking_bitmap_);
- free(marking_bitmap_);
- marking_bitmap_ = nullptr;
-}
-
// -----------------------------------------------------------------------------
// PagedSpace implementation
void Space::CheckOffsetsAreConsistent() const {
- static_assert(Space::kIdOffset == heap_internals::Space::kIdOffset,
- "ID offset inconsistent");
DCHECK_EQ(Space::kIdOffset, OFFSET_OF(Space, id_));
}
@@ -1592,8 +1581,8 @@ intptr_t Space::GetNextInlineAllocationStepSize() {
}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
- Executability executable)
- : SpaceWithLinearArea(heap, space), executable_(executable) {
+ Executability executable, FreeList* free_list)
+ : SpaceWithLinearArea(heap, space, free_list), executable_(executable) {
area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
accounting_stats_.Clear();
}
@@ -1614,6 +1603,7 @@ void PagedSpace::RefillFreeList() {
identity() != MAP_SPACE && identity() != RO_SPACE) {
return;
}
+ DCHECK(!IsDetached());
MarkCompactCollector* collector = heap()->mark_compact_collector();
size_t added = 0;
{
@@ -1713,21 +1703,7 @@ void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
base::MutexGuard guard(mutex());
- // Check for pages that still contain free list entries. Bail out for smaller
- // categories.
- const int minimum_category =
- static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
- Page* page = free_list()->GetPageForCategoryType(kHuge);
- if (!page && static_cast<int>(kLarge) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kLarge);
- if (!page && static_cast<int>(kMedium) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kMedium);
- if (!page && static_cast<int>(kSmall) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kSmall);
- if (!page && static_cast<int>(kTiny) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kTiny);
- if (!page && static_cast<int>(kTiniest) >= minimum_category)
- page = free_list()->GetPageForCategoryType(kTiniest);
+ Page* page = free_list()->GetPageForSize(size_in_bytes);
if (!page) return nullptr;
RemovePage(page);
return page;
@@ -1769,9 +1745,9 @@ size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
void PagedSpace::ResetFreeList() {
for (Page* page : *this) {
- free_list_.EvictFreeListItems(page);
+ free_list_->EvictFreeListItems(page);
}
- DCHECK(free_list_.IsEmpty());
+ DCHECK(free_list_->IsEmpty());
}
void PagedSpace::ShrinkImmortalImmovablePages() {
@@ -1934,8 +1910,8 @@ void PagedSpace::ReleasePage(Page* page) {
page));
DCHECK_EQ(page->owner(), this);
- free_list_.EvictFreeListItems(page);
- DCHECK(!free_list_.ContainsPageFreeListItems(page));
+ free_list_->EvictFreeListItems(page);
+ DCHECK(!free_list_->ContainsPageFreeListItems(page));
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
DCHECK(!top_on_previous_step_);
@@ -1972,7 +1948,7 @@ void PagedSpace::SetReadAndWritable() {
}
std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
- return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
+ return std::unique_ptr<ObjectIterator>(new PagedSpaceObjectIterator(this));
}
bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
@@ -1998,7 +1974,7 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
}
size_t new_node_size = 0;
- FreeSpace new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
+ FreeSpace new_node = free_list_->Allocate(size_in_bytes, &new_node_size);
if (new_node.is_null()) return false;
DCHECK_GE(new_node_size, size_in_bytes);
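
[Editor's note] The refill path above asks the space's free list for a node of at least size_in_bytes and then turns that node into the new linear allocation area (the bump-pointer region). A simplified, self-contained sketch of that handoff; SimpleFreeList, FreeSpaceNode, and LinearArea are stand-ins for the V8 classes:

#include <cstddef>
#include <cstdint>

using Address = std::uintptr_t;

struct FreeSpaceNode {
  Address start = 0;
  std::size_t size = 0;
  bool is_null() const { return size == 0; }
};

struct SimpleFreeList {
  FreeSpaceNode block;  // A single free block, for illustration only.
  FreeSpaceNode Allocate(std::size_t min_size, std::size_t* node_size) {
    if (block.size < min_size) return {};
    *node_size = block.size;
    FreeSpaceNode result = block;
    block = {};
    return result;
  }
};

struct LinearArea {
  Address top = 0;
  Address limit = 0;
};

bool RefillFromFreeList(SimpleFreeList* list, LinearArea* lab,
                        std::size_t size_in_bytes) {
  std::size_t node_size = 0;
  FreeSpaceNode node = list->Allocate(size_in_bytes, &node_size);
  if (node.is_null()) return false;
  // The whole node becomes the new bump-pointer region; subsequent allocations
  // just advance |top| until it reaches |limit|.
  lab->top = node.start;
  lab->limit = node.start + node_size;
  return true;
}
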
@@ -2055,7 +2031,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
allocation_pointer_found_in_space = true;
}
CHECK(page->SweepingDone());
- HeapObjectIterator it(page);
+ PagedSpaceObjectIterator it(page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
@@ -2066,8 +2042,8 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
// be in map space.
Map map = object.map();
CHECK(map.IsMap());
- CHECK(isolate->heap()->map_space()->Contains(map) ||
- ReadOnlyHeap::Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) ||
+ isolate->heap()->map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
@@ -2118,7 +2094,7 @@ void PagedSpace::VerifyLiveBytes() {
heap()->incremental_marking()->marking_state();
for (Page* page : *this) {
CHECK(page->SweepingDone());
- HeapObjectIterator it(page);
+ PagedSpaceObjectIterator it(page);
int black_size = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
@@ -2138,7 +2114,7 @@ void PagedSpace::VerifyCountersAfterSweeping() {
for (Page* page : *this) {
DCHECK(page->SweepingDone());
total_capacity += page->area_size();
- HeapObjectIterator it(page);
+ PagedSpaceObjectIterator it(page);
size_t real_allocated = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
if (!object.IsFiller()) {
@@ -2185,7 +2161,7 @@ void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
size_t initial_semispace_capacity,
size_t max_semispace_capacity)
- : SpaceWithLinearArea(heap, NEW_SPACE),
+ : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList()),
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace) {
DCHECK(initial_semispace_capacity <= max_semispace_capacity);
@@ -2528,11 +2504,11 @@ void SpaceWithLinearArea::InlineAllocationStep(Address top,
}
std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
- return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
+ return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this));
}
#ifdef VERIFY_HEAP
-// We do not use the SemiSpaceIterator because verification doesn't assume
+// We do not use the SemiSpaceObjectIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify(Isolate* isolate) {
// The allocation pointer should be in the space or at the very end.
@@ -2560,8 +2536,7 @@ void NewSpace::Verify(Isolate* isolate) {
// be in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
- CHECK(heap()->map_space()->Contains(map) ||
- heap()->read_only_space()->Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// The object should not be code or a map.
CHECK(!object.IsMap());
@@ -2633,6 +2608,9 @@ bool SemiSpace::Commit() {
DCHECK(!is_committed());
const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
+ // Pages in the new spaces can be moved to the old space by the full
+ // collector. Therefore, they must be initialized with the same FreeList as
+ // old pages.
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
@@ -2890,16 +2868,14 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
}
#endif
-
// -----------------------------------------------------------------------------
-// SemiSpaceIterator implementation.
+// SemiSpaceObjectIterator implementation.
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
+SemiSpaceObjectIterator::SemiSpaceObjectIterator(NewSpace* space) {
Initialize(space->first_allocatable_address(), space->top());
}
-
-void SemiSpaceIterator::Initialize(Address start, Address end) {
+void SemiSpaceObjectIterator::Initialize(Address start, Address end) {
SemiSpace::AssertValidRange(start, end);
current_ = start;
limit_ = end;
@@ -2925,19 +2901,22 @@ void FreeListCategory::Reset() {
set_prev(nullptr);
set_next(nullptr);
available_ = 0;
+ length_ = 0;
}
FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace node = top();
- if (node.is_null() || static_cast<size_t>(node.Size()) < minimum_size) {
+ DCHECK(!node.is_null());
+ if (static_cast<size_t>(node.Size()) < minimum_size) {
*node_size = 0;
return FreeSpace();
}
set_top(node.next());
*node_size = node.Size();
available_ -= *node_size;
+ length_--;
return node;
}
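
[Editor's note] FreeListCategory now maintains an explicit length_ counter (incremented in Free, decremented in PickNodeFromList and SearchForNodeInList), which is what lets FreeListLength() in spaces.h return in O(1) instead of walking the list. A toy sketch of the invariant being kept:

#include <cstddef>
#include <vector>

// Toy category: the counter must always equal the number of stored blocks.
class Category {
 public:
  void Free(std::size_t size) {
    blocks_.push_back(size);
    length_++;
  }
  bool Pick(std::size_t minimum, std::size_t* out) {
    if (blocks_.empty() || blocks_.back() < minimum) return false;
    *out = blocks_.back();
    blocks_.pop_back();
    length_--;  // Keep the cached length in sync with the list.
    return true;
  }
  int length() const { return length_; }  // O(1), no list walk.

 private:
  std::vector<std::size_t> blocks_;
  int length_ = 0;
};
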
@@ -2951,12 +2930,13 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
if (size >= minimum_size) {
DCHECK_GE(available_, size);
available_ -= size;
+ length_--;
if (cur_node == top()) {
set_top(cur_node.next());
}
if (!prev_non_evac_node.is_null()) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
- if (chunk->owner()->identity() == CODE_SPACE) {
+ if (chunk->owner_identity() == CODE_SPACE) {
chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
}
prev_non_evac_node.set_next(cur_node.next());
@@ -2976,6 +2956,7 @@ void FreeListCategory::Free(Address start, size_t size_in_bytes,
free_space.set_next(top());
set_top(free_space);
available_ += size_in_bytes;
+ length_++;
if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
owner()->AddCategory(this);
}
@@ -2983,17 +2964,14 @@ void FreeListCategory::Free(Address start, size_t size_in_bytes,
void FreeListCategory::RepairFreeList(Heap* heap) {
+ Map free_space_map = ReadOnlyRoots(heap).free_space_map();
FreeSpace n = top();
while (!n.is_null()) {
- MapWordSlot map_location = n.map_slot();
- // We can't use .is_null() here because *map_location returns an
- // Object (for which "is null" is not defined, as it would be
- // indistinguishable from "is Smi(0)"). Only HeapObject has "is_null()".
- if (map_location.contains_value(kNullAddress)) {
- map_location.store(ReadOnlyRoots(heap).free_space_map());
+ ObjectSlot map_slot = n.map_slot();
+ if (map_slot.contains_value(kNullAddress)) {
+ map_slot.store(free_space_map);
} else {
- DCHECK(map_location.contains_value(
- ReadOnlyRoots(heap).free_space_map().ptr()));
+ DCHECK(map_slot.contains_value(free_space_map.ptr()));
}
n = n.next();
}
@@ -3004,21 +2982,50 @@ void FreeListCategory::Relink() {
owner()->AddCategory(this);
}
-FreeList::FreeList() : wasted_bytes_(0) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- categories_[i] = nullptr;
+// ------------------------------------------------
+// Generic FreeList methods (alloc/free related)
+
+FreeList* FreeList::CreateFreeList() {
+ if (FLAG_gc_freelist_strategy == 1) {
+ return new FreeListFastAlloc();
+ } else if (FLAG_gc_freelist_strategy == 2) {
+ return new FreeListMany();
+ } else {
+ return new FreeListLegacy();
}
- Reset();
}
+FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
+ size_t minimum_size, size_t* node_size) {
+ FreeListCategory* category = categories_[type];
+ if (category == nullptr) return FreeSpace();
+ FreeSpace node = category->PickNodeFromList(minimum_size, node_size);
+ if (!node.is_null()) {
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ }
+ if (category->is_empty()) {
+ RemoveCategory(category);
+ }
+ return node;
+}
-void FreeList::Reset() {
- ForAllFreeListCategories(
- [](FreeListCategory* category) { category->Reset(); });
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- categories_[i] = nullptr;
+FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
+ size_t minimum_size,
+ size_t* node_size) {
+ FreeListCategoryIterator it(this, type);
+ FreeSpace node;
+ while (it.HasNext()) {
+ FreeListCategory* current = it.Next();
+ node = current->SearchForNodeInList(minimum_size, node_size);
+ if (!node.is_null()) {
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ if (current->is_empty()) {
+ RemoveCategory(current);
+ }
+ return node;
+ }
}
- wasted_bytes_ = 0;
+ return node;
}
size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
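
[Editor's note] CreateFreeList() above selects the free-list implementation from FLAG_gc_freelist_strategy (1 = FreeListFastAlloc, 2 = FreeListMany, anything else = FreeListLegacy), and each Space now owns the returned object through a std::unique_ptr (see the new Space constructor in spaces.h later in this diff). A sketch of that flag-driven factory shape with placeholder classes:

#include <memory>

struct FreeListBase { virtual ~FreeListBase() = default; };
struct LegacyList final : FreeListBase {};
struct FastAllocList final : FreeListBase {};
struct ManyList final : FreeListBase {};

// |strategy| stands in for FLAG_gc_freelist_strategy.
std::unique_ptr<FreeListBase> CreateFreeList(int strategy) {
  switch (strategy) {
    case 1:  return std::make_unique<FastAllocList>();
    case 2:  return std::make_unique<ManyList>();
    default: return std::make_unique<LegacyList>();
  }
}
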
@@ -3026,7 +3033,7 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
page->DecreaseAllocatedBytes(size_in_bytes);
// Blocks have to be a minimum size to hold free list items.
- if (size_in_bytes < kMinBlockSize) {
+ if (size_in_bytes < min_block_size_) {
page->add_wasted_memory(size_in_bytes);
wasted_bytes_ += size_in_bytes;
return size_in_bytes;
@@ -3041,52 +3048,22 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
return 0;
}
-FreeSpace FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size) {
- FreeListCategoryIterator it(this, type);
- FreeSpace node;
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- node = current->PickNodeFromList(minimum_size, node_size);
- if (!node.is_null()) {
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
- }
- RemoveCategory(current);
- }
- return node;
-}
+// ------------------------------------------------
+// FreeListLegacy implementation
-FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type,
- size_t minimum_size, size_t* node_size) {
- if (categories_[type] == nullptr) return FreeSpace();
- FreeSpace node = categories_[type]->PickNodeFromList(minimum_size, node_size);
- if (!node.is_null()) {
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- }
- return node;
-}
+FreeListLegacy::FreeListLegacy() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = kHuge + 1;
+ last_category_ = kHuge;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
-FreeSpace FreeList::SearchForNodeInList(FreeListCategoryType type,
- size_t* node_size,
- size_t minimum_size) {
- FreeListCategoryIterator it(this, type);
- FreeSpace node;
- while (it.HasNext()) {
- FreeListCategory* current = it.Next();
- node = current->SearchForNodeInList(minimum_size, node_size);
- if (!node.is_null()) {
- DCHECK(IsVeryLong() || Available() == SumFreeLists());
- return node;
- }
- if (current->is_empty()) {
- RemoveCategory(current);
- }
- }
- return node;
+ Reset();
}
-FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
+FreeListLegacy::~FreeListLegacy() { delete[] categories_; }
+
+FreeSpace FreeListLegacy::Allocate(size_t size_in_bytes, size_t* node_size) {
DCHECK_GE(kMaxBlockSize, size_in_bytes);
FreeSpace node;
// First try the allocation fast path: try to allocate the minimum element
@@ -3094,21 +3071,31 @@ FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
FreeListCategoryType type =
SelectFastAllocationFreeListCategoryType(size_in_bytes);
for (int i = type; i < kHuge && node.is_null(); i++) {
- node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
- node_size);
+ node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
}
if (node.is_null()) {
// Next search the huge list for free list nodes. This takes linear time in
// the number of huge elements.
- node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
+ node = SearchForNodeInList(kHuge, size_in_bytes, node_size);
}
if (node.is_null() && type != kHuge) {
- // We didn't find anything in the huge list. Now search the best fitting
- // free list for a node that has at least the requested size.
+ // We didn't find anything in the huge list.
type = SelectFreeListCategoryType(size_in_bytes);
- node = TryFindNodeIn(type, size_in_bytes, node_size);
+
+ if (type == kTiniest) {
+ // For this tiniest object, the tiny list hasn't been searched yet.
+ // Now searching the tiny list.
+ node = TryFindNodeIn(kTiny, size_in_bytes, node_size);
+ }
+
+ if (node.is_null()) {
+ // Now search the best fitting free list for a node that has at least the
+ // requested size.
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
+ }
}
if (!node.is_null()) {
@@ -3119,6 +3106,122 @@ FreeSpace FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
return node;
}
+// ------------------------------------------------
+// FreeListFastAlloc implementation
+
+FreeListFastAlloc::FreeListFastAlloc() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = kHuge + 1;
+ last_category_ = kHuge;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
+
+ Reset();
+}
+
+FreeListFastAlloc::~FreeListFastAlloc() { delete[] categories_; }
+
+FreeSpace FreeListFastAlloc::Allocate(size_t size_in_bytes, size_t* node_size) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+ FreeSpace node;
+ // Try to allocate the biggest element possible (to make the most of later
+ // bump-pointer allocations).
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ for (int i = kHuge; i >= type && node.is_null(); i--) {
+ node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
+ }
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// FreeListMany implementation
+
+// Cf. the declaration of |categories_max| in |spaces.h| to see how this is
+// computed.
+const size_t FreeListMany::categories_max[kNumberOfCategories] = {
+ 24, 32, 40, 48, 56, 64, 72,
+ 80, 88, 96, 104, 112, 120, 128,
+ 136, 144, 152, 160, 168, 176, 184,
+ 192, 200, 208, 216, 224, 232, 240,
+ 248, 256, 384, 512, 768, 1024, 1536,
+ 2048, 3072, 4080, 4088, 4096, 6144, 8192,
+ 12288, 16384, 24576, 32768, 49152, 65536, Page::kPageSize};
+
+FreeListMany::FreeListMany() {
+ // Initializing base (FreeList) fields
+ number_of_categories_ = kNumberOfCategories;
+ last_category_ = number_of_categories_ - 1;
+ min_block_size_ = kMinBlockSize;
+ categories_ = new FreeListCategory*[number_of_categories_]();
+
+ Reset();
+}
+
+size_t FreeListMany::GuaranteedAllocatable(size_t maximum_freed) {
+ if (maximum_freed < categories_max[0]) {
+ return 0;
+ }
+ for (int cat = kFirstCategory + 1; cat < last_category_; cat++) {
+ if (maximum_freed <= categories_max[cat]) {
+ return categories_max[cat - 1];
+ }
+ }
+ return maximum_freed;
+}
+
+Page* FreeListMany::GetPageForSize(size_t size_in_bytes) {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(last_category_);
+ for (int cat = last_category_ - 1; !page && cat >= minimum_category; cat--) {
+ page = GetPageForCategoryType(cat);
+ }
+ return page;
+}
+
+FreeListMany::~FreeListMany() { delete[] categories_; }
+
+FreeSpace FreeListMany::Allocate(size_t size_in_bytes, size_t* node_size) {
+ DCHECK_GE(kMaxBlockSize, size_in_bytes);
+ FreeSpace node;
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ for (int i = type; i < last_category_ && node.is_null(); i++) {
+ node = TryFindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
+ }
+
+ if (node.is_null()) {
+ // Searching each element of the last category.
+ node = SearchForNodeInList(last_category_, size_in_bytes, node_size);
+ }
+
+ if (!node.is_null()) {
+ Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size);
+ }
+
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+// ------------------------------------------------
+// Generic FreeList methods (non alloc/free related)
+
+void FreeList::Reset() {
+ ForAllFreeListCategories(
+ [](FreeListCategory* category) { category->Reset(); });
+ for (int i = kFirstCategory; i < number_of_categories_; i++) {
+ categories_[i] = nullptr;
+ }
+ wasted_bytes_ = 0;
+}
+
size_t FreeList::EvictFreeListItems(Page* page) {
size_t sum = 0;
page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
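
[Editor's note] categories_max[] above gives the upper size bound of each FreeListMany category, so mapping a request size to its starting category is a scan (or binary search) over that table, after which Allocate walks upward through larger categories. The concrete FreeListMany::SelectFreeListCategoryType override is not shown in this hunk, so the lookup below is only a plausible sketch consistent with the table:

#include <cstddef>

// Upper bounds per category, mirroring FreeListMany::categories_max above
// (truncated here for brevity).
constexpr std::size_t kCategoriesMax[] = {24, 32, 40, 48, 56, 64, 72, 80};
constexpr int kNumCategories =
    sizeof(kCategoriesMax) / sizeof(kCategoriesMax[0]);

// Smallest category whose maximum still fits |size_in_bytes|; the last
// category collects everything larger.
int SelectCategory(std::size_t size_in_bytes) {
  for (int cat = 0; cat < kNumCategories - 1; cat++) {
    if (size_in_bytes <= kCategoriesMax[cat]) return cat;
  }
  return kNumCategories - 1;
}
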
@@ -3148,7 +3251,7 @@ void FreeList::RepairLists(Heap* heap) {
bool FreeList::AddCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
- DCHECK_LT(type, kNumberOfCategories);
+ DCHECK_LT(type, number_of_categories_);
FreeListCategory* top = categories_[type];
if (category->is_empty()) return false;
@@ -3165,7 +3268,7 @@ bool FreeList::AddCategory(FreeListCategory* category) {
void FreeList::RemoveCategory(FreeListCategory* category) {
FreeListCategoryType type = category->type_;
- DCHECK_LT(type, kNumberOfCategories);
+ DCHECK_LT(type, number_of_categories_);
FreeListCategory* top = categories_[type];
// Common double-linked list removal.
@@ -3193,8 +3296,16 @@ void FreeList::PrintCategories(FreeListCategoryType type) {
PrintF("null\n");
}
+int MemoryChunk::FreeListsLength() {
+ int length = 0;
+ for (int cat = kFirstCategory; cat <= free_list()->last_category(); cat++) {
+ if (categories_[cat] != nullptr) {
+ length += categories_[cat]->FreeListLength();
+ }
+ }
+ return length;
+}
-#ifdef DEBUG
size_t FreeListCategory::SumFreeList() {
size_t sum = 0;
FreeSpace cur = top();
@@ -3209,20 +3320,10 @@ size_t FreeListCategory::SumFreeList() {
return sum;
}
-int FreeListCategory::FreeListLength() {
- int length = 0;
- FreeSpace cur = top();
- while (!cur.is_null()) {
- length++;
- cur = cur.next();
- if (length == kVeryLongFreeList) return length;
- }
- return length;
-}
-
+#ifdef DEBUG
bool FreeList::IsVeryLong() {
int len = 0;
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ for (int i = kFirstCategory; i < number_of_categories_; i++) {
FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
while (it.HasNext()) {
len += it.Next()->FreeListLength();
@@ -3254,7 +3355,7 @@ void PagedSpace::PrepareForMarkCompact() {
FreeLinearAllocationArea();
// Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
+ free_list_->Reset();
}
size_t PagedSpace::SizeOfObjects() {
@@ -3347,7 +3448,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
DCHECK((CountTotalPages() > 1) ||
- (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
+ (static_cast<size_t>(size_in_bytes) <= free_list_->Available()));
return RefillLinearAllocationAreaFromFreeList(
static_cast<size_t>(size_in_bytes));
}
@@ -3366,18 +3467,21 @@ void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
#endif
ReadOnlySpace::ReadOnlySpace(Heap* heap)
- : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
+ : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList()),
is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
}
void ReadOnlyPage::MakeHeaderRelocatable() {
- if (mutex_ != nullptr) {
- delete mutex_;
- heap_ = nullptr;
- mutex_ = nullptr;
- local_tracker_ = nullptr;
- reservation_.Reset();
+ ReleaseAllocatedMemoryNeededForWritableChunk();
+ // Detached read-only space needs to have a valid marking bitmap and free list
+ // categories. Instruct Lsan to ignore them if required.
+ LSAN_IGNORE_OBJECT(categories_);
+ for (int i = kFirstCategory; i < free_list()->number_of_categories(); i++) {
+ LSAN_IGNORE_OBJECT(categories_[i]);
}
+ LSAN_IGNORE_OBJECT(marking_bitmap_);
+ heap_ = nullptr;
+ owner_ = nullptr;
}
void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
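
[Editor's note] MakeHeaderRelocatable now releases only the allocations a writable chunk needs, while the marking bitmap and free-list category array deliberately outlive the detached read-only page; LSAN_IGNORE_OBJECT (from the newly included src/base/lsan.h) tells LeakSanitizer these are intentional. The exact macro body is an assumption here; a typical shape of such a guard looks like this:

// Illustrative only: the real macro lives in src/base/lsan.h.
#if defined(LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#define LSAN_IGNORE_OBJECT(ptr) __lsan_ignore_object(ptr)
#else
#define LSAN_IGNORE_OBJECT(ptr) ((void)(ptr))
#endif

// Usage pattern: allocations that intentionally outlive their owner are
// registered so LeakSanitizer does not report them as leaks.
//   int* bitmap = new int[64]();
//   LSAN_IGNORE_OBJECT(bitmap);
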
@@ -3396,7 +3500,7 @@ void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
// were created with the wrong FreeSpaceMap (normally nullptr), so we need to
// fix them.
void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
- free_list_.RepairLists(heap());
+ free_list_->RepairLists(heap());
// Each page may have a small free space that is not tracked by a free list.
// Those free spaces still contain null as their map pointer.
// Overwrite them with new fillers.
@@ -3422,7 +3526,7 @@ void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
void ReadOnlySpace::ClearStringPaddingIfNeeded() {
if (is_string_padding_cleared_) return;
- ReadOnlyHeapIterator iterator(this);
+ ReadOnlyHeapObjectIterator iterator(this);
for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
if (o.IsSeqOneByteString()) {
SeqOneByteString::cast(o).clear_padding();
@@ -3480,13 +3584,14 @@ void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
}
// -----------------------------------------------------------------------------
-// LargeObjectIterator
+// LargeObjectSpaceObjectIterator
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
+LargeObjectSpaceObjectIterator::LargeObjectSpaceObjectIterator(
+ LargeObjectSpace* space) {
current_ = space->first_page();
}
-HeapObject LargeObjectIterator::Next() {
+HeapObject LargeObjectSpaceObjectIterator::Next() {
if (current_ == nullptr) return HeapObject();
HeapObject object = current_->GetObject();
@@ -3501,7 +3606,10 @@ LargeObjectSpace::LargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, LO_SPACE) {}
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
- : Space(heap, id), size_(0), page_count_(0), objects_size_(0) {}
+ : Space(heap, id, new NoFreeList()),
+ size_(0),
+ page_count_(0),
+ objects_size_(0) {}
void LargeObjectSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
@@ -3584,7 +3692,7 @@ LargePage* CodeLargeObjectSpace::FindPage(Address a) {
void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
- LargeObjectIterator it(this);
+ LargeObjectSpaceObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (marking_state->IsBlackOrGrey(obj)) {
Marking::MarkWhite(marking_state->MarkBitFrom(obj));
@@ -3614,7 +3722,7 @@ void CodeLargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
}
void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
- DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
+ DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
DCHECK(page->IsLargePage());
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
@@ -3697,7 +3805,8 @@ bool LargeObjectSpace::ContainsSlow(Address addr) {
}
std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
- return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
+ return std::unique_ptr<ObjectIterator>(
+ new LargeObjectSpaceObjectIterator(this));
}
#ifdef VERIFY_HEAP
@@ -3722,8 +3831,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// in map space or read-only space.
Map map = object.map();
CHECK(map.IsMap());
- CHECK(heap()->map_space()->Contains(map) ||
- heap()->read_only_space()->Contains(map));
+ CHECK(ReadOnlyHeap::Contains(map) || heap()->map_space()->Contains(map));
// We have only the following types in the large object space:
if (!(object.IsAbstractCode() || object.IsSeqString() ||
@@ -3787,7 +3895,7 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
#ifdef DEBUG
void LargeObjectSpace::Print() {
StdoutStream os;
- LargeObjectIterator it(this);
+ LargeObjectSpaceObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
obj.Print(os);
}
@@ -3796,9 +3904,9 @@ void LargeObjectSpace::Print() {
void Page::Print() {
// Make a best-effort to print the objects in the page.
PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
- this->owner()->name());
- printf(" --------------------------------------\n");
- HeapObjectIterator objects(this);
+ Heap::GetSpaceName(this->owner_identity()));
+ PrintF(" --------------------------------------\n");
+ PagedSpaceObjectIterator objects(this);
unsigned mark_size = 0;
for (HeapObject object = objects.Next(); !object.is_null();
object = objects.Next()) {
@@ -3811,8 +3919,8 @@ void Page::Print() {
object.ShortPrint();
PrintF("\n");
}
- printf(" --------------------------------------\n");
- printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
+ PrintF(" --------------------------------------\n");
+ PrintF(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
heap()->incremental_marking()->marking_state()->live_bytes(this));
}
@@ -3856,7 +3964,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
- DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
+ DCHECK_EQ(page->owner_identity(), NEW_LO_SPACE);
AllocationStep(object_size, result.address(), object_size);
return result;
}
diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h
index 7522cac9cb5..384c731f376 100644
--- a/chromium/v8/src/heap/spaces.h
+++ b/chromium/v8/src/heap/spaces.h
@@ -20,6 +20,7 @@
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
+#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
@@ -119,19 +120,10 @@ class Space;
#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
DCHECK((0 < size) && (size <= code_space->AreaSize()))
-enum FreeListCategoryType {
- kTiniest,
- kTiny,
- kSmall,
- kMedium,
- kLarge,
- kHuge,
-
- kFirstCategory = kTiniest,
- kLastCategory = kHuge,
- kNumberOfCategories = kLastCategory + 1,
- kInvalidCategory
-};
+using FreeListCategoryType = int;
+
+static const FreeListCategoryType kFirstCategory = 0;
+static const FreeListCategoryType kInvalidCategory = -1;
enum FreeMode { kLinkCategory, kDoNotLinkCategory };
@@ -151,12 +143,14 @@ class FreeListCategory {
page_(page),
type_(kInvalidCategory),
available_(0),
+ length_(0),
prev_(nullptr),
next_(nullptr) {}
void Initialize(FreeListCategoryType type) {
type_ = type;
available_ = 0;
+ length_ = 0;
prev_ = nullptr;
next_ = nullptr;
}
@@ -188,10 +182,8 @@ class FreeListCategory {
void set_free_list(FreeList* free_list) { free_list_ = free_list; }
-#ifdef DEBUG
size_t SumFreeList();
- int FreeListLength();
-#endif
+ int FreeListLength() { return length_; }
private:
// For debug builds we accurately compute free lists lengths up until
@@ -218,6 +210,9 @@ class FreeListCategory {
// category.
size_t available_;
+ // |length_|: Total blocks in this free list category.
+ int length_;
+
// |top_|: Points to the top FreeSpace in the free list category.
FreeSpace top_;
@@ -230,6 +225,327 @@ class FreeListCategory {
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
+// A free list maintains free blocks of memory. The free list is organized in
+// a way to encourage objects allocated around the same time to be near each
+// other. The normal way to allocate is intended to be by bumping a 'top'
+// pointer until it hits a 'limit' pointer. When the limit is hit we need to
+// find a new space to allocate from. This is done with the free list, which is
+// divided up into rough categories to cut down on waste. Having finer
+// categories would scatter allocation more.
+class FreeList {
+ public:
+  // Creates a FreeList of the default class (FreeListLegacy for now).
+ V8_EXPORT_PRIVATE static FreeList* CreateFreeList();
+
+ virtual ~FreeList() = default;
+
+ // Returns how much memory can be allocated after freeing maximum_freed
+ // memory.
+ virtual size_t GuaranteedAllocatable(size_t maximum_freed) = 0;
+
+ // Adds a node on the free list. The block of size {size_in_bytes} starting
+ // at {start} is placed on the free list. The return value is the number of
+ // bytes that were not added to the free list, because the freed memory block
+ // was too small. Bookkeeping information will be written to the block, i.e.,
+ // its contents will be destroyed. The start address should be word aligned,
+ // and the size should be a non-zero multiple of the word size.
+ virtual size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
+
+  // Allocates a free space node from the free list of at least size_in_bytes
+ // bytes. Returns the actual node size in node_size which can be bigger than
+ // size_in_bytes. This method returns null if the allocation request cannot be
+ // handled by the free list.
+ virtual V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) = 0;
+
+  // Returns a page containing a free list entry of at least |size_in_bytes|,
+  // or nullptr otherwise.
+ V8_EXPORT_PRIVATE virtual Page* GetPageForSize(size_t size_in_bytes) = 0;
+
+ void Reset();
+
+ // Return the number of bytes available on the free list.
+ size_t Available() {
+ size_t available = 0;
+ ForAllFreeListCategories([&available](FreeListCategory* category) {
+ available += category->available();
+ });
+ return available;
+ }
+
+ bool IsEmpty() {
+ bool empty = true;
+ ForAllFreeListCategories([&empty](FreeListCategory* category) {
+ if (!category->is_empty()) empty = false;
+ });
+ return empty;
+ }
+
+ // Used after booting the VM.
+ void RepairLists(Heap* heap);
+
+ V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
+ bool ContainsPageFreeListItems(Page* page);
+
+ int number_of_categories() { return number_of_categories_; }
+ FreeListCategoryType last_category() { return last_category_; }
+
+ size_t wasted_bytes() { return wasted_bytes_; }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
+ FreeListCategory* current = categories_[type];
+ while (current != nullptr) {
+ FreeListCategory* next = current->next();
+ callback(current);
+ current = next;
+ }
+ }
+
+ template <typename Callback>
+ void ForAllFreeListCategories(Callback callback) {
+ for (int i = kFirstCategory; i < number_of_categories(); i++) {
+ ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
+ }
+ }
+
+ bool AddCategory(FreeListCategory* category);
+ V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
+ void PrintCategories(FreeListCategoryType type);
+
+#ifdef DEBUG
+ size_t SumFreeLists();
+ bool IsVeryLong();
+#endif
+
+ protected:
+ class FreeListCategoryIterator final {
+ public:
+ FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
+ : current_(free_list->categories_[type]) {}
+
+ bool HasNext() const { return current_ != nullptr; }
+
+ FreeListCategory* Next() {
+ DCHECK(HasNext());
+ FreeListCategory* tmp = current_;
+ current_ = current_->next();
+ return tmp;
+ }
+
+ private:
+ FreeListCategory* current_;
+ };
+
+ // Tries to retrieve a node from the first category in a given |type|.
+ // Returns nullptr if the category is empty or the top entry is smaller
+ // than minimum_size.
+ FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
+
+ // Searches a given |type| for a node of at least |minimum_size|.
+ FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
+
+ // Returns the smallest category in which an object of |size_in_bytes| could
+ // fit.
+ virtual FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) = 0;
+
+ FreeListCategory* top(FreeListCategoryType type) const {
+ return categories_[type];
+ }
+
+ Page* GetPageForCategoryType(FreeListCategoryType type) {
+ return top(type) ? top(type)->page() : nullptr;
+ }
+
+ int number_of_categories_ = 0;
+ FreeListCategoryType last_category_ = 0;
+ size_t min_block_size_ = 0;
+
+ std::atomic<size_t> wasted_bytes_{0};
+ FreeListCategory** categories_ = nullptr;
+
+ friend class FreeListCategory;
+ friend class Page;
+ friend class MemoryChunk;
+ friend class ReadOnlyPage;
+};
+
+// FreeList used for spaces that don't have freelists
+// (only the LargeObject space for now).
+class NoFreeList final : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) final {
+ FATAL("NoFreeList can't be used as a standard FreeList. ");
+ }
+ size_t Free(Address start, size_t size_in_bytes, FreeMode mode) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+ Page* GetPageForSize(size_t size_in_bytes) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+
+ private:
+ FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) final {
+ FATAL("NoFreeList can't be used as a standard FreeList.");
+ }
+};
+
+// ----------------------------------------------------------------------------
+// Space is the abstract superclass for all allocation spaces.
+class V8_EXPORT_PRIVATE Space : public Malloced {
+ public:
+ Space(Heap* heap, AllocationSpace id, FreeList* free_list)
+ : allocation_observers_paused_(false),
+ heap_(heap),
+ id_(id),
+ committed_(0),
+ max_committed_(0),
+ free_list_(std::unique_ptr<FreeList>(free_list)) {
+ external_backing_store_bytes_ =
+ new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
+ external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
+ external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
+ 0;
+ CheckOffsetsAreConsistent();
+ }
+
+ void CheckOffsetsAreConsistent() const;
+
+ static inline void MoveExternalBackingStoreBytes(
+ ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
+
+ virtual ~Space() {
+ delete[] external_backing_store_bytes_;
+ external_backing_store_bytes_ = nullptr;
+ }
+
+ Heap* heap() const {
+ DCHECK_NOT_NULL(heap_);
+ return heap_;
+ }
+
+ bool IsDetached() const { return heap_ == nullptr; }
+
+ AllocationSpace identity() { return id_; }
+
+ const char* name() { return Heap::GetSpaceName(id_); }
+
+ virtual void AddAllocationObserver(AllocationObserver* observer);
+
+ virtual void RemoveAllocationObserver(AllocationObserver* observer);
+
+ virtual void PauseAllocationObservers();
+
+ virtual void ResumeAllocationObservers();
+
+ virtual void StartNextInlineAllocationStep() {}
+
+ void AllocationStep(int bytes_since_last, Address soon_object, int size);
+
+ // Return the total amount committed memory for this space, i.e., allocatable
+ // memory and page headers.
+ virtual size_t CommittedMemory() { return committed_; }
+
+ virtual size_t MaximumCommittedMemory() { return max_committed_; }
+
+ // Returns allocated size.
+ virtual size_t Size() = 0;
+
+ // Returns size of objects. Can differ from the allocated size
+ // (e.g. see LargeObjectSpace).
+ virtual size_t SizeOfObjects() { return Size(); }
+
+ // Approximate amount of physical memory committed for this space.
+ virtual size_t CommittedPhysicalMemory() = 0;
+
+ // Return the available bytes without growing.
+ virtual size_t Available() = 0;
+
+ virtual int RoundSizeDownToObjectAlignment(int size) {
+ if (id_ == CODE_SPACE) {
+ return RoundDown(size, kCodeAlignment);
+ } else {
+ return RoundDown(size, kTaggedSize);
+ }
+ }
+
+ virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
+
+ void AccountCommitted(size_t bytes) {
+ DCHECK_GE(committed_ + bytes, committed_);
+ committed_ += bytes;
+ if (committed_ > max_committed_) {
+ max_committed_ = committed_;
+ }
+ }
+
+ void AccountUncommitted(size_t bytes) {
+ DCHECK_GE(committed_, committed_ - bytes);
+ committed_ -= bytes;
+ }
+
+ inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
+ size_t amount);
+
+ // Returns amount of off-heap memory in-use by objects in this Space.
+ virtual size_t ExternalBackingStoreBytes(
+ ExternalBackingStoreType type) const {
+ return external_backing_store_bytes_[type];
+ }
+
+ void* GetRandomMmapAddr();
+
+ MemoryChunk* first_page() { return memory_chunk_list_.front(); }
+ MemoryChunk* last_page() { return memory_chunk_list_.back(); }
+
+ base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
+
+ FreeList* free_list() { return free_list_.get(); }
+
+#ifdef DEBUG
+ virtual void Print() = 0;
+#endif
+
+ protected:
+ intptr_t GetNextInlineAllocationStepSize();
+ bool AllocationObserversActive() {
+ return !allocation_observers_paused_ && !allocation_observers_.empty();
+ }
+
+ void DetachFromHeap() { heap_ = nullptr; }
+
+ std::vector<AllocationObserver*> allocation_observers_;
+
+ // The List manages the pages that belong to the given space.
+ base::List<MemoryChunk> memory_chunk_list_;
+
+ // Tracks off-heap memory used by this space.
+ std::atomic<size_t>* external_backing_store_bytes_;
+
+ static const intptr_t kIdOffset = 9 * kSystemPointerSize;
+
+ bool allocation_observers_paused_;
+ Heap* heap_;
+ AllocationSpace id_;
+
+ // Keeps track of committed memory in a space.
+ size_t committed_;
+ size_t max_committed_;
+
+ std::unique_ptr<FreeList> free_list_;
+
+ DISALLOW_COPY_AND_ASSIGN(Space);
+};
+
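Space declares MoveExternalBackingStoreBytes() above, but its inline body sits outside the hunks shown here. A minimal sketch of the intended bookkeeping, assuming it simply shifts the per-type counters between the two spaces, would be:

// Sketch under the stated assumption; the real inline definition is not part
// of the hunks shown in this diff.
void MoveExternalBackingStoreBytesSketch(ExternalBackingStoreType type,
                                         Space* from, Space* to,
                                         size_t amount) {
  if (amount == 0) return;
  from->DecrementExternalBackingStoreBytes(type, amount);
  to->IncrementExternalBackingStoreBytes(type, amount);
}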
// The CodeObjectRegistry holds all start addresses of code objects of a given
// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
@@ -265,7 +581,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
-class MemoryChunk {
+class MemoryChunk : public BasicMemoryChunk {
public:
// Use with std data structures.
struct Hasher {
@@ -274,74 +590,6 @@ class MemoryChunk {
}
};
- enum Flag {
- NO_FLAGS = 0u,
- IS_EXECUTABLE = 1u << 0,
- POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
- POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
- // A page in the from-space or a young large page that was not scavenged
- // yet.
- FROM_PAGE = 1u << 3,
- // A page in the to-space or a young large page that was scavenged.
- TO_PAGE = 1u << 4,
- LARGE_PAGE = 1u << 5,
- EVACUATION_CANDIDATE = 1u << 6,
- NEVER_EVACUATE = 1u << 7,
-
- // Large objects can have a progress bar in their page header. These object
- // are scanned in increments and will be kept black while being scanned.
- // Even if the mutator writes to them they will be kept black and a white
- // to grey transition is performed in the value.
- HAS_PROGRESS_BAR = 1u << 8,
-
- // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
- // from new to old space during evacuation.
- PAGE_NEW_OLD_PROMOTION = 1u << 9,
-
- // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
- // within the new space during evacuation.
- PAGE_NEW_NEW_PROMOTION = 1u << 10,
-
- // This flag is intended to be used for testing. Works only when both
- // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
- // are set. It forces the page to become an evacuation candidate at next
- // candidates selection cycle.
- FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
-
- // This flag is intended to be used for testing.
- NEVER_ALLOCATE_ON_PAGE = 1u << 12,
-
- // The memory chunk is already logically freed, however the actual freeing
- // still has to be performed.
- PRE_FREED = 1u << 13,
-
- // |POOLED|: When actually freeing this chunk, only uncommit and do not
- // give up the reservation as we still reuse the chunk at some point.
- POOLED = 1u << 14,
-
- // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
- // has been aborted and needs special handling by the sweeper.
- COMPACTION_WAS_ABORTED = 1u << 15,
-
- // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
- // on pages is sometimes aborted. The flag is used to avoid repeatedly
- // triggering on the same page.
- COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
-
- // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
- // to iterate the page.
- SWEEP_TO_ITERATE = 1u << 17,
-
- // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
- // enabled.
- INCREMENTAL_MARKING = 1u << 18,
- NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
-
- // The memory chunk freeing bookkeeping has been performed but the chunk has
- // not yet been freed.
- UNREGISTERED = 1u << 20
- };
-
using Flags = uintptr_t;
static const Flags kPointersToHereAreInterestingMask =
@@ -370,36 +618,12 @@ class MemoryChunk {
kSweepingInProgress,
};
- static const intptr_t kAlignment =
- (static_cast<uintptr_t>(1) << kPageSizeBits);
-
- static const intptr_t kAlignmentMask = kAlignment - 1;
-
- static const intptr_t kSizeOffset = 0;
- static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
- static const intptr_t kMarkBitmapOffset = kFlagsOffset + kUIntptrSize;
- static const intptr_t kReservationOffset =
- kMarkBitmapOffset + kSystemPointerSize;
- static const intptr_t kHeapOffset =
- kReservationOffset + 3 * kSystemPointerSize;
- static const intptr_t kHeaderSentinelOffset =
- kHeapOffset + kSystemPointerSize;
- static const intptr_t kOwnerOffset =
- kHeaderSentinelOffset + kSystemPointerSize;
-
static const size_t kHeaderSize =
- kSizeOffset // NOLINT
- + kSizetSize // size_t size
- + kUIntptrSize // uintptr_t flags_
- + kSystemPointerSize // Bitmap* marking_bitmap_
- + 3 * kSystemPointerSize // VirtualMemory reservation_
- + kSystemPointerSize // Heap* heap_
- + kSystemPointerSize // Address header_sentinel_
- + kSystemPointerSize // Address area_start_
- + kSystemPointerSize // Address area_end_
- + kSystemPointerSize // Address owner_
- + kSizetSize // size_t progress_bar_
- + kIntptrSize // intptr_t live_byte_count_
+ BasicMemoryChunk::kHeaderSize // Parent size.
+ + 3 * kSystemPointerSize // VirtualMemory reservation_
+ + kSystemPointerSize // Address owner_
+ + kSizetSize // size_t progress_bar_
+ + kIntptrSize // intptr_t live_byte_count_
+ kSystemPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
@@ -415,9 +639,8 @@ class MemoryChunk {
+ kSizetSize // size_t allocated_bytes_
+ kSizetSize // size_t wasted_memory_
+ kSystemPointerSize * 2 // base::ListNode
- + kSystemPointerSize * kNumberOfCategories
- // FreeListCategory categories_[kNumberOfCategories]
- + kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
+ + kSystemPointerSize // FreeListCategory** categories_
+ + kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
+ kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
+ kSystemPointerSize // Bitmap* young_generation_bitmap_
+ kSystemPointerSize; // CodeObjectRegistry* code_object_registry_
@@ -428,14 +651,12 @@ class MemoryChunk {
// Maximum number of nested code memory modification scopes.
static const int kMaxWriteUnprotectCounter = 3;
- static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }
-
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(BaseAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
- static MemoryChunk* FromHeapObject(const HeapObject o) {
+ static MemoryChunk* FromHeapObject(HeapObject o) {
return reinterpret_cast<MemoryChunk*>(BaseAddress(o.ptr()));
}
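FromAddress() and FromHeapObject() rely on the chunk-alignment mask now inherited from BasicMemoryChunk. A small illustration of that arithmetic, assuming kPageSizeBits == 18 to match the 256K page size documented later in this diff:

#include <cstdint>
// Illustration only: an interior address rounds down to its chunk base when
// the low alignment bits are masked off.
constexpr uintptr_t kAssumedAlignment = uintptr_t{1} << 18;  // 256 KB
constexpr uintptr_t kAssumedAlignmentMask = kAssumedAlignment - 1;
static_assert((uintptr_t{0x40080ABC} & ~kAssumedAlignmentMask) ==
                  uintptr_t{0x40080000},
              "masking maps an interior address to the chunk start");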
@@ -465,22 +686,8 @@ class MemoryChunk {
void DiscardUnusedMemory(Address addr, size_t size);
- Address address() const {
- return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
- }
-
base::Mutex* mutex() { return mutex_; }
- bool Contains(Address addr) {
- return addr >= area_start() && addr < area_end();
- }
-
- // Checks whether |addr| can be a limit of addresses in this page. It's a
- // limit if it's in the page, or if it's just after the last byte of the page.
- bool ContainsLimit(Address addr) {
- return addr >= area_start() && addr <= area_end();
- }
-
void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
concurrent_sweeping_ = state;
}
@@ -491,15 +698,17 @@ class MemoryChunk {
bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
- size_t size() const { return size_; }
- void set_size(size_t size) { size_ = size; }
-
inline Heap* heap() const {
DCHECK_NOT_NULL(heap_);
return heap_;
}
- Heap* synchronized_heap();
+#ifdef THREAD_SANITIZER
+ // Perform a dummy acquire load to tell TSAN that there is no data race in
+ // mark-bit initialization. See MemoryChunk::Initialize for the corresponding
+ // release store.
+ void SynchronizedHeapLoad();
+#endif
template <RememberedSetType type>
bool ContainsSlots() {
@@ -547,12 +756,7 @@ class MemoryChunk {
void AllocateYoungGenerationBitmap();
void ReleaseYoungGenerationBitmap();
- void AllocateMarkingBitmap();
- void ReleaseMarkingBitmap();
-
- Address area_start() { return area_start_; }
- Address area_end() { return area_end_; }
- size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
+ int FreeListsLength();
// Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
@@ -596,36 +800,6 @@ class MemoryChunk {
return this->address() + (index << kTaggedSizeLog2);
}
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- void SetFlag(Flag flag) {
- if (access_mode == AccessMode::NON_ATOMIC) {
- flags_ |= flag;
- } else {
- base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
- }
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsFlagSet(Flag flag) {
- return (GetFlags<access_mode>() & flag) != 0;
- }
-
- void ClearFlag(Flag flag) { flags_ &= ~flag; }
- // Set or clear multiple flags at a time. The flags in the mask are set to
- // the value in "flags", the rest retain the current value in |flags_|.
- void SetFlags(uintptr_t flags, uintptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
- }
-
- // Return all current flags.
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- uintptr_t GetFlags() {
- if (access_mode == AccessMode::NON_ATOMIC) {
- return flags_;
- } else {
- return base::AsAtomicWord::Relaxed_Load(&flags_);
- }
- }
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
@@ -653,12 +827,11 @@ class MemoryChunk {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
- bool IsFromPage() const { return (flags_ & FROM_PAGE) != 0; }
- bool IsToPage() const { return (flags_ & TO_PAGE) != 0; }
- bool IsLargePage() const { return (flags_ & LARGE_PAGE) != 0; }
-
+ bool IsFromPage() const { return IsFlagSet(FROM_PAGE); }
+ bool IsToPage() const { return IsFlagSet(TO_PAGE); }
+ bool IsLargePage() const { return IsFlagSet(LARGE_PAGE); }
bool InYoungGeneration() const {
- return (flags_ & kIsInYoungGenerationMask) != 0;
+ return (GetFlags() & kIsInYoungGenerationMask) != 0;
}
bool InNewSpace() const { return InYoungGeneration() && !IsLargePage(); }
bool InNewLargeObjectSpace() const {
@@ -667,11 +840,20 @@ class MemoryChunk {
bool InOldSpace() const;
V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
+ // Gets the chunk's owner or null if the space has been detached.
Space* owner() const { return owner_; }
void set_owner(Space* space) { owner_ = space; }
- static inline bool HasHeaderSentinel(Address slot_addr);
+ bool IsWritable() const {
+ // If this is a read-only space chunk but heap_ is non-null, it has not yet
+ // been sealed and can be written to.
+ return !InReadOnlySpace() || heap_ != nullptr;
+ }
+
+ // Gets the chunk's allocation space, potentially dealing with a null owner_
+ // (like read-only chunks have).
+ inline AllocationSpace owner_identity() const;
// Emits a memory barrier. For TSAN builds the other thread needs to perform
// MemoryChunk::synchronized_heap() to simulate the barrier.
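owner_identity() above is declared inline and its body is outside the hunks shown here; a plausible sketch consistent with the comment (read-only chunks detach their owner) would be:

// Sketch only; assumes detached read-only chunks report RO_SPACE.
AllocationSpace MemoryChunk::owner_identity() const {
  if (InReadOnlySpace()) return RO_SPACE;
  return owner()->identity();
}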
@@ -693,14 +875,20 @@ class MemoryChunk {
CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }
+ FreeList* free_list() { return owner()->free_list(); }
+
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
Executability executable, Space* owner,
VirtualMemory reservation);
- // Should be called when memory chunk is about to be freed.
- void ReleaseAllocatedMemory();
+ // Release all memory allocated by the chunk. Should be called when memory
+ // chunk is about to be freed.
+ void ReleaseAllAllocatedMemory();
+ // Release memory allocated by the chunk, except that which is needed by
+ // read-only space chunks.
+ void ReleaseAllocatedMemoryNeededForWritableChunk();
// Sets the requested page permissions only if the write unprotect counter
// has reached 0.
@@ -719,29 +907,12 @@ class MemoryChunk {
return reinterpret_cast<ConcurrentBitmap<mode>*>(young_generation_bitmap_);
}
- size_t size_;
- uintptr_t flags_;
-
- Bitmap* marking_bitmap_;
-
// If the chunk needs to remember its memory reservation, it is stored here.
VirtualMemory reservation_;
- Heap* heap_;
-
- // This is used to distinguish the memory chunk header from the interior of a
- // large page. The memory chunk header stores here an impossible tagged
- // pointer: the tagger pointer of the page start. A field in a large object is
- // guaranteed to not contain such a pointer.
- Address header_sentinel_;
-
// The space owning this memory chunk.
std::atomic<Space*> owner_;
- // Start and end of allocatable memory on this chunk.
- Address area_start_;
- Address area_end_;
-
// Used by the incremental marker to keep track of the scanning progress in
// large objects that have a progress bar and are scanned in increments.
std::atomic<size_t> progress_bar_;
@@ -792,7 +963,7 @@ class MemoryChunk {
base::ListNode<MemoryChunk> list_node_;
- FreeListCategory* categories_[kNumberOfCategories];
+ FreeListCategory** categories_;
LocalArrayBufferTracker* local_tracker_;
@@ -807,10 +978,8 @@ class MemoryChunk {
friend class ConcurrentMarkingState;
friend class IncrementalMarkingState;
friend class MajorAtomicMarkingState;
- friend class MajorMarkingState;
friend class MajorNonAtomicMarkingState;
friend class MemoryAllocator;
- friend class MemoryChunkValidator;
friend class MinorMarkingState;
friend class MinorNonAtomicMarkingState;
friend class PagedSpace;
@@ -819,7 +988,7 @@ class MemoryChunk {
STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// -----------------------------------------------------------------------------
-// A page is a memory chunk of a size 512K. Large object pages may be larger.
+// A page is a memory chunk of size 256K. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
// Page* p = Page::FromAddress(addr); or
@@ -840,7 +1009,7 @@ class Page : public MemoryChunk {
static Page* FromAddress(Address addr) {
return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
}
- static Page* FromHeapObject(const HeapObject o) {
+ static Page* FromHeapObject(HeapObject o) {
return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
}
@@ -873,7 +1042,7 @@ class Page : public MemoryChunk {
template <typename Callback>
inline void ForAllFreeListCategories(Callback callback) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ for (int i = kFirstCategory; i < free_list()->number_of_categories(); i++) {
callback(categories_[i]);
}
}
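The loop above now asks the owning space's free list for its category count instead of a fixed kNumberOfCategories. As a usage sketch, mirroring the Available() accumulation of the old FreeList further down, a caller could sum the free bytes parked on a page like this:

// Usage sketch only; `page` is assumed to be a swept Page*.
size_t free_bytes_on_page = 0;
page->ForAllFreeListCategories([&free_bytes_on_page](FreeListCategory* category) {
  free_bytes_on_page += category->available();
});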
@@ -884,8 +1053,8 @@ class Page : public MemoryChunk {
// Returns the address for a given offset in this page.
Address OffsetToAddress(size_t offset) {
Address address_in_page = address() + offset;
- DCHECK_GE(address_in_page, area_start_);
- DCHECK_LT(address_in_page, area_end_);
+ DCHECK_GE(address_in_page, area_start());
+ DCHECK_LT(address_in_page, area_end());
return address_in_page;
}
@@ -963,7 +1132,7 @@ class LargePage : public MemoryChunk {
// x64 and ia32 architectures.
static const int kMaxCodePageSize = 512 * MB;
- static LargePage* FromHeapObject(const HeapObject o) {
+ static LargePage* FromHeapObject(HeapObject o) {
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
}
@@ -986,162 +1155,11 @@ class LargePage : public MemoryChunk {
friend class MemoryAllocator;
};
-
-// ----------------------------------------------------------------------------
-// Space is the abstract superclass for all allocation spaces.
-class V8_EXPORT_PRIVATE Space : public Malloced {
- public:
- Space(Heap* heap, AllocationSpace id)
- : allocation_observers_paused_(false),
- heap_(heap),
- id_(id),
- committed_(0),
- max_committed_(0) {
- external_backing_store_bytes_ =
- new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
- external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
- external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
- 0;
- CheckOffsetsAreConsistent();
- }
-
- void CheckOffsetsAreConsistent() const;
-
- static inline void MoveExternalBackingStoreBytes(
- ExternalBackingStoreType type, Space* from, Space* to, size_t amount);
-
- virtual ~Space() {
- delete[] external_backing_store_bytes_;
- external_backing_store_bytes_ = nullptr;
- }
-
- Heap* heap() const {
- DCHECK_NOT_NULL(heap_);
- return heap_;
- }
-
- // Identity used in error reporting.
- AllocationSpace identity() { return id_; }
-
- const char* name() { return Heap::GetSpaceName(id_); }
-
- virtual void AddAllocationObserver(AllocationObserver* observer);
-
- virtual void RemoveAllocationObserver(AllocationObserver* observer);
-
- virtual void PauseAllocationObservers();
-
- virtual void ResumeAllocationObservers();
-
- virtual void StartNextInlineAllocationStep() {}
-
- void AllocationStep(int bytes_since_last, Address soon_object, int size);
-
- // Return the total amount committed memory for this space, i.e., allocatable
- // memory and page headers.
- virtual size_t CommittedMemory() { return committed_; }
-
- virtual size_t MaximumCommittedMemory() { return max_committed_; }
-
- // Returns allocated size.
- virtual size_t Size() = 0;
-
- // Returns size of objects. Can differ from the allocated size
- // (e.g. see LargeObjectSpace).
- virtual size_t SizeOfObjects() { return Size(); }
-
- // Approximate amount of physical memory committed for this space.
- virtual size_t CommittedPhysicalMemory() = 0;
-
- // Return the available bytes without growing.
- virtual size_t Available() = 0;
-
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (id_ == CODE_SPACE) {
- return RoundDown(size, kCodeAlignment);
- } else {
- return RoundDown(size, kTaggedSize);
- }
- }
-
- virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
-
- void AccountCommitted(size_t bytes) {
- DCHECK_GE(committed_ + bytes, committed_);
- committed_ += bytes;
- if (committed_ > max_committed_) {
- max_committed_ = committed_;
- }
- }
-
- void AccountUncommitted(size_t bytes) {
- DCHECK_GE(committed_, committed_ - bytes);
- committed_ -= bytes;
- }
-
- inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount);
-
- inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
- size_t amount);
-
- // Returns amount of off-heap memory in-use by objects in this Space.
- virtual size_t ExternalBackingStoreBytes(
- ExternalBackingStoreType type) const {
- return external_backing_store_bytes_[type];
- }
-
- void* GetRandomMmapAddr();
-
- MemoryChunk* first_page() { return memory_chunk_list_.front(); }
- MemoryChunk* last_page() { return memory_chunk_list_.back(); }
-
- base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
-
-#ifdef DEBUG
- virtual void Print() = 0;
-#endif
-
- protected:
- intptr_t GetNextInlineAllocationStepSize();
- bool AllocationObserversActive() {
- return !allocation_observers_paused_ && !allocation_observers_.empty();
- }
-
- void DetachFromHeap() { heap_ = nullptr; }
-
- std::vector<AllocationObserver*> allocation_observers_;
-
- // The List manages the pages that belong to the given space.
- base::List<MemoryChunk> memory_chunk_list_;
-
- // Tracks off-heap memory used by this space.
- std::atomic<size_t>* external_backing_store_bytes_;
-
- private:
- static const intptr_t kIdOffset = 9 * kSystemPointerSize;
-
- bool allocation_observers_paused_;
- Heap* heap_;
- AllocationSpace id_;
-
- // Keeps track of committed memory in a space.
- size_t committed_;
- size_t max_committed_;
-
- DISALLOW_COPY_AND_ASSIGN(Space);
-};
-
-class MemoryChunkValidator {
- // Computed offsets should match the compiler generated ones.
- STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
-
- // Validate our estimates on the header size.
- STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
- STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
- STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
-};
-
+// Validate our estimates on the header size.
+STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
// The process-wide singleton that keeps track of code range regions with the
// intention to reuse free code range regions as a workaround for CFG memory
@@ -1205,7 +1223,7 @@ class MemoryAllocator {
chunk = GetMemoryChunkSafe<kRegular>();
if (chunk != nullptr) {
// For stolen chunks we need to manually free any allocated memory.
- chunk->ReleaseAllocatedMemory();
+ chunk->ReleaseAllAllocatedMemory();
}
}
return chunk;
@@ -1597,17 +1615,17 @@ class PageRange {
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
-// A HeapObjectIterator iterates objects from the bottom of the given space
-// to its top or from the bottom of the given page to its top.
+// A PagedSpaceObjectIterator iterates objects from the bottom of the given
+// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
-class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
+class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
public:
// Creates a new object iterator in a given space.
- explicit HeapObjectIterator(PagedSpace* space);
- explicit HeapObjectIterator(Page* page);
+ explicit PagedSpaceObjectIterator(PagedSpace* space);
+ explicit PagedSpaceObjectIterator(Page* page);
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
@@ -1629,7 +1647,6 @@ class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
PageRange::iterator current_page_;
};
-
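The rename from HeapObjectIterator to PagedSpaceObjectIterator does not change how the iterator is driven; a usage sketch (the surrounding caller code is assumed, not part of this patch):

// Usage sketch: walk every object currently in old space. Objects allocated
// during the walk may or may not be visited, as the comment above notes.
PagedSpaceObjectIterator it(heap->old_space());
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
  // ... inspect obj ...
}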
// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.
@@ -1761,13 +1778,6 @@ class AllocationStats {
#endif
};
-// A free list maintaining free blocks of memory. The free list is organized in
-// a way to encourage objects allocated around the same time to be near each
-// other. The normal way to allocate is intended to be by bumping a 'top'
-// pointer until it hits a 'limit' pointer. When the limit is hit we need to
-// find a new space to allocate from. This is done with the free list, which is
-// divided up into rough categories to cut down on waste. Having finer
-// categories would scatter allocation more.
// The free list is organized in categories as follows:
// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
@@ -1782,11 +1792,9 @@ class AllocationStats {
// words in size.
// At least 16384 words (huge): This list is for objects of 2048 words or
// larger. Empty pages are also added to this list.
-class FreeList {
+class V8_EXPORT_PRIVATE FreeListLegacy : public FreeList {
public:
- // This method returns how much memory can be allocated after freeing
- // maximum_freed memory.
- static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
+ size_t GuaranteedAllocatable(size_t maximum_freed) override {
if (maximum_freed <= kTiniestListMax) {
// Since we are not iterating over all list entries, we cannot guarantee
// that we can find the maximum freed block in that free list.
@@ -1803,7 +1811,50 @@ class FreeList {
return maximum_freed;
}
- static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
+ Page* GetPageForSize(size_t size_in_bytes) override {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(kHuge);
+ if (!page && static_cast<int>(kLarge) >= minimum_category)
+ page = GetPageForCategoryType(kLarge);
+ if (!page && static_cast<int>(kMedium) >= minimum_category)
+ page = GetPageForCategoryType(kMedium);
+ if (!page && static_cast<int>(kSmall) >= minimum_category)
+ page = GetPageForCategoryType(kSmall);
+ if (!page && static_cast<int>(kTiny) >= minimum_category)
+ page = GetPageForCategoryType(kTiny);
+ if (!page && static_cast<int>(kTiniest) >= minimum_category)
+ page = GetPageForCategoryType(kTiniest);
+ return page;
+ }
+
+ FreeListLegacy();
+ ~FreeListLegacy();
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) override;
+
+ private:
+ enum { kTiniest, kTiny, kSmall, kMedium, kLarge, kHuge };
+
+ static const size_t kMinBlockSize = 3 * kTaggedSize;
+
+ // This is a conservative upper bound. The actual maximum block size takes
+ // padding and alignment of data and code pages into account.
+ static const size_t kMaxBlockSize = Page::kPageSize;
+
+ static const size_t kTiniestListMax = 0xa * kTaggedSize;
+ static const size_t kTinyListMax = 0x1f * kTaggedSize;
+ static const size_t kSmallListMax = 0xff * kTaggedSize;
+ static const size_t kMediumListMax = 0x7ff * kTaggedSize;
+ static const size_t kLargeListMax = 0x1fff * kTaggedSize;
+ static const size_t kTinyAllocationMax = kTiniestListMax;
+ static const size_t kSmallAllocationMax = kTinyListMax;
+ static const size_t kMediumAllocationMax = kSmallListMax;
+ static const size_t kLargeAllocationMax = kMediumListMax;
+
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
if (size_in_bytes <= kTiniestListMax) {
return kTiniest;
} else if (size_in_bytes <= kTinyListMax) {
@@ -1818,152 +1869,145 @@ class FreeList {
return kHuge;
}
- FreeList();
-
- // Adds a node on the free list. The block of size {size_in_bytes} starting
- // at {start} is placed on the free list. The return value is the number of
- // bytes that were not added to the free list, because they freed memory block
- // was too small. Bookkeeping information will be written to the block, i.e.,
- // its contents will be destroyed. The start address should be word aligned,
- // and the size should be a non-zero multiple of the word size.
- size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
-
- // Allocates a free space node frome the free list of at least size_in_bytes
- // bytes. Returns the actual node size in node_size which can be bigger than
- // size_in_bytes. This method returns null if the allocation request cannot be
- // handled by the free list.
- V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
- size_t* node_size);
-
- // Clear the free list.
- void Reset();
-
- // Return the number of bytes available on the free list.
- size_t Available() {
- size_t available = 0;
- ForAllFreeListCategories([&available](FreeListCategory* category) {
- available += category->available();
- });
- return available;
- }
-
- bool IsEmpty() {
- bool empty = true;
- ForAllFreeListCategories([&empty](FreeListCategory* category) {
- if (!category->is_empty()) empty = false;
- });
- return empty;
+ // Returns the category to be used to allocate |size_in_bytes| in the fast
+ // path. The tiny categories are not used for fast allocation.
+ FreeListCategoryType SelectFastAllocationFreeListCategoryType(
+ size_t size_in_bytes) {
+ if (size_in_bytes <= kSmallAllocationMax) {
+ return kSmall;
+ } else if (size_in_bytes <= kMediumAllocationMax) {
+ return kMedium;
+ } else if (size_in_bytes <= kLargeAllocationMax) {
+ return kLarge;
+ }
+ return kHuge;
}
- // Used after booting the VM.
- void RepairLists(Heap* heap);
-
- V8_EXPORT_PRIVATE size_t EvictFreeListItems(Page* page);
- bool ContainsPageFreeListItems(Page* page);
-
- size_t wasted_bytes() { return wasted_bytes_; }
+ friend class FreeListCategory;
+ friend class heap::HeapTester;
+};
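For orientation, the word-count boundaries of FreeListLegacy translate to the following byte values, assuming kTaggedSize == 8 (64-bit without pointer compression; the values halve with compressed tags):

// Illustrative arithmetic only; the asserts just spell out the boundaries.
static_assert(0xa * 8 == 80, "kTiniestListMax == 80 bytes");
static_assert(0x1f * 8 == 248, "kTinyListMax == 248 bytes");
static_assert(0xff * 8 == 2040, "kSmallListMax == 2040 bytes");
static_assert(0x7ff * 8 == 16376, "kMediumListMax == 16376 bytes");
static_assert(0x1fff * 8 == 65528, "kLargeListMax == 65528 bytes");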
- template <typename Callback>
- void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
- FreeListCategory* current = categories_[type];
- while (current != nullptr) {
- FreeListCategory* next = current->next();
- callback(current);
- current = next;
+// Inspired by FreeListLegacy.
+// Only has 3 categories: Medium, Large and Huge.
+// Any block that would have belonged to tiniest, tiny or small in FreeListLegacy
+// is considered wasted.
+// Allocation is done only in Huge, Medium and Large (in that order),
+// using a first-fit strategy (only the first block of each freelist is ever
+// considered though). Performance is supposed to be better than
+// FreeListLegacy, but memory usage should be higher (because fragmentation will
+// probably be higher).
+class V8_EXPORT_PRIVATE FreeListFastAlloc : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) override {
+ if (maximum_freed <= kMediumListMax) {
+ // Since we are not iterating over all list entries, we cannot guarantee
+ // that we can find the maximum freed block in that free list.
+ return 0;
+ } else if (maximum_freed <= kLargeListMax) {
+ return kLargeAllocationMax;
}
+ return kHugeAllocationMax;
}
- template <typename Callback>
- void ForAllFreeListCategories(Callback callback) {
- for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
- ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
- }
+ Page* GetPageForSize(size_t size_in_bytes) override {
+ const int minimum_category =
+ static_cast<int>(SelectFreeListCategoryType(size_in_bytes));
+ Page* page = GetPageForCategoryType(kHuge);
+ if (!page && static_cast<int>(kLarge) >= minimum_category)
+ page = GetPageForCategoryType(kLarge);
+ if (!page && static_cast<int>(kMedium) >= minimum_category)
+ page = GetPageForCategoryType(kMedium);
+ return page;
}
- bool AddCategory(FreeListCategory* category);
- V8_EXPORT_PRIVATE void RemoveCategory(FreeListCategory* category);
- void PrintCategories(FreeListCategoryType type);
-
- // Returns a page containing an entry for a given type, or nullptr otherwise.
- inline Page* GetPageForCategoryType(FreeListCategoryType type);
+ FreeListFastAlloc();
+ ~FreeListFastAlloc();
-#ifdef DEBUG
- size_t SumFreeLists();
- bool IsVeryLong();
-#endif
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) override;
private:
- class FreeListCategoryIterator {
- public:
- FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
- : current_(free_list->categories_[type]) {}
+ enum { kMedium, kLarge, kHuge };
- bool HasNext() { return current_ != nullptr; }
-
- FreeListCategory* Next() {
- DCHECK(HasNext());
- FreeListCategory* tmp = current_;
- current_ = current_->next();
- return tmp;
- }
-
- private:
- FreeListCategory* current_;
- };
-
- // The size range of blocks, in bytes.
- static const size_t kMinBlockSize = 3 * kTaggedSize;
+ static const size_t kMinBlockSize = 0xff * kTaggedSize;
// This is a conservative upper bound. The actual maximum block size takes
// padding and alignment of data and code pages into account.
static const size_t kMaxBlockSize = Page::kPageSize;
- static const size_t kTiniestListMax = 0xa * kTaggedSize;
- static const size_t kTinyListMax = 0x1f * kTaggedSize;
- static const size_t kSmallListMax = 0xff * kTaggedSize;
static const size_t kMediumListMax = 0x7ff * kTaggedSize;
static const size_t kLargeListMax = 0x1fff * kTaggedSize;
- static const size_t kTinyAllocationMax = kTiniestListMax;
- static const size_t kSmallAllocationMax = kTinyListMax;
- static const size_t kMediumAllocationMax = kSmallListMax;
+ static const size_t kMediumAllocationMax = kMinBlockSize;
static const size_t kLargeAllocationMax = kMediumListMax;
+ static const size_t kHugeAllocationMax = kLargeListMax;
- // Walks all available categories for a given |type| and tries to retrieve
- // a node. Returns nullptr if the category is empty.
- FreeSpace FindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
-
- // Tries to retrieve a node from the first category in a given |type|.
- // Returns nullptr if the category is empty or the top entry is smaller
- // than minimum_size.
- FreeSpace TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
- size_t* node_size);
-
- // Searches a given |type| for a node of at least |minimum_size|.
- FreeSpace SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
- size_t minimum_size);
-
- // The tiny categories are not used for fast allocation.
- FreeListCategoryType SelectFastAllocationFreeListCategoryType(
- size_t size_in_bytes) {
- if (size_in_bytes <= kSmallAllocationMax) {
- return kSmall;
- } else if (size_in_bytes <= kMediumAllocationMax) {
+ // Returns the category used to hold an object of size |size_in_bytes|.
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
+ if (size_in_bytes <= kMediumListMax) {
return kMedium;
- } else if (size_in_bytes <= kLargeAllocationMax) {
+ } else if (size_in_bytes <= kLargeListMax) {
return kLarge;
}
return kHuge;
}
- FreeListCategory* top(FreeListCategoryType type) const {
- return categories_[type];
+ Page* GetPageForCategoryType(FreeListCategoryType type) {
+ return top(type) ? top(type)->page() : nullptr;
}
+};
- std::atomic<size_t> wasted_bytes_;
- FreeListCategory* categories_[kNumberOfCategories];
+// Uses 49 freelists: one per size between 24 and 256, and then a few for
+// larger sizes. See the variable |categories_max| for the size of each
+// Freelist. Allocation is done using a best-fit strategy (considering only the
+// first element of each category though).
+// Performance is expected to be worse than FreeListLegacy, but memory
+// consumption should be lower (since fragmentation should be lower).
+class V8_EXPORT_PRIVATE FreeListMany : public FreeList {
+ public:
+ size_t GuaranteedAllocatable(size_t maximum_freed) override;
- friend class FreeListCategory;
+ Page* GetPageForSize(size_t size_in_bytes) override;
+
+ FreeListMany();
+ ~FreeListMany();
+
+ V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes,
+ size_t* node_size) override;
+
+ private:
+ static const size_t kMinBlockSize = 3 * kTaggedSize;
+
+ // This is a conservative upper bound. The actual maximum block size takes
+ // padding and alignment of data and code pages into account.
+ static const size_t kMaxBlockSize = Page::kPageSize;
+
+ // Category boundaries generated with:
+ // perl -E '
+ // @cat = map {$_*8} 3..32, 48, 64;
+ // while ($cat[-1] <= 32768) {
+ // push @cat, $cat[-1]+$cat[-3], $cat[-1]*2
+ // }
+ // push @cat, 4080, 4088;
+ // @cat = sort { $a <=> $b } @cat;
+ // push @cat, "Page::kPageSize";
+ // say join ", ", @cat;
+ // say "\n", scalar @cat'
+ // Note the special case for 4080 and 4088 bytes: experiments have shown that
+ // these categories are used more often than others of similar sizes.
+ static const int kNumberOfCategories = 49;
+ static const size_t categories_max[kNumberOfCategories];
+
+ // Return the smallest category that could hold |size_in_bytes| bytes.
+ FreeListCategoryType SelectFreeListCategoryType(
+ size_t size_in_bytes) override {
+ for (int cat = kFirstCategory; cat < last_category_; cat++) {
+ if (size_in_bytes <= categories_max[cat]) {
+ return cat;
+ }
+ }
+ return last_category_;
+ }
};
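The Perl recipe above encodes the 49 boundaries of |categories_max|; the same sequence can be regenerated with a short C++ sketch (illustrative only, it mirrors the recipe rather than quoting the table):

#include <algorithm>
#include <cstddef>
#include <vector>

// One bucket per multiple of 8 bytes from 24 to 256, plus 384 and 512, then a
// growing tail, plus the special 4080/4088 buckets; the real table appends
// Page::kPageSize as the final (huge) boundary, for 49 categories in total.
std::vector<size_t> FreeListManyBoundariesSketch() {
  std::vector<size_t> cat;
  for (size_t words = 3; words <= 32; ++words) cat.push_back(words * 8);
  cat.push_back(48 * 8);
  cat.push_back(64 * 8);
  while (cat.back() <= 32768) {
    size_t last = cat[cat.size() - 1];
    size_t third_last = cat[cat.size() - 3];
    cat.push_back(last + third_last);
    cat.push_back(last * 2);
  }
  cat.push_back(4080);
  cat.push_back(4088);
  std::sort(cat.begin(), cat.end());
  return cat;  // 48 numeric boundaries; Page::kPageSize would be the 49th.
}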
// LocalAllocationBuffer represents a linear allocation area that is created
@@ -2029,8 +2073,8 @@ class LocalAllocationBuffer {
class SpaceWithLinearArea : public Space {
public:
- SpaceWithLinearArea(Heap* heap, AllocationSpace id)
- : Space(heap, id), top_on_previous_step_(0) {
+ SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
+ : Space(heap, id, free_list), top_on_previous_step_(0) {
allocation_info_.Reset(kNullAddress, kNullAddress);
}
@@ -2091,7 +2135,8 @@ class V8_EXPORT_PRIVATE PagedSpace
static const size_t kCompactionMemoryWanted = 500 * KB;
// Creates a space with an id.
- PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
+ PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
+ FreeList* free_list);
~PagedSpace() override { TearDown(); }
@@ -2119,14 +2164,14 @@ class V8_EXPORT_PRIVATE PagedSpace
// to the available and wasted totals. The free list is cleared as well.
void ClearAllocatorState() {
accounting_stats_.ClearSize();
- free_list_.Reset();
+ free_list_->Reset();
}
// Available bytes without growing. These are the bytes on the free list.
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
- size_t Available() override { return free_list_.Available(); }
+ size_t Available() override { return free_list_->Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
@@ -2140,7 +2185,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation.
- virtual size_t Waste() { return free_list_.wasted_bytes(); }
+ virtual size_t Waste() { return free_list_->wasted_bytes(); }
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
@@ -2173,7 +2218,7 @@ class V8_EXPORT_PRIVATE PagedSpace
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
size_t AccountedFree(Address start, size_t size_in_bytes) {
- size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
+ size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
Page* page = Page::FromAddress(start);
accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
DCHECK_GE(size_in_bytes, wasted);
@@ -2181,7 +2226,7 @@ class V8_EXPORT_PRIVATE PagedSpace
}
size_t UnaccountedFree(Address start, size_t size_in_bytes) {
- size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+ size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
@@ -2211,7 +2256,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void RefineAllocatedBytesAfterSweeping(Page* page);
- Page* InitializePage(MemoryChunk* chunk, Executability executable);
+ Page* InitializePage(MemoryChunk* chunk);
void ReleasePage(Page* page);
@@ -2275,8 +2320,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// sweeper.
virtual void RefillFreeList();
- FreeList* free_list() { return &free_list_; }
-
base::Mutex* mutex() { return &space_mutex_; }
inline void UnlinkFreeListCategories(Page* page);
@@ -2368,9 +2411,6 @@ class V8_EXPORT_PRIVATE PagedSpace
// Accounting information for this space.
AllocationStats accounting_stats_;
- // The space's free list.
- FreeList free_list_;
-
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
@@ -2396,7 +2436,7 @@ class SemiSpace : public Space {
static void Swap(SemiSpace* from, SemiSpace* to);
SemiSpace(Heap* heap, SemiSpaceId semispace)
- : Space(heap, NEW_SPACE),
+ : Space(heap, NEW_SPACE, new NoFreeList()),
current_capacity_(0),
maximum_capacity_(0),
minimum_capacity_(0),
@@ -2465,7 +2505,7 @@ class SemiSpace : public Space {
void RemovePage(Page* page);
void PrependPage(Page* page);
- Page* InitializePage(MemoryChunk* chunk, Executability executable);
+ Page* InitializePage(MemoryChunk* chunk);
// Age mark accessors.
Address age_mark() { return age_mark_; }
@@ -2552,19 +2592,18 @@ class SemiSpace : public Space {
int pages_used_;
friend class NewSpace;
- friend class SemiSpaceIterator;
+ friend class SemiSpaceObjectIterator;
};
-
-// A SemiSpaceIterator is an ObjectIterator that iterates over the active
+// A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space. It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace. New objects allocated after the
// iterator is created are not iterated.
-class SemiSpaceIterator : public ObjectIterator {
+class SemiSpaceObjectIterator : public ObjectIterator {
public:
// Create an iterator over the allocated objects in the given to-space.
- explicit SemiSpaceIterator(NewSpace* space);
+ explicit SemiSpaceObjectIterator(NewSpace* space);
inline HeapObject Next() override;
@@ -2821,7 +2860,7 @@ class V8_EXPORT_PRIVATE NewSpace
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
bool SupportsInlineAllocation() override { return true; }
- friend class SemiSpaceIterator;
+ friend class SemiSpaceObjectIterator;
};
class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
@@ -2840,7 +2879,7 @@ class V8_EXPORT_PRIVATE PauseAllocationObserversScope {
class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
public:
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
- : PagedSpace(heap, id, executable) {}
+ : PagedSpace(heap, id, executable, FreeList::CreateFreeList()) {}
bool is_local() override { return true; }
@@ -2886,7 +2925,9 @@ class OldSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
- explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
+ explicit OldSpace(Heap* heap)
+ : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
+ FreeList::CreateFreeList()) {}
static bool IsAtPageStart(Address addr) {
return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
@@ -2901,7 +2942,8 @@ class CodeSpace : public PagedSpace {
public:
// Creates an old space object. The constructor does not allocate pages
// from OS.
- explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
+ explicit CodeSpace(Heap* heap)
+ : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
};
// For contiguous spaces, top should be in the space (or at the end) and limit
@@ -2918,7 +2960,9 @@ class CodeSpace : public PagedSpace {
class MapSpace : public PagedSpace {
public:
// Creates a map space object.
- explicit MapSpace(Heap* heap) : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE) {}
+ explicit MapSpace(Heap* heap)
+ : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE,
+ FreeList::CreateFreeList()) {}
int RoundSizeDownToObjectAlignment(int size) override {
if (base::bits::IsPowerOfTwo(Map::kSize)) {
@@ -2946,6 +2990,9 @@ class ReadOnlySpace : public PagedSpace {
bool writable() const { return !is_marked_read_only_; }
+ bool Contains(Address a) = delete;
+ bool Contains(Object o) = delete;
+
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
@@ -3056,7 +3103,7 @@ class LargeObjectSpace : public Space {
size_t objects_size_; // size of objects
private:
- friend class LargeObjectIterator;
+ friend class LargeObjectSpaceObjectIterator;
};
class NewLargeObjectSpace : public LargeObjectSpace {
@@ -3112,9 +3159,9 @@ class CodeLargeObjectSpace : public LargeObjectSpace {
std::unordered_map<Address, LargePage*> chunk_map_;
};
-class LargeObjectIterator : public ObjectIterator {
+class LargeObjectSpaceObjectIterator : public ObjectIterator {
public:
- explicit LargeObjectIterator(LargeObjectSpace* space);
+ explicit LargeObjectSpaceObjectIterator(LargeObjectSpace* space);
HeapObject Next() override;
diff --git a/chromium/v8/src/heap/store-buffer.cc b/chromium/v8/src/heap/store-buffer.cc
index e59e72d3a64..7d0dcfc3707 100644
--- a/chromium/v8/src/heap/store-buffer.cc
+++ b/chromium/v8/src/heap/store-buffer.cc
@@ -104,16 +104,7 @@ void StoreBuffer::InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
void StoreBuffer::DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
Address start, Address end) {
- // In GC the store buffer has to be empty at any time.
- DCHECK(store_buffer->Empty());
- DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
- Page* page = Page::FromAddress(start);
- if (end) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
- } else {
- RememberedSet<OLD_TO_NEW>::Remove(page, start);
- }
+ UNREACHABLE();
}
void StoreBuffer::InsertDuringGarbageCollection(StoreBuffer* store_buffer,
diff --git a/chromium/v8/src/heap/stress-marking-observer.cc b/chromium/v8/src/heap/stress-marking-observer.cc
index 091f279a781..bb7720e1188 100644
--- a/chromium/v8/src/heap/stress-marking-observer.cc
+++ b/chromium/v8/src/heap/stress-marking-observer.cc
@@ -9,14 +9,14 @@ namespace v8 {
namespace internal {
// TODO(majeski): meaningful step_size
-StressMarkingObserver::StressMarkingObserver(Heap& heap)
+StressMarkingObserver::StressMarkingObserver(Heap* heap)
: AllocationObserver(64), heap_(heap) {}
void StressMarkingObserver::Step(int bytes_allocated, Address soon_object,
size_t size) {
- heap_.StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
- kNoGCCallbackFlags);
- heap_.incremental_marking()->EnsureBlackAllocated(soon_object, size);
+ heap_->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
+ kNoGCCallbackFlags);
+ heap_->incremental_marking()->EnsureBlackAllocated(soon_object, size);
}
} // namespace internal
diff --git a/chromium/v8/src/heap/stress-marking-observer.h b/chromium/v8/src/heap/stress-marking-observer.h
index 37ebb821974..5736ba9289b 100644
--- a/chromium/v8/src/heap/stress-marking-observer.h
+++ b/chromium/v8/src/heap/stress-marking-observer.h
@@ -12,12 +12,12 @@ namespace internal {
class StressMarkingObserver : public AllocationObserver {
public:
- explicit StressMarkingObserver(Heap& heap);
+ explicit StressMarkingObserver(Heap* heap);
void Step(int bytes_allocated, Address soon_object, size_t size) override;
private:
- Heap& heap_;
+ Heap* heap_;
};
} // namespace internal
diff --git a/chromium/v8/src/heap/stress-scavenge-observer.cc b/chromium/v8/src/heap/stress-scavenge-observer.cc
index b91825c38b8..5aa3419ed77 100644
--- a/chromium/v8/src/heap/stress-scavenge-observer.cc
+++ b/chromium/v8/src/heap/stress-scavenge-observer.cc
@@ -13,7 +13,7 @@ namespace v8 {
namespace internal {
// TODO(majeski): meaningful step_size
-StressScavengeObserver::StressScavengeObserver(Heap& heap)
+StressScavengeObserver::StressScavengeObserver(Heap* heap)
: AllocationObserver(64),
heap_(heap),
has_requested_gc_(false),
@@ -21,22 +21,22 @@ StressScavengeObserver::StressScavengeObserver(Heap& heap)
limit_percentage_ = NextLimit();
if (FLAG_trace_stress_scavenge && !FLAG_fuzzer_gc_analysis) {
- heap_.isolate()->PrintWithTimestamp(
+ heap_->isolate()->PrintWithTimestamp(
"[StressScavenge] %d%% is the new limit\n", limit_percentage_);
}
}
void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
size_t size) {
- if (has_requested_gc_ || heap_.new_space()->Capacity() == 0) {
+ if (has_requested_gc_ || heap_->new_space()->Capacity() == 0) {
return;
}
double current_percent =
- heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+ heap_->new_space()->Size() * 100.0 / heap_->new_space()->Capacity();
if (FLAG_trace_stress_scavenge) {
- heap_.isolate()->PrintWithTimestamp(
+ heap_->isolate()->PrintWithTimestamp(
"[Scavenge] %.2lf%% of the new space capacity reached\n",
current_percent);
}
@@ -49,11 +49,11 @@ void StressScavengeObserver::Step(int bytes_allocated, Address soon_object,
if (static_cast<int>(current_percent) >= limit_percentage_) {
if (FLAG_trace_stress_scavenge) {
- heap_.isolate()->PrintWithTimestamp("[Scavenge] GC requested\n");
+ heap_->isolate()->PrintWithTimestamp("[Scavenge] GC requested\n");
}
has_requested_gc_ = true;
- heap_.isolate()->stack_guard()->RequestGC();
+ heap_->isolate()->stack_guard()->RequestGC();
}
}
@@ -63,15 +63,15 @@ bool StressScavengeObserver::HasRequestedGC() const {
void StressScavengeObserver::RequestedGCDone() {
double current_percent =
- heap_.new_space()->Size() * 100.0 / heap_.new_space()->Capacity();
+ heap_->new_space()->Size() * 100.0 / heap_->new_space()->Capacity();
limit_percentage_ = NextLimit(static_cast<int>(current_percent));
if (FLAG_trace_stress_scavenge) {
- heap_.isolate()->PrintWithTimestamp(
+ heap_->isolate()->PrintWithTimestamp(
"[Scavenge] %.2lf%% of the new space capacity reached\n",
current_percent);
- heap_.isolate()->PrintWithTimestamp("[Scavenge] %d%% is the new limit\n",
- limit_percentage_);
+ heap_->isolate()->PrintWithTimestamp("[Scavenge] %d%% is the new limit\n",
+ limit_percentage_);
}
has_requested_gc_ = false;
@@ -87,7 +87,7 @@ int StressScavengeObserver::NextLimit(int min) {
return max;
}
- return min + heap_.isolate()->fuzzer_rng()->NextInt(max - min + 1);
+ return min + heap_->isolate()->fuzzer_rng()->NextInt(max - min + 1);
}
} // namespace internal
diff --git a/chromium/v8/src/heap/stress-scavenge-observer.h b/chromium/v8/src/heap/stress-scavenge-observer.h
index b39b2eac598..4996323b755 100644
--- a/chromium/v8/src/heap/stress-scavenge-observer.h
+++ b/chromium/v8/src/heap/stress-scavenge-observer.h
@@ -12,7 +12,7 @@ namespace internal {
class StressScavengeObserver : public AllocationObserver {
public:
- explicit StressScavengeObserver(Heap& heap);
+ explicit StressScavengeObserver(Heap* heap);
void Step(int bytes_allocated, Address soon_object, size_t size) override;
@@ -24,7 +24,7 @@ class StressScavengeObserver : public AllocationObserver {
double MaxNewSpaceSizeReached() const;
private:
- Heap& heap_;
+ Heap* heap_;
int limit_percentage_;
bool has_requested_gc_;
diff --git a/chromium/v8/src/heap/sweeper.cc b/chromium/v8/src/heap/sweeper.cc
index 8f7b55bf2b9..cbb7d717b07 100644
--- a/chromium/v8/src/heap/sweeper.cc
+++ b/chromium/v8/src/heap/sweeper.cc
@@ -184,7 +184,7 @@ void Sweeper::StartSweeperTasks() {
void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
if (!page->SweepingDone()) {
- ParallelSweepPage(page, page->owner()->identity());
+ ParallelSweepPage(page, page->owner_identity());
if (!page->SweepingDone()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
@@ -370,7 +370,9 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
p->set_concurrent_sweeping_state(Page::kSweepingDone);
if (code_object_registry) code_object_registry->Finalize();
if (free_list_mode == IGNORE_FREE_LIST) return 0;
- return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
+
+ return static_cast<int>(
+ p->free_list()->GuaranteedAllocatable(max_freed_bytes));
}
void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
@@ -500,7 +502,7 @@ Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
}
void Sweeper::EnsurePageIsIterable(Page* page) {
- AllocationSpace space = page->owner()->identity();
+ AllocationSpace space = page->owner_identity();
if (IsValidSweepingSpace(space)) {
SweepOrWaitUntilSweepingCompleted(page);
} else {
@@ -573,7 +575,7 @@ void Sweeper::AddPageForIterability(Page* page) {
DCHECK(sweeping_in_progress_);
DCHECK(iterability_in_progress_);
DCHECK(!iterability_task_started_);
- DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+ DCHECK(IsValidIterabilitySpace(page->owner_identity()));
DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
iterability_list_.push_back(page);
@@ -581,7 +583,7 @@ void Sweeper::AddPageForIterability(Page* page) {
}
void Sweeper::MakeIterable(Page* page) {
- DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
+ DCHECK(IsValidIterabilitySpace(page->owner_identity()));
const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);