author    Michaël Zasso <targos@protonmail.com>  2018-01-24 20:16:06 +0100
committer Myles Borins <mylesborins@google.com>  2018-01-24 15:02:20 -0800
commit    4c4af643e5042d615a60c6bbc05aee9d81b903e5 (patch)
tree      3fb0a97988fe4439ae3ae06f26915d1dcf8cab92 /deps/v8/src/heap/heap.cc
parent    fa9f31a4fda5a3782c652e56e394465805ebb50f (diff)
download  node-new-4c4af643e5042d615a60c6bbc05aee9d81b903e5.tar.gz
deps: update V8 to 6.4.388.40
PR-URL: https://github.com/nodejs/node/pull/17489
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/src/heap/heap.cc')
-rw-r--r--  deps/v8/src/heap/heap.cc  755
1 file changed, 495 insertions, 260 deletions
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 458c6c7e09..d90f086be4 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -15,7 +15,7 @@
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
-#include "src/codegen.h"
+#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/conversions.h"
@@ -23,6 +23,7 @@
#include "src/deoptimizer.h"
#include "src/feedback-vector.h"
#include "src/global-handles.h"
+#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/barrier.h"
#include "src/heap/code-stats.h"
@@ -42,6 +43,7 @@
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
+#include "src/heap/sweeper.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/object-macros.h"
#include "src/objects/shared-function-info.h"
@@ -82,16 +84,6 @@ void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
}
-void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
- DCHECK_EQ(Smi::kZero, getter_stub_deopt_pc_offset());
- set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
-void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
- DCHECK_EQ(Smi::kZero, setter_stub_deopt_pc_offset());
- set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-}
-
void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
@@ -170,10 +162,12 @@ Heap::Heap()
contexts_disposed_(0),
number_of_disposed_maps_(0),
new_space_(nullptr),
- old_space_(NULL),
- code_space_(NULL),
- map_space_(NULL),
- lo_space_(NULL),
+ old_space_(nullptr),
+ code_space_(nullptr),
+ map_space_(nullptr),
+ lo_space_(nullptr),
+ write_protect_code_memory_(false),
+ code_space_memory_modification_scope_depth_(0),
gc_state_(NOT_IN_GC),
gc_post_processing_depth_(0),
allocations_count_(0),
@@ -201,6 +195,7 @@ Heap::Heap()
last_gc_time_(0.0),
mark_compact_collector_(nullptr),
minor_mark_compact_collector_(nullptr),
+ array_buffer_collector_(nullptr),
memory_allocator_(nullptr),
store_buffer_(nullptr),
incremental_marking_(nullptr),
@@ -225,7 +220,7 @@ Heap::Heap()
external_string_table_(this),
gc_callbacks_depth_(0),
deserialization_complete_(false),
- strong_roots_list_(NULL),
+ strong_roots_list_(nullptr),
heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr),
fast_promotion_mode_(false),
@@ -237,12 +232,12 @@ Heap::Heap()
DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
- set_native_contexts_list(NULL);
+ set_native_contexts_list(nullptr);
set_allocation_sites_list(Smi::kZero);
set_encountered_weak_collections(Smi::kZero);
// Put a dummy entry in the remembered pages so we can find the list in the
// minidump even if there are no real unmapped pages.
- RememberUnmappedPage(NULL, false);
+ RememberUnmappedPage(nullptr, false);
}
size_t Heap::Capacity() {
@@ -253,7 +248,6 @@ size_t Heap::Capacity() {
size_t Heap::OldGenerationCapacity() {
if (!HasBeenSetUp()) return 0;
-
return old_space_->Capacity() + code_space_->Capacity() +
map_space_->Capacity() + lo_space_->SizeOfObjects();
}
@@ -303,16 +297,24 @@ size_t Heap::Available() {
size_t total = 0;
AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
total += space->Available();
}
return total;
}
+bool Heap::CanExpandOldGeneration(size_t size) {
+ if (force_oom_) return false;
+ if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
+ // The OldGenerationCapacity does not account for compaction spaces used
+ // during evacuation. Ensure that expanding the old generation does not
+ // push the total allocated memory size over the maximum heap size.
+ return memory_allocator()->Size() + size <= MaxReserved();
+}
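// Worked example for the check above (hypothetical numbers): assume
// MaxOldGenerationSize() is 512 MB, MaxReserved() is 768 MB,
// OldGenerationCapacity() is 480 MB, and memory_allocator()->Size() is
// 760 MB because evacuation is currently holding compaction spaces.
// A request to expand by 16 MB passes the capacity check (480 + 16 <= 512)
// but fails the allocator check (760 + 16 > 768), so the expansion is
// refused rather than letting the total allocation exceed the reserved
// maximum.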
bool Heap::HasBeenSetUp() {
- return old_space_ != NULL && code_space_ != NULL && map_space_ != NULL &&
- lo_space_ != NULL;
+ return old_space_ != nullptr && code_space_ != nullptr &&
+ map_space_ != nullptr && lo_space_ != nullptr;
}
@@ -336,16 +338,8 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return MARK_COMPACTOR;
}
- // Is there enough space left in OLD to guarantee that a scavenge can
- // succeed?
- //
- // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
- // for object promotion. It counts only the bytes that the memory
- // allocator has not yet allocated from the OS and assigned to any space,
- // and does not count available bytes already in the old space or code
- // space. Undercounting is safe---we may get an unrequested full GC when
- // a scavenge would have succeeded.
- if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
+ // Over-estimate the new space size using capacity to allow some slack.
+ if (!CanExpandOldGeneration(new_space_->TotalCapacity())) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
@@ -354,7 +348,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
}
// Default
- *reason = NULL;
+ *reason = nullptr;
return YoungGenerationCollector();
}
@@ -469,21 +463,30 @@ void Heap::ReportStatisticsAfterGC() {
}
}
-void Heap::AddRetainingPathTarget(Handle<HeapObject> object) {
+void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
+ RetainingPathOption option) {
if (!FLAG_track_retaining_path) {
PrintF("Retaining path tracking requires --trace-retaining-path\n");
} else {
+ int index = 0;
Handle<WeakFixedArray> array = WeakFixedArray::Add(
- handle(retaining_path_targets(), isolate()), object);
+ handle(retaining_path_targets(), isolate()), object, &index);
set_retaining_path_targets(*array);
+ retaining_path_target_option_[index] = option;
}
}
-bool Heap::IsRetainingPathTarget(HeapObject* object) {
- WeakFixedArray::Iterator it(retaining_path_targets());
- HeapObject* target;
- while ((target = it.Next<HeapObject>()) != nullptr) {
- if (target == object) return true;
+bool Heap::IsRetainingPathTarget(HeapObject* object,
+ RetainingPathOption* option) {
+ if (!retaining_path_targets()->IsWeakFixedArray()) return false;
+ WeakFixedArray* targets = WeakFixedArray::cast(retaining_path_targets());
+ int length = targets->Length();
+ for (int i = 0; i < length; i++) {
+ if (targets->Get(i) == object) {
+ DCHECK(retaining_path_target_option_.count(i));
+ *option = retaining_path_target_option_[i];
+ return true;
+ }
}
return false;
}
@@ -512,17 +515,23 @@ const char* RootToString(Root root) {
}
} // namespace
-void Heap::PrintRetainingPath(HeapObject* target) {
+void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
PrintF("\n\n\n");
PrintF("#################################################\n");
PrintF("Retaining path for %p:\n", static_cast<void*>(target));
HeapObject* object = target;
- std::vector<HeapObject*> retaining_path;
+ std::vector<std::pair<HeapObject*, bool>> retaining_path;
Root root = Root::kUnknown;
+ bool ephemeral = false;
while (true) {
- retaining_path.push_back(object);
- if (retainer_.count(object)) {
+ retaining_path.push_back(std::make_pair(object, ephemeral));
+ if (option == RetainingPathOption::kTrackEphemeralPath &&
+ ephemeral_retainer_.count(object)) {
+ object = ephemeral_retainer_[object];
+ ephemeral = true;
+ } else if (retainer_.count(object)) {
object = retainer_[object];
+ ephemeral = false;
} else {
if (retaining_root_.count(object)) {
root = retaining_root_[object];
@@ -531,10 +540,13 @@ void Heap::PrintRetainingPath(HeapObject* target) {
}
}
int distance = static_cast<int>(retaining_path.size());
- for (auto object : retaining_path) {
+ for (auto node : retaining_path) {
+ HeapObject* object = node.first;
+ bool ephemeral = node.second;
PrintF("\n");
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
- PrintF("Distance from root %d: ", distance);
+ PrintF("Distance from root %d%s: ", distance,
+ ephemeral ? " (ephemeral)" : "");
object->ShortPrint();
PrintF("\n");
#ifdef OBJECT_PRINT
@@ -550,16 +562,38 @@ void Heap::PrintRetainingPath(HeapObject* target) {
}
void Heap::AddRetainer(HeapObject* retainer, HeapObject* object) {
+ if (retainer_.count(object)) return;
retainer_[object] = retainer;
- if (IsRetainingPathTarget(object)) {
- PrintRetainingPath(object);
+ RetainingPathOption option = RetainingPathOption::kDefault;
+ if (IsRetainingPathTarget(object, &option)) {
+ // Check if the retaining path was already printed in
+ // AddEphemeralRetainer().
+ if (ephemeral_retainer_.count(object) == 0 ||
+ option == RetainingPathOption::kDefault) {
+ PrintRetainingPath(object, option);
+ }
+ }
+}
+
+void Heap::AddEphemeralRetainer(HeapObject* retainer, HeapObject* object) {
+ if (ephemeral_retainer_.count(object)) return;
+ ephemeral_retainer_[object] = retainer;
+ RetainingPathOption option = RetainingPathOption::kDefault;
+ if (IsRetainingPathTarget(object, &option) &&
+ option == RetainingPathOption::kTrackEphemeralPath) {
+ // Check if the retaining path was already printed in AddRetainer().
+ if (retainer_.count(object) == 0) {
+ PrintRetainingPath(object, option);
+ }
}
}
void Heap::AddRetainingRoot(Root root, HeapObject* object) {
+ if (retaining_root_.count(object)) return;
retaining_root_[object] = root;
- if (IsRetainingPathTarget(object)) {
- PrintRetainingPath(object);
+ RetainingPathOption option = RetainingPathOption::kDefault;
+ if (IsRetainingPathTarget(object, &option)) {
+ PrintRetainingPath(object, option);
}
}
@@ -609,6 +643,7 @@ void Heap::GarbageCollectionPrologue() {
UpdateNewSpaceAllocationCounter();
if (FLAG_track_retaining_path) {
retainer_.clear();
+ ephemeral_retainer_.clear();
retaining_root_.clear();
}
}
@@ -616,7 +651,7 @@ void Heap::GarbageCollectionPrologue() {
size_t Heap::SizeOfObjects() {
size_t total = 0;
AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
total += space->SizeOfObjects();
}
return total;
@@ -641,13 +676,13 @@ const char* Heap::GetSpaceName(int idx) {
return nullptr;
}
-void Heap::SetRootCodeStubs(UnseededNumberDictionary* value) {
+void Heap::SetRootCodeStubs(NumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
}
void Heap::RepairFreeListsAfterDeserialization() {
PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != NULL;
+ for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
space->RepairFreeListsAfterDeserialization();
}
@@ -836,6 +871,17 @@ void Heap::ProcessPretenuringFeedback() {
}
}
+void Heap::InvalidateCodeEmbeddedObjects(Code* code) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
+ CodePageMemoryModificationScope modification_scope(chunk);
+ code->InvalidateEmbeddedObjects();
+}
+
+void Heap::InvalidateCodeDeoptimizationData(Code* code) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
+ CodePageMemoryModificationScope modification_scope(chunk);
+ code->set_deoptimization_data(empty_fixed_array());
+}
void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
@@ -1145,8 +1191,7 @@ void Heap::ReportExternalMemoryPressure() {
current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
incremental_marking()->AdvanceIncrementalMarking(
- deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
}
@@ -1169,7 +1214,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
// The VM is in the GC state until exiting this function.
VMState<GC> state(isolate());
- const char* collector_reason = NULL;
+ const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
#ifdef DEBUG
@@ -1439,6 +1484,13 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
}
}
if (perform_gc) {
+ // We cannot perform a GC with an uninitialized isolate. This check
+ // fails for example if the max old space size is chosen unwisely,
+ // so that we cannot allocate space to deserialize the initial heap.
+ if (!deserialization_complete_) {
+ V8::FatalProcessOutOfMemory(
+ "insufficient memory to create an Isolate");
+ }
if (space == NEW_SPACE) {
CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
} else {
@@ -1470,26 +1522,6 @@ void Heap::EnsureFromSpaceIsCommitted() {
}
-void Heap::ClearNormalizedMapCaches() {
- if (isolate_->bootstrapper()->IsActive() &&
- !incremental_marking()->IsMarking()) {
- return;
- }
-
- Object* context = native_contexts_list();
- while (!context->IsUndefined(isolate())) {
- // GC can happen when the context is not fully initialized,
- // so the cache can be undefined.
- Object* cache =
- Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
- if (!cache->IsUndefined(isolate())) {
- NormalizedMapCache::cast(cache)->Clear();
- }
- context = Context::cast(context)->next_context_link();
- }
-}
-
-
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
@@ -1542,7 +1574,7 @@ bool Heap::PerformGarbageCollection(
EnsureFromSpaceIsCommitted();
- int start_new_space_size = static_cast<int>(Heap::new_space()->Size());
+ size_t start_new_space_size = Heap::new_space()->Size();
{
Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
@@ -1581,9 +1613,16 @@ bool Heap::PerformGarbageCollection(
ProcessPretenuringFeedback();
}
- UpdateSurvivalStatistics(start_new_space_size);
+ UpdateSurvivalStatistics(static_cast<int>(start_new_space_size));
ConfigureInitialOldGenerationSize();
+ if (collector != MARK_COMPACTOR) {
+ // Objects that died in the new space might have been accounted
+ // as bytes marked ahead of schedule by the incremental marker.
+ incremental_marking()->UpdateMarkedBytesAfterScavenge(
+ start_new_space_size - SurvivedNewSpaceObjectSize());
+ }
+
if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
ComputeFastPromotionMode(promotion_ratio_ + semi_space_copied_rate_);
}
@@ -1672,6 +1711,8 @@ void Heap::MarkCompact() {
uint64_t size_of_objects_before_gc = SizeOfObjects();
+ CodeSpaceMemoryModificationScope code_modification(this);
+
mark_compact_collector()->Prepare();
ms_count_++;
@@ -1730,7 +1771,6 @@ void Heap::MarkCompactPrologue() {
isolate_->compilation_cache()->MarkCompactPrologue();
FlushNumberStringCache();
- ClearNormalizedMapCaches();
}
@@ -1753,7 +1793,7 @@ void Heap::CheckNewSpaceExpansionCriteria() {
}
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
- return heap->InNewSpace(*p) &&
+ return heap->InFromSpace(*p) &&
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
@@ -1770,7 +1810,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
- return NULL;
+ return nullptr;
}
private:
@@ -1786,7 +1826,7 @@ void Heap::EvacuateYoungGeneration() {
DCHECK(CanExpandOldGeneration(new_space()->Size()));
}
- mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+ mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
SetGCState(SCAVENGE);
LOG(isolate_, ResourceEvent("scavenge", "begin"));
@@ -1827,10 +1867,15 @@ static bool IsLogging(Isolate* isolate) {
isolate->heap_profiler()->is_tracking_object_moves());
}
-class ScavengingItem : public ItemParallelJob::Item {
+class PageScavengingItem final : public ItemParallelJob::Item {
public:
- virtual ~ScavengingItem() {}
- virtual void Process(Scavenger* scavenger) = 0;
+ explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
+ virtual ~PageScavengingItem() {}
+
+ void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
+
+ private:
+ MemoryChunk* const chunk_;
};
class ScavengingTask final : public ItemParallelJob::Task {
@@ -1846,8 +1891,8 @@ class ScavengingTask final : public ItemParallelJob::Task {
{
barrier_->Start();
TimedScope scope(&scavenging_time);
- ScavengingItem* item = nullptr;
- while ((item = GetItem<ScavengingItem>()) != nullptr) {
+ PageScavengingItem* item = nullptr;
+ while ((item = GetItem<PageScavengingItem>()) != nullptr) {
item->Process(scavenger_);
item->MarkFinished();
}
@@ -1870,41 +1915,6 @@ class ScavengingTask final : public ItemParallelJob::Task {
OneshotBarrier* const barrier_;
};
-class PageScavengingItem final : public ScavengingItem {
- public:
- explicit PageScavengingItem(Heap* heap, MemoryChunk* chunk)
- : heap_(heap), chunk_(chunk) {}
- virtual ~PageScavengingItem() {}
-
- void Process(Scavenger* scavenger) final {
- base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
- scavenger->AnnounceLockedPage(chunk_);
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk_,
- [this, scavenger](Address addr) {
- return scavenger->CheckAndScavengeObject(heap_, addr);
- },
- SlotSet::KEEP_EMPTY_BUCKETS);
- RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_,
- [this, scavenger](SlotType type, Address host_addr, Address addr) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap_->isolate(), type, addr, [this, scavenger](Object** addr) {
- // We expect that objects referenced by code are long
- // living. If we do not force promotion, then we need to
- // clear old_to_new slots in dead code objects after
- // mark-compact.
- return scavenger->CheckAndScavengeObject(
- heap_, reinterpret_cast<Address>(addr));
- });
- });
- }
-
- private:
- Heap* const heap_;
- MemoryChunk* const chunk_;
-};
-
int Heap::NumberOfScavengeTasks() {
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
@@ -1932,13 +1942,14 @@ void Heap::Scavenge() {
IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
incremental_marking());
- if (mark_compact_collector()->sweeper().sweeping_in_progress() &&
+ if (mark_compact_collector()->sweeper()->sweeping_in_progress() &&
memory_allocator_->unmapper()->NumberOfDelayedChunks() >
static_cast<int>(new_space_->MaximumCapacity() / Page::kPageSize)) {
mark_compact_collector()->EnsureSweepingCompleted();
}
- mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+ // TODO(mlippautz): Untangle the dependency of the unmapper from the sweeper.
+ mark_compact_collector()->sweeper()->EnsureNewSpaceCompleted();
SetGCState(SCAVENGE);
@@ -1965,51 +1976,74 @@ void Heap::Scavenge() {
job.AddTask(new ScavengingTask(this, scavengers[i], &barrier));
}
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- this, [this, &job](MemoryChunk* chunk) {
- job.AddItem(new PageScavengingItem(this, chunk));
- });
+ {
+ Sweeper* sweeper = mark_compact_collector()->sweeper();
+ // Pause the concurrent sweeper.
+ Sweeper::PauseOrCompleteScope pause_scope(sweeper);
+ // Filter out pages from the sweeper that need to be processed for old to
+ // new slots by the Scavenger. After processing, the Scavenger adds back
+ // pages that are still unsweeped. This way the Scavenger has exclusive
+ // access to the slots of a page and can completely avoid any locks on
+ // the page itself.
+ Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
+ filter_scope.FilterOldSpaceSweepingPages(
+ [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ this, [&job](MemoryChunk* chunk) {
+ job.AddItem(new PageScavengingItem(chunk));
+ });
- RootScavengeVisitor root_scavenge_visitor(this, scavengers[kMainThreadId]);
+ RootScavengeVisitor root_scavenge_visitor(this, scavengers[kMainThreadId]);
- {
- // Identify weak unmodified handles. Requires an unmodified graph.
- TRACE_GC(tracer(),
- GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
- isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
- &JSObject::IsUnmodifiedApiObject);
- }
- {
- // Copy roots.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
- IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
- }
- {
- // Weak collections are held strongly by the Scavenger.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK);
- IterateEncounteredWeakCollections(&root_scavenge_visitor);
- }
- {
- // Parallel phase scavenging all copied and promoted objects.
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
- job.Run();
- DCHECK(copied_list.IsGlobalEmpty());
- DCHECK(promotion_list.IsGlobalEmpty());
- }
- {
- // Scavenge weak global handles.
- TRACE_GC(tracer(),
- GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
- isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
- &IsUnscavengedHeapObject);
- isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
- &root_scavenge_visitor);
- scavengers[kMainThreadId]->Process();
- }
+ {
+ // Identify weak unmodified handles. Requires an unmodified graph.
+ TRACE_GC(
+ tracer(),
+ GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
+ isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &JSObject::IsUnmodifiedApiObject);
+ }
+ {
+ // Copy roots.
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
+ IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+ }
+ {
+ // Weak collections are held strongly by the Scavenger.
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK);
+ IterateEncounteredWeakCollections(&root_scavenge_visitor);
+ }
+ {
+ // Parallel phase scavenging all copied and promoted objects.
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
+ job.Run();
+ DCHECK(copied_list.IsGlobalEmpty());
+ DCHECK(promotion_list.IsGlobalEmpty());
+ }
+ {
+ // Scavenge weak global handles.
+ TRACE_GC(tracer(),
+ GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
+ isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ &IsUnscavengedHeapObject);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
+ &root_scavenge_visitor);
+ scavengers[kMainThreadId]->Process();
+
+ DCHECK(copied_list.IsGlobalEmpty());
+ DCHECK(promotion_list.IsGlobalEmpty());
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+ &root_scavenge_visitor, &IsUnscavengedHeapObject);
+ }
- for (int i = 0; i < num_scavenge_tasks; i++) {
- scavengers[i]->Finalize();
- delete scavengers[i];
+ for (int i = 0; i < num_scavenge_tasks; i++) {
+ scavengers[i]->Finalize();
+ delete scavengers[i];
+ }
}
UpdateNewSpaceReferencesInExternalStringTable(
@@ -2032,7 +2066,11 @@ void Heap::Scavenge() {
// Set age mark.
new_space_->set_age_mark(new_space_->top());
- ArrayBufferTracker::FreeDeadInNewSpace(this);
+ {
+ TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS);
+ ArrayBufferTracker::PrepareToFreeDeadInNewSpace(this);
+ }
+ array_buffer_collector()->FreeAllocationsOnBackgroundThread();
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(this, [](MemoryChunk* chunk) {
if (chunk->SweepingDone()) {
@@ -2078,10 +2116,10 @@ String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
if (!string->IsExternalString()) {
// Original external string has been internalized.
DCHECK(string->IsThinString());
- return NULL;
+ return nullptr;
}
heap->FinalizeExternalString(string);
- return NULL;
+ return nullptr;
}
// String is still reachable.
@@ -2117,7 +2155,7 @@ void Heap::ExternalStringTable::UpdateNewSpaceReferences(
for (Object** p = start; p < end; ++p) {
String* target = updater_func(heap_, p);
- if (target == NULL) continue;
+ if (target == nullptr) continue;
DCHECK(target->IsExternalString());
@@ -2371,9 +2409,9 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
}
// GetVisitorId requires a properly initialized LayoutDescriptor.
map->set_visitor_id(Map::GetVisitorId(map));
- map->clear_unused();
- map->set_inobject_properties_or_constructor_function_index(0);
- map->set_unused_property_fields(0);
+ map->set_inobject_properties_start_or_constructor_function_index(0);
+ DCHECK(!map->IsJSObjectMap());
+ map->SetInObjectUnusedPropertyFields(0);
map->set_bit_field(0);
map->set_bit_field2(0);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
@@ -2385,10 +2423,10 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
return map;
}
-
AllocationResult Heap::AllocateMap(InstanceType instance_type,
int instance_size,
- ElementsKind elements_kind) {
+ ElementsKind elements_kind,
+ int inobject_properties) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
DCHECK_IMPLIES(instance_type >= FIRST_JS_OBJECT_TYPE &&
!Map::CanHaveFastTransitionableElementsKind(instance_type),
@@ -2405,13 +2443,19 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
map->set_instance_size(instance_size);
- map->clear_unused();
- map->set_inobject_properties_or_constructor_function_index(0);
+ if (map->IsJSObjectMap()) {
+ map->SetInObjectPropertiesStartInWords(instance_size / kPointerSize -
+ inobject_properties);
+ DCHECK_EQ(map->GetInObjectProperties(), inobject_properties);
+ } else {
+ DCHECK_EQ(inobject_properties, 0);
+ map->set_inobject_properties_start_or_constructor_function_index(0);
+ }
map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
map->set_weak_cell_cache(Smi::kZero);
map->set_raw_transitions(Smi::kZero);
- map->set_unused_property_fields(0);
+ map->SetInObjectUnusedPropertyFields(inobject_properties);
map->set_instance_descriptors(empty_descriptor_array());
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
@@ -2469,20 +2513,18 @@ AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
return result;
}
-AllocationResult Heap::AllocateBigInt(int length, bool zero_initialize,
- PretenureFlag pretenure) {
+AllocationResult Heap::AllocateBigInt(int length) {
if (length < 0 || length > BigInt::kMaxLength) {
v8::internal::Heap::FatalProcessOutOfMemory("invalid BigInt length", true);
}
int size = BigInt::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
+ AllocationSpace space = SelectSpace(NOT_TENURED);
HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
}
result->set_map_after_allocation(bigint_map(), SKIP_WRITE_BARRIER);
- BigInt::cast(result)->Initialize(length, zero_initialize);
return result;
}
@@ -2611,7 +2653,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kWeakNewSpaceObjectToCodeListRootIndex:
case kRetainedMapsRootIndex:
case kRetainingPathTargetsRootIndex:
- case kCodeCoverageListRootIndex:
+ case kFeedbackVectorsForProfilingToolsRootIndex:
case kNoScriptSharedFunctionInfosRootIndex:
case kWeakStackTraceListRootIndex:
case kSerializedTemplatesRootIndex:
@@ -2620,6 +2662,9 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kApiSymbolTableRootIndex:
case kApiPrivateSymbolTableRootIndex:
case kMessageListenersRootIndex:
+ case kDeserializeLazyHandlerRootIndex:
+ case kDeserializeLazyHandlerWideRootIndex:
+ case kDeserializeLazyHandlerExtraWideRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
SMI_ROOT_LIST(SMI_ENTRY)
@@ -2836,8 +2881,8 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
}
// At this point, we may be deserializing the heap from a snapshot, and
- // none of the maps have been created yet and are NULL.
- DCHECK((filler->map() == NULL && !deserialization_complete_) ||
+ // none of the maps have been created yet and are nullptr.
+ DCHECK((filler->map() == nullptr && !deserialization_complete_) ||
filler->map()->IsMap());
return filler;
}
@@ -2925,16 +2970,12 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
DCHECK_GE(elements_to_trim, 0);
int bytes_to_trim;
- if (object->IsFixedTypedArrayBase()) {
- InstanceType type = object->map()->instance_type();
- bytes_to_trim =
- FixedTypedArrayBase::TypedArraySize(type, len) -
- FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
- } else if (object->IsByteArray()) {
+ DCHECK(!object->IsFixedTypedArrayBase());
+ if (object->IsByteArray()) {
int new_size = ByteArray::SizeFor(len - elements_to_trim);
bytes_to_trim = ByteArray::SizeFor(len) - new_size;
DCHECK_GE(bytes_to_trim, 0);
- } else if (object->IsFixedArray() || object->IsTransitionArray()) {
+ } else if (object->IsFixedArray()) {
bytes_to_trim = elements_to_trim * kPointerSize;
} else {
DCHECK(object->IsFixedDoubleArray());
@@ -3060,23 +3101,21 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
return elements;
}
-
-AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
+AllocationResult Heap::AllocateCode(int object_size, Movability movability) {
DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
HeapObject* result = nullptr;
if (!allocation.To(&result)) return allocation;
- if (immovable) {
+ if (movability == kImmovable) {
Address address = result->address();
MemoryChunk* chunk = MemoryChunk::FromAddress(address);
// Code objects which should stay at a fixed address are allocated either
- // in the first page of code space (objects on the first page of each space
- // are never moved), in large object space, or (during snapshot creation)
- // the containing page is marked as immovable.
- if (!Heap::IsImmovable(result) &&
- !code_space_->FirstPage()->Contains(address)) {
- if (isolate()->serializer_enabled()) {
+ // in the first page of code space, in large object space, or (during
+ // snapshot creation) the containing page is marked as immovable.
+ if (!Heap::IsImmovable(result)) {
+ if (isolate()->serializer_enabled() ||
+ code_space_->FirstPage()->Contains(address)) {
chunk->MarkNeverEvacuate();
} else {
// Discard the first code allocation, which was on a page where it could
@@ -3099,8 +3138,86 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
return code;
}
+AllocationResult Heap::AllocateCode(
+ const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
+ int32_t builtin_index, ByteArray* reloc_info,
+ CodeDataContainer* data_container, HandlerTable* handler_table,
+ ByteArray* source_position_table, DeoptimizationData* deopt_data,
+ Movability movability, uint32_t stub_key, bool is_turbofanned,
+ int stack_slots, int safepoint_table_offset) {
+ bool has_unwinding_info = desc.unwinding_info != nullptr;
+ DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
+ (!has_unwinding_info && desc.unwinding_info_size == 0));
+
+ // Compute size.
+ int body_size = desc.instr_size;
+ int unwinding_info_size_field_size = kInt64Size;
+ if (has_unwinding_info) {
+ body_size = RoundUp(body_size, kInt64Size) + desc.unwinding_info_size +
+ unwinding_info_size_field_size;
+ }
+ int object_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
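// Size computation, worked through with hypothetical numbers: for
// desc.instr_size == 100, desc.unwinding_info_size == 24 and kInt64Size == 8,
// body_size becomes RoundUp(100, 8) + 24 + 8 == 136, and object_size is
// Code::SizeFor(RoundUp(136, kObjectAlignment)), i.e. the aligned body plus
// the Code header.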
+
+ Code* code = nullptr;
+ CodeSpaceMemoryModificationScope code_allocation(this);
+ AllocationResult allocation = AllocateCode(object_size, movability);
+ if (!allocation.To(&code)) return allocation;
+
+ // The code object has not been fully initialized yet. We rely on the
+ // fact that no allocation will happen from this point on.
+ DisallowHeapAllocation no_gc;
+ code->set_instruction_size(desc.instr_size);
+ code->set_relocation_info(reloc_info);
+ code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots);
+ code->set_safepoint_table_offset(safepoint_table_offset);
+ code->set_code_data_container(data_container);
+ code->set_has_tagged_params(true);
+ code->set_deoptimization_data(deopt_data);
+ code->set_stub_key(stub_key);
+ code->set_handler_table(handler_table);
+ code->set_source_position_table(source_position_table);
+ code->set_protected_instructions(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
+ code->set_builtin_index(builtin_index);
+ code->set_trap_handler_index(Smi::FromInt(-1));
+
+ switch (code->kind()) {
+ case Code::OPTIMIZED_FUNCTION:
+ code->set_marked_for_deoptimization(false);
+ break;
+ case Code::JS_TO_WASM_FUNCTION:
+ case Code::C_WASM_ENTRY:
+ case Code::WASM_FUNCTION:
+ code->set_has_tagged_params(false);
+ break;
+ default:
+ break;
+ }
+
+ // Allow self references to the created code object by patching the handle to
+ // point to the newly allocated Code object.
+ if (!self_ref.is_null()) *(self_ref.location()) = code;
+
+ // Migrate generated code.
+ // The generated code can contain Object** values (typically from handles)
+ // that are dereferenced during the copy to point directly to the actual heap
+ // objects. These pointers can include references to the code object itself,
+ // through the self_reference parameter.
+ code->CopyFrom(desc);
+
+ code->clear_padding();
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) code->ObjectVerify();
+#endif
+ DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
+ DCHECK(!memory_allocator()->code_range()->valid() ||
+ memory_allocator()->code_range()->contains(code->address()) ||
+ object_size <= code_space()->AreaSize());
+ return code;
+}
-AllocationResult Heap::CopyCode(Code* code) {
+AllocationResult Heap::CopyCode(Code* code, CodeDataContainer* data_container) {
AllocationResult allocation;
HeapObject* result = nullptr;
@@ -3115,23 +3232,29 @@ AllocationResult Heap::CopyCode(Code* code) {
CopyBlock(new_addr, old_addr, obj_size);
Code* new_code = Code::cast(result);
- // Relocate the copy.
- DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
- DCHECK(!memory_allocator()->code_range()->valid() ||
- memory_allocator()->code_range()->contains(code->address()) ||
- obj_size <= code_space()->AreaSize());
+ // Set the {CodeDataContainer}, it cannot be shared.
+ new_code->set_code_data_container(data_container);
// Clear the trap handler index since they can't be shared between code. We
- // have to do this before calling Relocate becauase relocate would adjust the
+ // have to do this before calling Relocate because relocate would adjust the
// base pointer for the old code.
new_code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));
+ // Relocate the copy.
new_code->Relocate(new_addr - old_addr);
// We have to iterate over the object and process its pointers when black
// allocation is on.
incremental_marking()->ProcessBlackAllocatedObject(new_code);
// Record all references to embedded objects in the new code object.
RecordWritesIntoCode(new_code);
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) new_code->ObjectVerify();
+#endif
+ DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
+ DCHECK(!memory_allocator()->code_range()->valid() ||
+ memory_allocator()->code_range()->contains(new_code->address()) ||
+ obj_size <= code_space()->AreaSize());
return new_code;
}
@@ -3177,7 +3300,7 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
DCHECK(gc_state_ == NOT_IN_GC);
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
- if (allocation_site != NULL) {
+ if (allocation_site != nullptr) {
size += AllocationMemento::kSize;
}
HeapObject* result = nullptr;
@@ -3187,7 +3310,7 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
WriteBarrierMode write_barrier_mode =
space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
result->set_map_after_allocation(map, write_barrier_mode);
- if (allocation_site != NULL) {
+ if (allocation_site != nullptr) {
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
InitializeAllocationMemento(alloc_memento, allocation_site);
@@ -3302,10 +3425,10 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
int object_size = map->instance_size();
HeapObject* clone = nullptr;
- DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
+ DCHECK(site == nullptr || AllocationSite::CanTrack(map->instance_type()));
int adjusted_object_size =
- site != NULL ? object_size + AllocationMemento::kSize : object_size;
+ site != nullptr ? object_size + AllocationMemento::kSize : object_size;
AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
if (!allocation.To(&clone)) return allocation;
@@ -3314,7 +3437,7 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// the contents without worrying about updating the write barrier.
CopyBlock(clone->address(), source->address(), object_size);
- if (site != NULL) {
+ if (site != nullptr) {
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(clone) + object_size);
InitializeAllocationMemento(alloc_memento, site);
@@ -3742,22 +3865,21 @@ AllocationResult Heap::AllocateRawFixedArray(int length,
return result;
}
-
-AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
- PretenureFlag pretenure,
- Object* filler) {
- DCHECK_LE(0, length);
- DCHECK(empty_fixed_array()->IsFixedArray());
- if (length == 0) return empty_fixed_array();
-
+AllocationResult Heap::AllocateFixedArrayWithFiller(
+ RootListIndex map_root_index, int length, PretenureFlag pretenure,
+ Object* filler) {
+ // Zero-length case must be handled outside, where the knowledge about
+ // the map is available.
+ DCHECK_LT(0, length);
DCHECK(!InNewSpace(filler));
HeapObject* result = nullptr;
{
AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
if (!allocation.To(&result)) return allocation;
}
-
- result->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
+ DCHECK(RootIsImmortalImmovable(map_root_index));
+ Map* map = Map::cast(root(map_root_index));
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -4195,7 +4317,7 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
const double remaining_idle_time_in_ms =
incremental_marking()->AdvanceIncrementalMarking(
deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
+ StepOrigin::kTask);
if (remaining_idle_time_in_ms > 0.0) {
FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
@@ -4425,7 +4547,7 @@ void Heap::Print() {
if (!HasBeenSetUp()) return;
isolate()->PrintStack(stdout);
AllSpaces spaces(this);
- for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
space->Print();
}
}
@@ -4725,7 +4847,7 @@ void CollectSlots(MemoryChunk* chunk, Address start, Address end,
void Heap::VerifyRememberedSetFor(HeapObject* object) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- base::LockGuard<base::RecursiveMutex> lock_guard(chunk->mutex());
+ base::LockGuard<base::Mutex> lock_guard(chunk->mutex());
Address start = object->address();
Address end = start + object->Size();
std::set<Address> old_to_new;
@@ -4853,10 +4975,6 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
v->VisitRootPointers(Root::kStrongRootList, &roots_[0],
&roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
- // The serializer/deserializer iterates the root list twice, first to pick
- // off immortal immovable roots to make sure they end up on the first page,
- // and then again for the rest.
- if (mode == VISIT_ONLY_STRONG_ROOT_LIST) return;
isolate_->bootstrapper()->Iterate(v);
v->Synchronize(VisitorSynchronization::kBootstrapper);
@@ -4889,11 +5007,7 @@ void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
// Iterate over global handles.
switch (mode) {
- case VISIT_ONLY_STRONG_ROOT_LIST:
- UNREACHABLE();
- break;
case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
- break;
case VISIT_ONLY_STRONG:
isolate_->global_handles()->IterateStrongRoots(v);
break;
@@ -5086,7 +5200,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
if (take_snapshot) {
HeapIterator iterator(this);
- for (HeapObject* obj = iterator.next(); obj != NULL;
+ for (HeapObject* obj = iterator.next(); obj != nullptr;
obj = iterator.next()) {
InstanceType type = obj->map()->instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
@@ -5094,9 +5208,9 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
stats->size_per_type[type] += obj->Size();
}
}
- if (stats->last_few_messages != NULL)
+ if (stats->last_few_messages != nullptr)
GetFromRingBuffer(stats->last_few_messages);
- if (stats->js_stacktrace != NULL) {
+ if (stats->js_stacktrace != nullptr) {
FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
if (gc_state() == Heap::NOT_IN_GC) {
@@ -5346,7 +5460,31 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
// start marking immediately.
return IncrementalMarkingLimit::kHardLimit;
}
+
+ if (FLAG_stress_marking > 0) {
+ double gained_since_last_gc =
+ PromotedSinceLastGC() +
+ (external_memory_ - external_memory_at_last_mark_compact_);
+ double size_before_gc = PromotedTotalSize() - gained_since_last_gc;
+ double bytes_to_limit = old_generation_allocation_limit_ - size_before_gc;
+ if (bytes_to_limit > 0) {
+ double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
+
+ if (FLAG_trace_incremental_marking) {
+ isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] %.2lf%% of the memory limit reached\n",
+ current_percent);
+ }
+
+ if (static_cast<int>(current_percent) >= stress_marking_percentage_) {
+ stress_marking_percentage_ = NextStressMarkingLimit();
+ return IncrementalMarkingLimit::kHardLimit;
+ }
+ }
+ }
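// Worked example (hypothetical numbers): with bytes_to_limit == 40 MB and
// gained_since_last_gc == 10 MB, current_percent is 25. If the randomly
// drawn stress_marking_percentage_ is, say, 20, then 25 >= 20 forces the
// hard limit and NextStressMarkingLimit() draws a fresh threshold in
// [0, FLAG_stress_marking] for the next cycle.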
+
size_t old_generation_space_available = OldGenerationSpaceAvailable();
+
if (old_generation_space_available > new_space_->Capacity()) {
return IncrementalMarkingLimit::kNoLimit;
}
@@ -5380,7 +5518,8 @@ void Heap::DisableInlineAllocation() {
// Update inline allocation limit for old spaces.
PagedSpaces spaces(this);
- for (PagedSpace* space = spaces.next(); space != NULL;
+ CodeSpaceMemoryModificationScope modification_scope(this);
+ for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
space->EmptyAllocationInfo();
}
@@ -5404,7 +5543,7 @@ bool Heap::SetUp() {
}
mmap_region_base_ =
- reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
+ reinterpret_cast<uintptr_t>(base::OS::GetRandomMmapAddr()) &
~kMmapRegionMask;
// Set up memory allocator.
@@ -5414,10 +5553,9 @@ bool Heap::SetUp() {
store_buffer_ = new StoreBuffer(this);
mark_compact_collector_ = new MarkCompactCollector(this);
- incremental_marking_ = new IncrementalMarking(this);
+ incremental_marking_ =
+ new IncrementalMarking(this, mark_compact_collector_->marking_worklist());
- incremental_marking_->set_marking_worklist(
- mark_compact_collector_->marking_worklist());
if (FLAG_concurrent_marking) {
MarkCompactCollector::MarkingWorklist* marking_worklist =
mark_compact_collector_->marking_worklist();
@@ -5465,6 +5603,7 @@ bool Heap::SetUp() {
tracer_ = new GCTracer(this);
minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
+ array_buffer_collector_ = new ArrayBufferCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
memory_reducer_ = new MemoryReducer(this);
if (V8_UNLIKELY(FLAG_gc_stats)) {
@@ -5491,6 +5630,12 @@ bool Heap::SetUp() {
SetGetExternallyAllocatedMemoryInBytesCallback(
DefaultGetExternallyAllocatedMemoryInBytesCallback);
+ if (FLAG_stress_marking > 0) {
+ stress_marking_percentage_ = NextStressMarkingLimit();
+ }
+
+ write_protect_code_memory_ = FLAG_write_protect_code_memory;
+
return true;
}
@@ -5504,7 +5649,7 @@ void Heap::InitializeHashSeed() {
}
void Heap::SetStackLimits() {
- DCHECK(isolate_ != NULL);
+ DCHECK_NOT_NULL(isolate_);
DCHECK(isolate_ == isolate());
// On 64 bit machines, pointers are generally out of range of Smis. We write
// something that looks like an out of range Smi to the GC.
@@ -5527,10 +5672,13 @@ void Heap::PrintAllocationsHash() {
PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
}
+int Heap::NextStressMarkingLimit() {
+ return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
+}
void Heap::NotifyDeserializationComplete() {
PagedSpaces spaces(this);
- for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
+ for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
#ifdef DEBUG
// All pages right after bootstrapping must be marked as never-evacuate.
@@ -5606,6 +5754,11 @@ void Heap::TearDown() {
minor_mark_compact_collector_ = nullptr;
}
+ if (array_buffer_collector_ != nullptr) {
+ delete array_buffer_collector_;
+ array_buffer_collector_ = nullptr;
+ }
+
delete incremental_marking_;
incremental_marking_ = nullptr;
@@ -5648,37 +5801,37 @@ void Heap::TearDown() {
delete new_space_;
new_space_ = nullptr;
- if (old_space_ != NULL) {
+ if (old_space_ != nullptr) {
delete old_space_;
- old_space_ = NULL;
+ old_space_ = nullptr;
}
- if (code_space_ != NULL) {
+ if (code_space_ != nullptr) {
delete code_space_;
- code_space_ = NULL;
+ code_space_ = nullptr;
}
- if (map_space_ != NULL) {
+ if (map_space_ != nullptr) {
delete map_space_;
- map_space_ = NULL;
+ map_space_ = nullptr;
}
- if (lo_space_ != NULL) {
+ if (lo_space_ != nullptr) {
lo_space_->TearDown();
delete lo_space_;
- lo_space_ = NULL;
+ lo_space_ = nullptr;
}
store_buffer()->TearDown();
memory_allocator()->TearDown();
- StrongRootsList* next = NULL;
+ StrongRootsList* next = nullptr;
for (StrongRootsList* list = strong_roots_list_; list; list = next) {
next = list->next;
delete list;
}
- strong_roots_list_ = NULL;
+ strong_roots_list_ = nullptr;
delete store_buffer_;
store_buffer_ = nullptr;
@@ -5776,7 +5929,7 @@ void CompactWeakFixedArray(Object* object) {
void Heap::CompactWeakFixedArrays() {
// Find known WeakFixedArrays and compact them.
HeapIterator iterator(this);
- for (HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
+ for (HeapObject* o = iterator.next(); o != nullptr; o = iterator.next()) {
if (o->IsPrototypeInfo()) {
Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
if (prototype_users->IsWeakFixedArray()) {
@@ -5946,7 +6099,7 @@ Space* AllSpaces::next() {
case LO_SPACE:
return heap_->lo_space();
default:
- return NULL;
+ return nullptr;
}
}
@@ -5959,7 +6112,7 @@ PagedSpace* PagedSpaces::next() {
case MAP_SPACE:
return heap_->map_space();
default:
- return NULL;
+ return nullptr;
}
}
@@ -5971,7 +6124,7 @@ OldSpace* OldSpaces::next() {
case CODE_SPACE:
return heap_->code_space();
default:
- return NULL;
+ return nullptr;
}
}
@@ -6247,7 +6400,7 @@ void Heap::RegisterStrongRoots(Object** start, Object** end) {
void Heap::UnregisterStrongRoots(Object** start) {
- StrongRootsList* prev = NULL;
+ StrongRootsList* prev = nullptr;
StrongRootsList* list = strong_roots_list_;
while (list != nullptr) {
StrongRootsList* next = list->next;
@@ -6265,6 +6418,23 @@ void Heap::UnregisterStrongRoots(Object** start) {
}
}
+bool Heap::IsDeserializeLazyHandler(Code* code) {
+ return (code == deserialize_lazy_handler() ||
+ code == deserialize_lazy_handler_wide() ||
+ code == deserialize_lazy_handler_extra_wide());
+}
+
+void Heap::SetDeserializeLazyHandler(Code* code) {
+ set_deserialize_lazy_handler(code);
+}
+
+void Heap::SetDeserializeLazyHandlerWide(Code* code) {
+ set_deserialize_lazy_handler_wide(code);
+}
+
+void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
+ set_deserialize_lazy_handler_extra_wide(code);
+}
size_t Heap::NumberOfTrackedHeapObjectTypes() {
return ObjectStats::OBJECT_STATS_COUNT;
@@ -6330,7 +6500,7 @@ const char* AllocationSpaceName(AllocationSpace space) {
default:
UNREACHABLE();
}
- return NULL;
+ return nullptr;
}
void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
@@ -6404,5 +6574,70 @@ void Heap::CreateObjectStats() {
}
}
+namespace {
+
+Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
+ MapWord map_word = object->map_word();
+ return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
+ : map_word.ToMap();
+}
+
+int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
+ return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
+}
+
+Code* GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
+ Code* code = reinterpret_cast<Code*>(object);
+ DCHECK_NOT_NULL(code);
+ DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
+ return code;
+}
+
+} // namespace
+
+bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
+ Map* map = GcSafeMapOfCodeSpaceObject(code);
+ DCHECK(map == code->GetHeap()->code_map());
+ Address start = code->address();
+ Address end = code->address() + code->SizeFromMap(map);
+ return start <= addr && addr < end;
+}
+
+Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
+ // Check if the inner pointer points into a large object chunk.
+ LargePage* large_page = lo_space()->FindPage(inner_pointer);
+ if (large_page != nullptr) {
+ return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
+ }
+
+ if (!code_space()->Contains(inner_pointer)) {
+ return nullptr;
+ }
+
+ // Iterate through the page until we reach the end or find an object starting
+ // after the inner pointer.
+ Page* page = Page::FromAddress(inner_pointer);
+ DCHECK_EQ(page->owner(), code_space());
+ mark_compact_collector()->sweeper()->SweepOrWaitUntilSweepingCompleted(page);
+
+ Address addr = page->skip_list()->StartFor(inner_pointer);
+ Address top = code_space()->top();
+ Address limit = code_space()->limit();
+
+ while (true) {
+ if (addr == top && addr != limit) {
+ addr = limit;
+ continue;
+ }
+
+ HeapObject* obj = HeapObject::FromAddress(addr);
+ int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
+ Address next_addr = addr + obj_size;
+ if (next_addr > inner_pointer)
+ return GcSafeCastToCode(this, obj, inner_pointer);
+ addr = next_addr;
+ }
+}
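// Worked example (hypothetical addresses): if the skip list reports that the
// scan may start at 0x1000 and the page holds objects of sizes 0x40 and 0x80,
// an inner_pointer of 0x10a0 is not covered by [0x1000, 0x1040), but
// [0x1040, 0x10c0) ends past it, so the second object is returned as the
// enclosing Code. The addr == top special case merely skips the currently
// open linear allocation area between top and limit.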
+
} // namespace internal
} // namespace v8