path: root/deps/v8/src/heap.cc
Diffstat (limited to 'deps/v8/src/heap.cc')
-rw-r--r--  deps/v8/src/heap.cc  |  295
1 file changed, 225 insertions(+), 70 deletions(-)
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index a1cccf6f22..d3c7f0a9b0 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -42,6 +42,7 @@
#include "natives.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
+#include "once.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "snapshot.h"
@@ -60,8 +61,6 @@
namespace v8 {
namespace internal {
-static LazyMutex gc_initializer_mutex = LAZY_MUTEX_INITIALIZER;
-
Heap::Heap()
: isolate_(NULL),
@@ -145,7 +144,6 @@ Heap::Heap()
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
- idle_notification_will_schedule_next_gc_(false),
mark_sweeps_since_idle_round_started_(0),
ms_count_at_last_idle_notification_(0),
gc_count_at_last_idle_gc_(0),
@@ -173,6 +171,9 @@ Heap::Heap()
global_contexts_list_ = NULL;
mark_compact_collector_.heap_ = this;
external_string_table_.heap_ = this;
+ // Put a dummy entry in the remembered pages so we can find the list in
+ // the minidump even if there are no real unmapped pages.
+ RememberUnmappedPage(NULL, false);
}
@@ -240,12 +241,17 @@ int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
const char** reason) {
// Is global GC requested?
- if (space != NEW_SPACE || FLAG_gc_global) {
+ if (space != NEW_SPACE) {
isolate_->counters()->gc_compactor_caused_by_request()->Increment();
*reason = "GC in old space requested";
return MARK_COMPACTOR;
}
+ if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
+ *reason = "GC in old space forced by flags";
+ return MARK_COMPACTOR;
+ }
+
// Is enough data promoted to justify a global GC?
if (OldGenerationPromotionLimitReached()) {
isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
@@ -504,11 +510,17 @@ bool Heap::CollectGarbage(AllocationSpace space,
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
FLAG_incremental_marking_steps) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+ // Make progress in incremental marking.
+ const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
+ incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
+ IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ if (!incremental_marking()->IsComplete()) {
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+ }
+ collector = SCAVENGER;
+ collector_reason = "incremental marking delaying mark-sweep";
}
- collector = SCAVENGER;
- collector_reason = "incremental marking delaying mark-sweep";
}
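// Condensed view of the branch above (a sketch; names follow the diff): when a
// new-space GC is requested while incremental marking is active, one bounded
// marking step is taken first, and the mark-sweep is only delayed if that step
// did not finish marking:
//
//   incremental_marking()->Step(1 * MB, NO_GC_VIA_STACK_GUARD);
//   if (incremental_marking()->IsComplete()) {
//     // Marking finished during the step: let the pending MARK_COMPACTOR run.
//   } else {
//     collector = SCAVENGER;  // keep delaying the full mark-sweep
//     collector_reason = "incremental marking delaying mark-sweep";
//   }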
bool next_gc_likely_to_collect_more = false;
@@ -796,7 +808,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
- size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
+ size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
@@ -1120,6 +1132,27 @@ void PromotionQueue::RelocateQueueHead() {
}
+class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+ explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
+
+ virtual Object* RetainAs(Object* object) {
+ if (!heap_->InFromSpace(object)) {
+ return object;
+ }
+
+ MapWord map_word = HeapObject::cast(object)->map_word();
+ if (map_word.IsForwardingAddress()) {
+ return map_word.ToForwardingAddress();
+ }
+ return NULL;
+ }
+
+ private:
+ Heap* heap_;
+};
+
+
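// The retainer above keeps live objects untouched, follows forwarding
// addresses for evacuated objects, and returns NULL to drop dead entries.
// Below is a minimal standalone sketch of the same weak-list pruning pattern;
// the Node type and its fields are hypothetical and not part of V8.

#include <cstddef>

struct Node {
  Node* next;       // weak link to the next element
  bool live;        // stand-in for "not in from-space"
  Node* forwarded;  // stand-in for a forwarding address; may be NULL
};

// Returns the object to keep in place of |n|, or NULL to unlink it.
static Node* RetainAsSketch(Node* n) {
  if (n->live) return n;                  // untouched object: keep as-is
  if (n->forwarded) return n->forwarded;  // evacuated: follow the forwarding address
  return NULL;                            // dead: drop from the list
}

// Rebuilds the list head, skipping dropped elements and rewiring weak links.
static Node* ProcessWeakListSketch(Node* head) {
  Node* new_head = NULL;
  Node* tail = NULL;
  for (Node* cur = head; cur != NULL; cur = cur->next) {
    Node* keep = RetainAsSketch(cur);
    if (keep == NULL) continue;  // unlink a dead entry
    if (tail == NULL) {
      new_head = keep;
    } else {
      tail->next = keep;  // the slot V8 additionally records when compacting
    }
    tail = keep;
  }
  if (tail != NULL) tail->next = NULL;
  return new_head;
}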
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
@@ -1218,6 +1251,9 @@ void Heap::Scavenge() {
}
incremental_marking()->UpdateMarkingDequeAfterScavenge();
+ ScavengeWeakObjectRetainer weak_object_retainer(this);
+ ProcessWeakReferences(&weak_object_retainer);
+
ASSERT(new_space_front == new_space_.top());
// Set age mark.
@@ -1304,7 +1340,8 @@ void Heap::UpdateReferencesInExternalStringTable(
static Object* ProcessFunctionWeakReferences(Heap* heap,
Object* function,
- WeakObjectRetainer* retainer) {
+ WeakObjectRetainer* retainer,
+ bool record_slots) {
Object* undefined = heap->undefined_value();
Object* head = undefined;
JSFunction* tail = NULL;
@@ -1321,6 +1358,12 @@ static Object* ProcessFunctionWeakReferences(Heap* heap,
// Subsequent elements in the list.
ASSERT(tail != NULL);
tail->set_next_function_link(retain);
+ if (record_slots) {
+ Object** next_function =
+ HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
+ heap->mark_compact_collector()->RecordSlot(
+ next_function, next_function, retain);
+ }
}
// Retained function is new tail.
candidate_function = reinterpret_cast<JSFunction*>(retain);
@@ -1349,6 +1392,15 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
Object* head = undefined;
Context* tail = NULL;
Object* candidate = global_contexts_list_;
+
+ // We don't record weak slots during marking or scavenges.
+ // Instead we do it once when we complete the mark-compact cycle.
+ // Note that the write barrier has no effect if we are already in the middle
+ // of a compacting mark-sweep cycle, so we have to record slots manually.
+ bool record_slots =
+ gc_state() == MARK_COMPACT &&
+ mark_compact_collector()->is_compacting();
+
while (candidate != undefined) {
// Check whether to keep the candidate in the list.
Context* candidate_context = reinterpret_cast<Context*>(candidate);
@@ -1364,6 +1416,14 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
Context::NEXT_CONTEXT_LINK,
retain,
UPDATE_WRITE_BARRIER);
+
+ if (record_slots) {
+ Object** next_context =
+ HeapObject::RawField(
+ tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
+ mark_compact_collector()->RecordSlot(
+ next_context, next_context, retain);
+ }
}
// Retained context is new tail.
candidate_context = reinterpret_cast<Context*>(retain);
@@ -1376,11 +1436,19 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
ProcessFunctionWeakReferences(
this,
candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
- retainer);
+ retainer,
+ record_slots);
candidate_context->set_unchecked(this,
Context::OPTIMIZED_FUNCTIONS_LIST,
function_list_head,
UPDATE_WRITE_BARRIER);
+ if (record_slots) {
+ Object** optimized_functions =
+ HeapObject::RawField(
+ tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
+ mark_compact_collector()->RecordSlot(
+ optimized_functions, optimized_functions, function_list_head);
+ }
}
// Move to next element in the list.
@@ -1480,6 +1548,27 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
}
+STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
+
+
+INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
+ HeapObject* object,
+ int size));
+
+static HeapObject* EnsureDoubleAligned(Heap* heap,
+ HeapObject* object,
+ int size) {
+ if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
+ heap->CreateFillerObjectAt(object->address(), kPointerSize);
+ return HeapObject::FromAddress(object->address() + kPointerSize);
+ } else {
+ heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
+ kPointerSize);
+ return object;
+ }
+}
+
+
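// EnsureDoubleAligned above assumes the caller over-allocated by one pointer
// word: on a misaligned start it fills that word and bumps the object forward,
// otherwise it fills the trailing word. A standalone sketch of the arithmetic,
// assuming a 32-bit layout (kPointerSize == 4, doubles 8-byte aligned) and
// using plain integers instead of HeapObject/filler objects:

#include <cassert>
#include <cstddef>
#include <cstdint>

static const uintptr_t kPointerSizeSketch = 4;
static const uintptr_t kDoubleAlignmentSketch = 8;
static const uintptr_t kDoubleAlignmentMaskSketch = kDoubleAlignmentSketch - 1;

// |allocation_size| already includes one extra word of slack.
// Returns the double-aligned start of the usable object.
static uintptr_t EnsureDoubleAlignedSketch(uintptr_t address, size_t allocation_size) {
  if ((address & kDoubleAlignmentMaskSketch) != 0) {
    // Misaligned start: waste the leading word (V8 puts a one-word filler here)
    // and shift the object to the next 8-byte boundary.
    return address + kPointerSizeSketch;
  }
  // Already aligned: the slack word is left at the tail instead
  // (V8 places the filler at address + allocation_size - kPointerSize).
  (void) allocation_size;
  return address;
}

int main() {
  assert(EnsureDoubleAlignedSketch(0x1000, 36) == 0x1000);  // aligned start kept
  assert(EnsureDoubleAlignedSketch(0x1004, 36) == 0x1008);  // bumped to next boundary
  return 0;
}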
enum LoggingAndProfiling {
LOGGING_AND_PROFILING_ENABLED,
LOGGING_AND_PROFILING_DISABLED
@@ -1603,7 +1692,10 @@ class ScavengingVisitor : public StaticVisitorBase {
}
}
- template<ObjectContents object_contents, SizeRestriction size_restriction>
+
+ template<ObjectContents object_contents,
+ SizeRestriction size_restriction,
+ int alignment>
static inline void EvacuateObject(Map* map,
HeapObject** slot,
HeapObject* object,
@@ -1612,19 +1704,26 @@ class ScavengingVisitor : public StaticVisitorBase {
(object_size <= Page::kMaxNonCodeHeapObjectSize));
SLOW_ASSERT(object->Size() == object_size);
+ int allocation_size = object_size;
+ if (alignment != kObjectAlignment) {
+ ASSERT(alignment == kDoubleAlignment);
+ allocation_size += kPointerSize;
+ }
+
Heap* heap = map->GetHeap();
if (heap->ShouldBePromoted(object->address(), object_size)) {
MaybeObject* maybe_result;
if ((size_restriction != SMALL) &&
- (object_size > Page::kMaxNonCodeHeapObjectSize)) {
- maybe_result = heap->lo_space()->AllocateRaw(object_size,
+ (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
+ maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
NOT_EXECUTABLE);
} else {
if (object_contents == DATA_OBJECT) {
- maybe_result = heap->old_data_space()->AllocateRaw(object_size);
+ maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
} else {
- maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
+ maybe_result =
+ heap->old_pointer_space()->AllocateRaw(allocation_size);
}
}
@@ -1632,6 +1731,10 @@ class ScavengingVisitor : public StaticVisitorBase {
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
+ if (alignment != kObjectAlignment) {
+ target = EnsureDoubleAligned(heap, target, allocation_size);
+ }
+
// Order is important: slot might be inside of the target if target
// was allocated over a dead object and slot comes from the store
// buffer.
@@ -1639,18 +1742,27 @@ class ScavengingVisitor : public StaticVisitorBase {
MigrateObject(heap, object, target, object_size);
if (object_contents == POINTER_OBJECT) {
- heap->promotion_queue()->insert(target, object_size);
+ if (map->instance_type() == JS_FUNCTION_TYPE) {
+ heap->promotion_queue()->insert(
+ target, JSFunction::kNonWeakFieldsEndOffset);
+ } else {
+ heap->promotion_queue()->insert(target, object_size);
+ }
}
heap->tracer()->increment_promoted_objects_size(object_size);
return;
}
}
- MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
+ MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
HeapObject* target = HeapObject::cast(result);
+ if (alignment != kObjectAlignment) {
+ target = EnsureDoubleAligned(heap, target, allocation_size);
+ }
+
// Order is important: slot might be inside of the target if target
// was allocated over a dead object and slot comes from the store
// buffer.
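// Note on the JS_FUNCTION_TYPE case above (a sketch of the intent; offsets are
// build-dependent): the promotion queue entry covers only the function's
// non-weak prefix, so when the promoted object is later re-scanned, iteration
// stops at JSFunction::kNonWeakFieldsEndOffset and the weak next_function_link
// field is not treated as a strong pointer; it stays a weak reference handled
// by ProcessWeakReferences/ProcessFunctionWeakReferences.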
@@ -1686,7 +1798,7 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object) {
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
+ EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
slot,
object,
object_size);
@@ -1698,10 +1810,11 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
int object_size = FixedDoubleArray::SizeFor(length);
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
- slot,
- object,
- object_size);
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
+ map,
+ slot,
+ object,
+ object_size);
}
@@ -1709,7 +1822,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+ map, slot, object, object_size);
}
@@ -1718,7 +1832,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int object_size = SeqAsciiString::cast(object)->
SeqAsciiStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+ map, slot, object, object_size);
}
@@ -1727,7 +1842,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
int object_size = SeqTwoByteString::cast(object)->
SeqTwoByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+ EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+ map, slot, object, object_size);
}
@@ -1770,7 +1886,8 @@ class ScavengingVisitor : public StaticVisitorBase {
}
int object_size = ConsString::kSize;
- EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
+ EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
+ map, slot, object, object_size);
}
template<ObjectContents object_contents>
@@ -1780,14 +1897,16 @@ class ScavengingVisitor : public StaticVisitorBase {
static inline void VisitSpecialized(Map* map,
HeapObject** slot,
HeapObject* object) {
- EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
+ EvacuateObject<object_contents, SMALL, kObjectAlignment>(
+ map, slot, object, object_size);
}
static inline void Visit(Map* map,
HeapObject** slot,
HeapObject* object) {
int object_size = map->instance_size();
- EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
+ EvacuateObject<object_contents, SMALL, kObjectAlignment>(
+ map, slot, object, object_size);
}
};
@@ -1904,7 +2023,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_pre_allocated_property_fields(0);
map->init_instance_descriptors();
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
- map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ map->init_prototype_transitions(undefined_value());
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
@@ -1953,7 +2072,7 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
if (!maybe_info->To(&info)) return maybe_info;
}
info->set_ic_total_count(0);
- info->set_ic_with_typeinfo_count(0);
+ info->set_ic_with_type_info_count(0);
info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
return info;
@@ -2043,15 +2162,15 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
meta_map()->init_instance_descriptors();
meta_map()->set_code_cache(empty_fixed_array());
- meta_map()->set_prototype_transitions(empty_fixed_array());
+ meta_map()->init_prototype_transitions(undefined_value());
fixed_array_map()->init_instance_descriptors();
fixed_array_map()->set_code_cache(empty_fixed_array());
- fixed_array_map()->set_prototype_transitions(empty_fixed_array());
+ fixed_array_map()->init_prototype_transitions(undefined_value());
oddball_map()->init_instance_descriptors();
oddball_map()->set_code_cache(empty_fixed_array());
- oddball_map()->set_prototype_transitions(empty_fixed_array());
+ oddball_map()->init_prototype_transitions(undefined_value());
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
@@ -2897,9 +3016,9 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
- share->set_deopt_counter(FLAG_deopt_every_n_times);
- share->set_profiler_ticks(0);
share->set_ast_node_count(0);
+ share->set_deopt_counter(FLAG_deopt_every_n_times);
+ share->set_ic_age(0);
// Set integer fields (smi or int, depending on the architecture).
share->set_length(0);
@@ -3823,6 +3942,16 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
}
+MaybeObject* Heap::AllocateJSModule() {
+ // Allocate a fresh map. Modules do not have a prototype.
+ Map* map;
+ MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
+ if (!maybe_map->To(&map)) return maybe_map;
+ // Allocate the object based on the map.
+ return AllocateJSObjectFromMap(map, TENURED);
+}
+
+
MaybeObject* Heap::AllocateJSArrayAndStorage(
ElementsKind elements_kind,
int length,
@@ -3959,7 +4088,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
// Fill these accessors into the dictionary.
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
- PropertyDetails details(descs->GetDetails(i));
+ PropertyDetails details = descs->GetDetails(i);
ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
PropertyDetails d =
PropertyDetails(details.attributes(), CALLBACKS, details.index());
@@ -4652,6 +4781,11 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
AllocationSpace space =
(pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = FixedDoubleArray::SizeFor(length);
+
+#ifndef V8_HOST_ARCH_64_BIT
+ size += kPointerSize;
+#endif
+
if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
// Too big for new space.
space = LO_SPACE;
@@ -4664,7 +4798,12 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
AllocationSpace retry_space =
(size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
- return AllocateRaw(size, space, retry_space);
+ HeapObject* object;
+ { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
+ if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
+ }
+
+ return EnsureDoubleAligned(this, object, size);
}
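// The extra word above is only reserved on 32-bit hosts. A worked example,
// assuming the usual two-word FixedArrayBase header (8 bytes on a 32-bit build):
//   length = 3  ->  FixedDoubleArray::SizeFor(3) = 8 + 3 * 8 = 32 bytes
//   allocation size with slack = 32 + kPointerSize = 36 bytes
//   if AllocateRaw returns an address ending in ...4 (only pointer-aligned),
//   EnsureDoubleAligned drops a one-word filler there and the array starts at ...8.
// On a 64-bit host kPointerSize == 8, so raw allocations are already
// double-aligned and no slack word is needed (hence the #ifndef above).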
@@ -4697,6 +4836,22 @@ MaybeObject* Heap::AllocateGlobalContext() {
}
+MaybeObject* Heap::AllocateModuleContext(Context* previous,
+ ScopeInfo* scope_info) {
+ Object* result;
+ { MaybeObject* maybe_result =
+ AllocateFixedArrayWithHoles(scope_info->ContextLength(), TENURED);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Context* context = reinterpret_cast<Context*>(result);
+ context->set_map_no_write_barrier(module_context_map());
+ context->set_previous(previous);
+ context->set_extension(scope_info);
+ context->set_global(previous->global());
+ return context;
+}
+
+
MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
Object* result;
@@ -4817,10 +4972,8 @@ void Heap::EnsureHeapIsIterable() {
void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
- // This flag prevents incremental marking from requesting GC via stack guard
- idle_notification_will_schedule_next_gc_ = true;
- incremental_marking()->Step(step_size);
- idle_notification_will_schedule_next_gc_ = false;
+ incremental_marking()->Step(step_size,
+ IncrementalMarking::NO_GC_VIA_STACK_GUARD);
if (incremental_marking()->IsComplete()) {
bool uncommit = false;
@@ -4841,8 +4994,10 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
bool Heap::IdleNotification(int hint) {
const int kMaxHint = 1000;
- intptr_t size_factor = Min(Max(hint, 30), kMaxHint) / 10;
- // The size factor is in range [3..100].
+ intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
+ // The size factor is in range [5..250]. The numbers here are chosen from
+ // experiments. If you change them, make sure to test with
+ // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
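// A quick worked example of the new scaling; the 64 KB value assumed for
// IncrementalMarking::kAllocatedThreshold is an illustration, not a quote:
//   hint = 40   -> size_factor = Min(Max(40, 20), 1000) / 4   = 10  -> step ~ 640 KB
//   hint = 1000 -> size_factor = Min(Max(1000, 20), 1000) / 4 = 250 -> step ~ 15.6 MB
// The previous formula, Min(Max(hint, 30), 1000) / 10, gave factors of 4 and 100
// for the same hints, i.e. roughly 2.5x smaller marking steps per notification.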
if (contexts_disposed_ > 0) {
@@ -4866,11 +5021,14 @@ bool Heap::IdleNotification(int hint) {
// Take into account that we might have decided to delay full collection
// because incremental marking is in progress.
ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
+ // After context disposal there is likely a lot of garbage remaining, so
+ // reset the idle notification counters in order to trigger more incremental
+ // GCs on subsequent idle notifications.
+ StartIdleRound();
return false;
}
- if (hint >= kMaxHint || !FLAG_incremental_marking ||
- FLAG_expose_gc || Serializer::enabled()) {
+ if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
return IdleGlobalGC();
}
@@ -4909,10 +5067,6 @@ bool Heap::IdleNotification(int hint) {
}
if (incremental_marking()->IsStopped()) {
- if (!WorthStartingGCWhenIdle()) {
- FinishIdleRound();
- return true;
- }
incremental_marking()->Start();
}
@@ -5550,6 +5704,11 @@ bool Heap::ConfigureHeap(int max_semispace_size,
intptr_t max_executable_size) {
if (HasBeenSetUp()) return false;
+ if (FLAG_stress_compaction) {
+ // This will cause more frequent GCs when stressing.
+ max_semispace_size_ = Page::kPageSize;
+ }
+
if (max_semispace_size > 0) {
if (max_semispace_size < Page::kPageSize) {
max_semispace_size = Page::kPageSize;
@@ -5654,16 +5813,6 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
-intptr_t Heap::PromotedSpaceSize() {
- return old_pointer_space_->Size()
- + old_data_space_->Size()
- + code_space_->Size()
- + map_space_->Size()
- + cell_space_->Size()
- + lo_space_->Size();
-}
-
-
intptr_t Heap::PromotedSpaceSizeOfObjects() {
return old_pointer_space_->SizeOfObjects()
+ old_data_space_->SizeOfObjects()
@@ -5674,7 +5823,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
}
-int Heap::PromotedExternalMemorySize() {
+intptr_t Heap::PromotedExternalMemorySize() {
if (amount_of_external_allocated_memory_
<= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
return amount_of_external_allocated_memory_
@@ -5847,6 +5996,15 @@ class HeapDebugUtils {
#endif
+
+V8_DECLARE_ONCE(initialize_gc_once);
+
+static void InitializeGCOnce() {
+ InitializeScavengingVisitorsTables();
+ NewSpaceScavenger::Initialize();
+ MarkCompactCollector::Initialize();
+}
+
bool Heap::SetUp(bool create_heap_objects) {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
@@ -5865,15 +6023,7 @@ bool Heap::SetUp(bool create_heap_objects) {
if (!ConfigureHeapDefault()) return false;
}
- gc_initializer_mutex.Pointer()->Lock();
- static bool initialized_gc = false;
- if (!initialized_gc) {
- initialized_gc = true;
- InitializeScavengingVisitorsTables();
- NewSpaceScavenger::Initialize();
- MarkCompactCollector::Initialize();
- }
- gc_initializer_mutex.Pointer()->Unlock();
+ CallOnce(&initialize_gc_once, &InitializeGCOnce);
MarkMapPointersAsEncoded(false);
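// The LazyMutex-guarded static flag is replaced above by once.h's
// V8_DECLARE_ONCE/CallOnce, which runs InitializeGCOnce exactly once no matter
// how many isolates call Heap::SetUp. The same idiom with the standard
// library's analogous facility, shown only as an illustration (std::call_once
// is not what once.h uses internally):

#include <cstdio>
#include <mutex>

static std::once_flag initialize_gc_once_flag;

static void InitializeGCOnceSketch() {
  // In heap.cc this is where the scavenging visitor tables, the new-space
  // scavenger and the mark-compact collector get their one-time setup.
  std::printf("one-time GC initialization\n");
}

void SetUpSketch() {
  // Safe to call repeatedly and from multiple threads; the body runs once.
  std::call_once(initialize_gc_once_flag, InitializeGCOnceSketch);
}

int main() {
  SetUpSketch();
  SetUpSketch();  // second call is a no-op
  return 0;
}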
@@ -5985,6 +6135,11 @@ void Heap::SetStackLimits() {
void Heap::TearDown() {
+#ifdef DEBUG
+ if (FLAG_verify_heap) {
+ Verify();
+ }
+#endif
if (FLAG_print_cumulative_gc_stat) {
PrintF("\n\n");
PrintF("gc_count=%d ", gc_count_);