path: root/deps/v8/src/heap.cc
author    Ryan Dahl <ry@tinyclouds.org>  2011-10-10 17:58:30 -0700
committer Ryan Dahl <ry@tinyclouds.org>  2011-10-10 17:58:30 -0700
commit    3b1d656da56bdd403d1625a0c6a44d75cde36cc1 (patch)
tree      80de8a68eacd596f0d120efc65dbbae3f324aea0 /deps/v8/src/heap.cc
parent    9bbca99107652906a060679ee95bf1ad7381cbb5 (diff)
download  node-3b1d656da56bdd403d1625a0c6a44d75cde36cc1.tar.gz
Revert "Upgrade V8 to 3.6.6"
Not stable enough.
- Windows snapshot linking broken
- Linux crash on ./node_g test/simple/test-stream-pipe-multi.js

This reverts commit 56e6952e639ba1557a5b22333788583e9e39fa29.
Diffstat (limited to 'deps/v8/src/heap.cc')
-rw-r--r--  deps/v8/src/heap.cc  1531
1 file changed, 702 insertions(+), 829 deletions(-)
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index c6efd6205..d0185930b 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -36,16 +36,13 @@
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
-#include "incremental-marking.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
-#include "objects-visiting-inl.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "snapshot.h"
-#include "store-buffer.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
@@ -61,6 +58,10 @@ namespace v8 {
namespace internal {
+static const intptr_t kMinimumPromotionLimit = 2 * MB;
+static const intptr_t kMinimumAllocationLimit = 8 * MB;
+
+
static Mutex* gc_initializer_mutex = OS::CreateMutex();
@@ -69,21 +70,27 @@ Heap::Heap()
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
-#define LUMP_OF_MEMORY (128 * KB)
+ reserved_semispace_size_(2*MB),
+ max_semispace_size_(2*MB),
+ initial_semispace_size_(128*KB),
+ max_old_generation_size_(192*MB),
+ max_executable_size_(max_old_generation_size_),
code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
-#define LUMP_OF_MEMORY (2 * MB)
+ reserved_semispace_size_(16*MB),
+ max_semispace_size_(16*MB),
+ initial_semispace_size_(1*MB),
+ max_old_generation_size_(1400*MB),
+ max_executable_size_(256*MB),
code_range_size_(512*MB),
#else
-#define LUMP_OF_MEMORY MB
+ reserved_semispace_size_(8*MB),
+ max_semispace_size_(8*MB),
+ initial_semispace_size_(512*KB),
+ max_old_generation_size_(700*MB),
+ max_executable_size_(128*MB),
code_range_size_(0),
#endif
- reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_old_generation_size_(700ul * LUMP_OF_MEMORY),
- max_executable_size_(128l * LUMP_OF_MEMORY),
-
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
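For orientation: the removed LUMP_OF_MEMORY scheme derived all of these fields from one per-platform constant, while the restored code spells them out as literals. A minimal sketch of the derivation for the x64 branch (the page size is an assumption of the sketch, not taken from this diff); note that it reproduces the restored 16 MB semispace, 1400 MB old-generation and 256 MB executable limits, but yields a 2 MB initial semispace where the restored literal is 1 MB.

// Illustration only, not V8 code: x64 sizes under the removed macro.
#include <algorithm>
#include <cstdint>

constexpr intptr_t KB = 1024;
constexpr intptr_t MB = 1024 * KB;
constexpr intptr_t kPageSize = 8 * KB;  // assumed for the sketch
constexpr intptr_t kLump = 2 * MB;      // x64 LUMP_OF_MEMORY

constexpr intptr_t reserved_semispace = 8 * std::max(kLump, kPageSize);  // 16 MB
constexpr intptr_t initial_semispace  = std::max(kLump, kPageSize);      // 2 MB
constexpr intptr_t max_old_generation = 700 * kLump;                     // 1400 MB
constexpr intptr_t max_executable     = 128 * kLump;                     // 256 MB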
@@ -93,7 +100,6 @@ Heap::Heap()
always_allocate_scope_depth_(0),
linear_allocation_scope_depth_(0),
contexts_disposed_(0),
- scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
old_data_space_(NULL),
@@ -103,6 +109,7 @@ Heap::Heap()
lo_space_(NULL),
gc_state_(NOT_IN_GC),
gc_post_processing_depth_(0),
+ mc_count_(0),
ms_count_(0),
gc_count_(0),
unflattened_strings_length_(0),
@@ -114,13 +121,10 @@ Heap::Heap()
#endif // DEBUG
old_gen_promotion_limit_(kMinimumPromotionLimit),
old_gen_allocation_limit_(kMinimumAllocationLimit),
- old_gen_limit_factor_(1),
- size_of_old_gen_at_last_old_space_gc_(0),
external_allocation_limit_(0),
amount_of_external_allocated_memory_(0),
amount_of_external_allocated_memory_at_last_global_gc_(0),
old_gen_exhausted_(false),
- store_buffer_rebuilder_(store_buffer()),
hidden_symbol_(NULL),
global_gc_prologue_callback_(NULL),
global_gc_epilogue_callback_(NULL),
@@ -137,14 +141,12 @@ Heap::Heap()
min_in_mutator_(kMaxInt),
alive_after_last_gc_(0),
last_gc_end_timestamp_(0.0),
- store_buffer_(this),
- marking_(this),
- incremental_marking_(this),
+ page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
configured_(false),
- chunks_queued_for_free_(NULL) {
+ is_safe_to_read_maps_(true) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -222,10 +224,29 @@ bool Heap::HasBeenSetup() {
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
- if (IntrusiveMarking::IsMarked(object)) {
- return IntrusiveMarking::SizeOfMarkedObject(object);
+ ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
+ ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
+ MapWord map_word = object->map_word();
+ map_word.ClearMark();
+ map_word.ClearOverflow();
+ return object->SizeFromMap(map_word.ToMap());
+}
+
+
+int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
+ ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
+ ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
+ uint32_t marker = Memory::uint32_at(object->address());
+ if (marker == MarkCompactCollector::kSingleFreeEncoding) {
+ return kIntSize;
+ } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
+ return Memory::int_at(object->address() + kIntSize);
+ } else {
+ MapWord map_word = object->map_word();
+ Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
+ Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
+ return object->SizeFromMap(map);
}
- return object->SizeFromMap(object->map());
}
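The restored GcSafeSizeOfOldObjectWithEncodedMap exists because, while map pointers are encoded during compaction, swept-free regions carry a marker word instead of a map. A standalone sketch of that probe, with hypothetical marker values (the real kSingleFreeEncoding/kMultiFreeEncoding come from MarkCompactCollector):

// Sketch, not V8 API: a free region starts with a marker word; a region of
// exactly one word stores only the marker, larger regions store their byte
// length in the following word. Anything else is a live object.
#include <cstdint>
#include <cstring>

constexpr uint32_t kSingleFreeEncoding = 0xFFFFFFFEu;  // hypothetical
constexpr uint32_t kMultiFreeEncoding  = 0xFFFFFFFDu;  // hypothetical

int GcSafeSize(const char* addr) {
  uint32_t marker;
  std::memcpy(&marker, addr, sizeof(marker));
  if (marker == kSingleFreeEncoding) return sizeof(uint32_t);
  if (marker == kMultiFreeEncoding) {
    int32_t size;
    std::memcpy(&size, addr + sizeof(uint32_t), sizeof(size));
    return static_cast<int>(size);
  }
  return -1;  // live object: decode the encoded map word instead
}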
@@ -379,7 +400,6 @@ void Heap::GarbageCollectionPrologue() {
#endif // DEBUG
LiveObjectList::GCPrologue();
- store_buffer()->GCPrologue();
}
intptr_t Heap::SizeOfObjects() {
@@ -392,7 +412,6 @@ intptr_t Heap::SizeOfObjects() {
}
void Heap::GarbageCollectionEpilogue() {
- store_buffer()->GCEpilogue();
LiveObjectList::GCEpilogue();
#ifdef DEBUG
allow_allocation(true);
@@ -424,13 +443,13 @@ void Heap::GarbageCollectionEpilogue() {
}
-void Heap::CollectAllGarbage(int flags) {
+void Heap::CollectAllGarbage(bool force_compaction) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
- mark_compact_collector_.SetFlags(flags);
+ mark_compact_collector_.SetForceCompaction(force_compaction);
CollectGarbage(OLD_POINTER_SPACE);
- mark_compact_collector_.SetFlags(kNoGCFlags);
+ mark_compact_collector_.SetForceCompaction(false);
}
@@ -438,6 +457,8 @@ void Heap::CollectAllAvailableGarbage() {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
+ mark_compact_collector()->SetForceCompaction(true);
+
// Major GC would invoke weak handle callbacks on weakly reachable
// handles, but won't collect weakly reachable objects until next
// major GC. Therefore if we collect aggressively and weak handle callback
@@ -446,14 +467,13 @@ void Heap::CollectAllAvailableGarbage() {
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
- mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
const int kMaxNumberOfAttempts = 7;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
break;
}
}
- mark_compact_collector()->SetFlags(kNoGCFlags);
+ mark_compact_collector()->SetForceCompaction(false);
}
@@ -470,23 +490,6 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
- if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Scavenge during marking.\n");
- }
- }
-
- if (collector == MARK_COMPACTOR &&
- !mark_compact_collector()->PreciseSweepingRequired() &&
- !incremental_marking()->IsStopped() &&
- !incremental_marking()->should_hurry() &&
- FLAG_incremental_marking_steps) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
- }
- collector = SCAVENGER;
- }
-
bool next_gc_likely_to_collect_more = false;
{ GCTracer tracer(this);
@@ -509,24 +512,13 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
GarbageCollectionEpilogue();
}
- ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
- if (incremental_marking()->IsStopped()) {
- if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
- incremental_marking()->Start();
- }
- }
-
return next_gc_likely_to_collect_more;
}
void Heap::PerformScavenge() {
GCTracer tracer(this);
- if (incremental_marking()->IsStopped()) {
- PerformGarbageCollection(SCAVENGER, &tracer);
- } else {
- PerformGarbageCollection(MARK_COMPACTOR, &tracer);
- }
+ PerformGarbageCollection(SCAVENGER, &tracer);
}
@@ -618,6 +610,13 @@ void Heap::EnsureFromSpaceIsCommitted() {
// Committing memory to from space failed.
// Try shrinking and try again.
+ PagedSpaces spaces;
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
+ space->RelinkPageListInChunkOrder(true);
+ }
+
Shrink();
if (new_space_.CommitFromSpaceIfNeeded()) return;
@@ -648,10 +647,7 @@ void Heap::ClearJSFunctionResultCaches() {
void Heap::ClearNormalizedMapCaches() {
- if (isolate_->bootstrapper()->IsActive() &&
- !incremental_marking()->IsMarking()) {
- return;
- }
+ if (isolate_->bootstrapper()->IsActive()) return;
Object* context = global_contexts_list_;
while (!context->IsUndefined()) {
@@ -661,6 +657,24 @@ void Heap::ClearNormalizedMapCaches() {
}
+#ifdef DEBUG
+
+enum PageWatermarkValidity {
+ ALL_VALID,
+ ALL_INVALID
+};
+
+static void VerifyPageWatermarkValidity(PagedSpace* space,
+ PageWatermarkValidity validity) {
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+ bool expected_value = (validity == ALL_VALID);
+ while (it.has_next()) {
+ Page* page = it.next();
+ ASSERT(page->IsWatermarkValid() == expected_value);
+ }
+}
+#endif
+
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
double survival_rate =
(static_cast<double>(young_survivors_after_last_gc_) * 100) /
@@ -713,13 +727,6 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
int start_new_space_size = Heap::new_space()->SizeAsInt();
- if (IsHighSurvivalRate()) {
- // We speed up the incremental marker if it is running so that it
- // does not fall behind the rate of promotion, which would cause a
- // constantly growing old space.
- incremental_marking()->NotifyOfHighPromotionRate();
- }
-
if (collector == MARK_COMPACTOR) {
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
@@ -729,7 +736,11 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
- size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
+ intptr_t old_gen_size = PromotedSpaceSize();
+ old_gen_promotion_limit_ =
+ old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+ old_gen_allocation_limit_ =
+ old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
@@ -739,16 +750,10 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// In this case we aggressively raise old generation memory limits to
// postpone subsequent mark-sweep collection and thus trade memory
// space for the mutation speed.
- old_gen_limit_factor_ = 2;
- } else {
- old_gen_limit_factor_ = 1;
+ old_gen_promotion_limit_ *= 2;
+ old_gen_allocation_limit_ *= 2;
}
- old_gen_promotion_limit_ =
- OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
- old_gen_allocation_limit_ =
- OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-
old_gen_exhausted_ = false;
} else {
tracer_ = tracer;
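The restored limits are easy to check by hand: with a 30 MB old generation after a full GC, the promotion limit becomes 30 + max(2, 10) = 40 MB and the allocation limit 30 + max(8, 15) = 45 MB, doubled to 80/90 MB when the survival trend stays high. A self-contained sketch of the reverted-to policy:

// Sketch mirroring the hunk above (survival-trend test folded into a flag).
#include <algorithm>
#include <cstdint>

constexpr intptr_t MB = 1024 * 1024;
constexpr intptr_t kMinimumPromotionLimit = 2 * MB;
constexpr intptr_t kMinimumAllocationLimit = 8 * MB;

struct OldGenLimits { intptr_t promotion; intptr_t allocation; };

OldGenLimits ComputeLimits(intptr_t old_gen_size, bool high_survival) {
  OldGenLimits limits = {
      old_gen_size + std::max(kMinimumPromotionLimit, old_gen_size / 3),
      old_gen_size + std::max(kMinimumAllocationLimit, old_gen_size / 2)};
  if (high_survival) {  // postpone the next mark-sweep: memory for speed
    limits.promotion *= 2;
    limits.allocation *= 2;
  }
  return limits;
}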
@@ -777,7 +782,9 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
amount_of_external_allocated_memory_;
}
- GCCallbackFlags callback_flags = kNoGCCallbackFlags;
+ GCCallbackFlags callback_flags = tracer->is_compacting()
+ ? kGCCallbackFlagCompacted
+ : kNoGCCallbackFlags;
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
@@ -801,24 +808,34 @@ void Heap::MarkCompact(GCTracer* tracer) {
mark_compact_collector_.Prepare(tracer);
- ms_count_++;
- tracer->set_full_gc_count(ms_count_);
+ bool is_compacting = mark_compact_collector_.IsCompacting();
- MarkCompactPrologue();
+ if (is_compacting) {
+ mc_count_++;
+ } else {
+ ms_count_++;
+ }
+ tracer->set_full_gc_count(mc_count_ + ms_count_);
+ MarkCompactPrologue(is_compacting);
+
+ is_safe_to_read_maps_ = false;
mark_compact_collector_.CollectGarbage();
+ is_safe_to_read_maps_ = true;
LOG(isolate_, ResourceEvent("markcompact", "end"));
gc_state_ = NOT_IN_GC;
+ Shrink();
+
isolate_->counters()->objs_since_last_full()->Set(0);
contexts_disposed_ = 0;
}
-void Heap::MarkCompactPrologue() {
+void Heap::MarkCompactPrologue(bool is_compacting) {
// At any old GC clear the keyed lookup cache to enable collection of unused
// maps.
isolate_->keyed_lookup_cache()->Clear();
@@ -830,8 +847,7 @@ void Heap::MarkCompactPrologue() {
CompletelyClearInstanceofCache();
- // TODO(1605) select heuristic for flushing NumberString cache with
- // FlushNumberStringCache
+ if (is_compacting) FlushNumberStringCache();
if (FLAG_cleanup_code_caches_at_gc) {
polymorphic_code_cache()->set_cache(undefined_value());
}
@@ -841,8 +857,13 @@ void Heap::MarkCompactPrologue() {
Object* Heap::FindCodeObject(Address a) {
- return isolate()->inner_pointer_to_code_cache()->
- GcSafeFindCodeForInnerPointer(a);
+ Object* obj = NULL; // Initialization to please compiler.
+ { MaybeObject* maybe_obj = code_space_->FindObject(a);
+ if (!maybe_obj->ToObject(&obj)) {
+ obj = lo_space_->FindObject(a)->ToObjectUnchecked();
+ }
+ }
+ return obj;
}
@@ -890,18 +911,14 @@ static void VerifyNonPointerSpacePointers() {
// do not expect them.
VerifyNonPointerSpacePointersVisitor v;
HeapObjectIterator code_it(HEAP->code_space());
- for (HeapObject* object = code_it.Next();
- object != NULL; object = code_it.Next())
+ for (HeapObject* object = code_it.next();
+ object != NULL; object = code_it.next())
object->Iterate(&v);
- // The old data space was normally swept conservatively so that the iterator
- // doesn't work, so we normally skip the next bit.
- if (!HEAP->old_data_space()->was_swept_conservatively()) {
- HeapObjectIterator data_it(HEAP->old_data_space());
- for (HeapObject* object = data_it.Next();
- object != NULL; object = data_it.Next())
- object->Iterate(&v);
- }
+ HeapObjectIterator data_it(HEAP->old_data_space());
+ for (HeapObject* object = data_it.next();
+ object != NULL; object = data_it.next())
+ object->Iterate(&v);
}
#endif
@@ -923,64 +940,6 @@ static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
}
-void Heap::ScavengeStoreBufferCallback(
- Heap* heap,
- MemoryChunk* page,
- StoreBufferEvent event) {
- heap->store_buffer_rebuilder_.Callback(page, event);
-}
-
-
-void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
- if (event == kStoreBufferStartScanningPagesEvent) {
- start_of_current_page_ = NULL;
- current_page_ = NULL;
- } else if (event == kStoreBufferScanningPageEvent) {
- if (current_page_ != NULL) {
- // If this page already overflowed the store buffer during this iteration.
- if (current_page_->scan_on_scavenge()) {
- // Then we should wipe out the entries that have been added for it.
- store_buffer_->SetTop(start_of_current_page_);
- } else if (store_buffer_->Top() - start_of_current_page_ >=
- (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
- // Did we find too many pointers in the previous page? The heuristic is
- // that no page can take more than 1/5 of the remaining slots in the store
- // buffer.
- current_page_->set_scan_on_scavenge(true);
- store_buffer_->SetTop(start_of_current_page_);
- } else {
- // In this case the page we scanned took a reasonable number of slots in
- // the store buffer. It has now been rehabilitated and is no longer
- // marked scan_on_scavenge.
- ASSERT(!current_page_->scan_on_scavenge());
- }
- }
- start_of_current_page_ = store_buffer_->Top();
- current_page_ = page;
- } else if (event == kStoreBufferFullEvent) {
- // The current page overflowed the store buffer again. Wipe out its entries
- // in the store buffer and mark it scan-on-scavenge again. This may happen
- // several times while scanning.
- if (current_page_ == NULL) {
- // Store Buffer overflowed while scanning promoted objects. These are not
- // in any particular page, though they are likely to be clustered by the
- // allocation routines.
- store_buffer_->HandleFullness();
- } else {
- // Store Buffer overflowed while scanning a particular old space page for
- // pointers to new space.
- ASSERT(current_page_ == page);
- ASSERT(page != NULL);
- current_page_->set_scan_on_scavenge(true);
- ASSERT(start_of_current_page_ != store_buffer_->Top());
- store_buffer_->SetTop(start_of_current_page_);
- }
- } else {
- UNREACHABLE();
- }
-}
-
-
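The deleted StoreBufferRebuilder enforced the heuristic its comments describe: a page whose entries fill at least a quarter of the space left in the store buffer (equivalently, at least a fifth of the buffer measured from where its entries began) gets flipped to scan-on-scavenge and its entries wiped. The core test, as a sketch over raw slot addresses:

// Sketch of the removed heuristic's arithmetic: 4a >= r is the same as
// a >= (a + r) / 5, i.e. the page took at least 1/5 of the slots between
// its first entry and the buffer limit.
#include <cstdint>

bool PageOverflowsStoreBuffer(intptr_t start_of_page_entries,
                              intptr_t top,
                              intptr_t limit) {
  intptr_t used_by_page = top - start_of_page_entries;  // a
  intptr_t remaining = limit - top;                     // r
  return used_by_page >= (remaining >> 2);
}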
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@@ -988,6 +947,22 @@ void Heap::Scavenge() {
gc_state_ = SCAVENGE;
+ SwitchScavengingVisitorsTableIfProfilingWasEnabled();
+
+ Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
+#ifdef DEBUG
+ VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
+ VerifyPageWatermarkValidity(map_space_, ALL_VALID);
+#endif
+
+ // We do not update an allocation watermark of the top page during linear
+ // allocation to avoid overhead. So to maintain the watermark invariant
+ // we have to manually cache the watermark and mark the top page as having an
+ // invalid watermark. This guarantees that dirty regions iteration will use a
+ // correct watermark even if a linear allocation happens.
+ old_pointer_space_->FlushTopPageWatermark();
+ map_space_->FlushTopPageWatermark();
+
// Implements Cheney's copying algorithm
LOG(isolate_, ResourceEvent("scavenge", "begin"));
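Cheney's algorithm needs no mark stack: the span of to-space between a scan pointer and the allocation top is itself the worklist of copied-but-unscanned objects, which is exactly how DoScavenge below treats (new_space_front, new_space_.top()). A compact sketch, independent of V8's object layout:

// Minimal Cheney sketch (not V8's representation): to_space doubles as the
// grey queue; forwarding pointers break cycles and deduplicate copies.
#include <cstddef>
#include <vector>

struct Obj {
  Obj* forward = nullptr;       // set once the object has been evacuated
  std::vector<Obj*> children;   // outgoing pointers
};

static Obj* Evacuate(Obj* o, std::vector<Obj*>& to_space) {
  if (o->forward != nullptr) return o->forward;  // already copied
  Obj* copy = new Obj{nullptr, o->children};     // "copy into to-space"
  o->forward = copy;
  to_space.push_back(copy);                      // enqueue for scanning
  return copy;
}

static void CheneyScavenge(std::vector<Obj*>& roots) {
  std::vector<Obj*> to_space;
  for (Obj*& root : roots) root = Evacuate(root, to_space);
  for (size_t scan = 0; scan < to_space.size(); ++scan)  // drain the queue
    for (Obj*& slot : to_space[scan]->children)
      slot = Evacuate(slot, to_space);
}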
@@ -999,13 +974,6 @@ void Heap::Scavenge() {
CheckNewSpaceExpansionCriteria();
- SelectScavengingVisitorsTable();
-
- incremental_marking()->PrepareForScavenge();
-
- old_pointer_space()->AdvanceSweeper(new_space_.Size());
- old_data_space()->AdvanceSweeper(new_space_.Size());
-
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_.Flip();
@@ -1028,29 +996,32 @@ void Heap::Scavenge() {
// for the addresses of promoted objects: every object promoted
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
- Address new_space_front = new_space_.ToSpaceStart();
- promotion_queue_.Initialize(new_space_.ToSpaceEnd());
-
-#ifdef DEBUG
- store_buffer()->Clean();
-#endif
+ Address new_space_front = new_space_.ToSpaceLow();
+ promotion_queue_.Initialize(new_space_.ToSpaceHigh());
+ is_safe_to_read_maps_ = false;
ScavengeVisitor scavenge_visitor(this);
// Copy roots.
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
- // Copy objects reachable from the old generation.
- {
- StoreBufferRebuildScope scope(this,
- store_buffer(),
- &ScavengeStoreBufferCallback);
- store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
- }
+ // Copy objects reachable from the old generation. By definition,
+ // there are no intergenerational pointers in code or data spaces.
+ IterateDirtyRegions(old_pointer_space_,
+ &Heap::IteratePointersInDirtyRegion,
+ &ScavengePointer,
+ WATERMARK_CAN_BE_INVALID);
+
+ IterateDirtyRegions(map_space_,
+ &IteratePointersInDirtyMapsRegion,
+ &ScavengePointer,
+ WATERMARK_CAN_BE_INVALID);
+
+ lo_space_->IterateDirtyRegions(&ScavengePointer);
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
- for (HeapObject* cell = cell_iterator.Next();
- cell != NULL; cell = cell_iterator.Next()) {
+ for (HeapObject* cell = cell_iterator.next();
+ cell != NULL; cell = cell_iterator.next()) {
if (cell->IsJSGlobalPropertyCell()) {
Address value_address =
reinterpret_cast<Address>(cell) +
@@ -1075,16 +1046,14 @@ void Heap::Scavenge() {
LiveObjectList::UpdateReferencesForScavengeGC();
isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
- incremental_marking()->UpdateMarkingDequeAfterScavenge();
ASSERT(new_space_front == new_space_.top());
+ is_safe_to_read_maps_ = true;
+
// Set age mark.
new_space_.set_age_mark(new_space_.top());
- new_space_.LowerInlineAllocationLimit(
- new_space_.inline_allocation_limit_step());
-
// Update how much has survived scavenge.
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
@@ -1143,56 +1112,35 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
}
-void Heap::UpdateReferencesInExternalStringTable(
- ExternalStringTableUpdaterCallback updater_func) {
-
- // Update old space string references.
- if (external_string_table_.old_space_strings_.length() > 0) {
- Object** start = &external_string_table_.old_space_strings_[0];
- Object** end = start + external_string_table_.old_space_strings_.length();
- for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
- }
-
- UpdateNewSpaceReferencesInExternalStringTable(updater_func);
-}
-
-
static Object* ProcessFunctionWeakReferences(Heap* heap,
Object* function,
WeakObjectRetainer* retainer) {
- Object* undefined = heap->undefined_value();
- Object* head = undefined;
+ Object* head = heap->undefined_value();
JSFunction* tail = NULL;
Object* candidate = function;
- while (candidate != undefined) {
+ while (candidate != heap->undefined_value()) {
// Check whether to keep the candidate in the list.
JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
Object* retain = retainer->RetainAs(candidate);
if (retain != NULL) {
- if (head == undefined) {
+ if (head == heap->undefined_value()) {
// First element in the list.
- head = retain;
+ head = candidate_function;
} else {
// Subsequent elements in the list.
ASSERT(tail != NULL);
- tail->set_next_function_link(retain);
+ tail->set_next_function_link(candidate_function);
}
// Retained function is new tail.
- candidate_function = reinterpret_cast<JSFunction*>(retain);
tail = candidate_function;
-
- ASSERT(retain->IsUndefined() || retain->IsJSFunction());
-
- if (retain == undefined) break;
}
-
// Move to next element in the list.
candidate = candidate_function->next_function_link();
}
// Terminate the list if there is one or more elements.
if (tail != NULL) {
- tail->set_next_function_link(undefined);
+ tail->set_next_function_link(heap->undefined_value());
}
return head;
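This function and ProcessWeakReferences below share one shape: walk an intrusive singly linked list, splice out entries the retainer declines to keep, and re-terminate the tail. A generic sketch of that pass (a node type with a `next` field is assumed):

// Generic weak-list pass; `retain` returns nullptr to drop an entry.
template <typename T, typename Retain>
T* ProcessWeakList(T* head, Retain&& retain) {
  T* new_head = nullptr;
  T* tail = nullptr;
  for (T* current = head; current != nullptr; current = current->next) {
    if (retain(current) == nullptr) continue;  // dropped: splice it out
    if (tail == nullptr) {
      new_head = current;    // first retained element becomes the head
    } else {
      tail->next = current;  // link behind the previous retained element
    }
    tail = current;
  }
  if (tail != nullptr) tail->next = nullptr;  // terminate the list
  return new_head;
}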
@@ -1200,32 +1148,28 @@ static Object* ProcessFunctionWeakReferences(Heap* heap,
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
- Object* undefined = undefined_value();
- Object* head = undefined;
+ Object* head = undefined_value();
Context* tail = NULL;
Object* candidate = global_contexts_list_;
- while (candidate != undefined) {
+ while (candidate != undefined_value()) {
// Check whether to keep the candidate in the list.
Context* candidate_context = reinterpret_cast<Context*>(candidate);
Object* retain = retainer->RetainAs(candidate);
if (retain != NULL) {
- if (head == undefined) {
+ if (head == undefined_value()) {
// First element in the list.
- head = retain;
+ head = candidate_context;
} else {
// Subsequent elements in the list.
ASSERT(tail != NULL);
tail->set_unchecked(this,
Context::NEXT_CONTEXT_LINK,
- retain,
+ candidate_context,
UPDATE_WRITE_BARRIER);
}
// Retained context is new tail.
- candidate_context = reinterpret_cast<Context*>(retain);
tail = candidate_context;
- if (retain == undefined) break;
-
// Process the weak list of optimized functions for the context.
Object* function_list_head =
ProcessFunctionWeakReferences(
@@ -1237,7 +1181,6 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
function_list_head,
UPDATE_WRITE_BARRIER);
}
-
// Move to next element in the list.
candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
}
@@ -1269,45 +1212,35 @@ class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
Address new_space_front) {
do {
- SemiSpace::AssertValidRange(new_space_front, new_space_.top());
+ ASSERT(new_space_front <= new_space_.top());
+
// The addresses new_space_front and new_space_.top() define a
// queue of unprocessed copied objects. Process them until the
// queue is empty.
- while (new_space_front != new_space_.top()) {
- if (!NewSpacePage::IsAtEnd(new_space_front)) {
- HeapObject* object = HeapObject::FromAddress(new_space_front);
- new_space_front +=
- NewSpaceScavenger::IterateBody(object->map(), object);
- } else {
- new_space_front =
- NewSpacePage::FromLimit(new_space_front)->next_page()->body();
- }
+ while (new_space_front < new_space_.top()) {
+ HeapObject* object = HeapObject::FromAddress(new_space_front);
+ new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
}
// Promote and process all the to-be-promoted objects.
- {
- StoreBufferRebuildScope scope(this,
- store_buffer(),
- &ScavengeStoreBufferCallback);
- while (!promotion_queue()->is_empty()) {
- HeapObject* target;
- int size;
- promotion_queue()->remove(&target, &size);
-
- // Promoted object might be already partially visited
- // during old space pointer iteration. Thus we search specifically
- // for pointers to from semispace instead of looking for pointers
- // to new space.
- ASSERT(!target->IsMap());
- IterateAndMarkPointersToFromSpace(target->address(),
- target->address() + size,
- &ScavengeObject);
- }
+ while (!promotion_queue_.is_empty()) {
+ HeapObject* target;
+ int size;
+ promotion_queue_.remove(&target, &size);
+
+ // Promoted object might be already partially visited
+ // during dirty regions iteration. Thus we search specifically
+ // for pointers to from semispace instead of looking for pointers
+ // to new space.
+ ASSERT(!target->IsMap());
+ IterateAndMarkPointersToFromSpace(target->address(),
+ target->address() + size,
+ &ScavengePointer);
}
// Take another spin if there are now unswept objects in new space
// (there are currently no more unswept promoted objects).
- } while (new_space_front != new_space_.top());
+ } while (new_space_front < new_space_.top());
return new_space_front;
}
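Promoted objects leave new space entirely, so the Cheney scan pointer can never reach them; the promotion queue (initialized at ToSpaceHigh in Scavenge above, i.e. growing down from the top of to-space) records an (object, size) pair per promotion so their bodies can still be re-scanned for from-space pointers. A rough sketch of the queue's contract, with hypothetical storage:

// Sketch only: V8 itself packs these records into the unused top of
// to-space rather than a heap-allocated vector.
#include <utility>
#include <vector>

struct HeapObject;

class PromotionQueueSketch {
 public:
  bool is_empty() const { return entries_.empty(); }
  void insert(HeapObject* target, int size) {
    entries_.emplace_back(target, size);
  }
  void remove(HeapObject** target, int* size) {
    *target = entries_.back().first;
    *size = entries_.back().second;
    entries_.pop_back();
  }

 private:
  std::vector<std::pair<HeapObject*, int> > entries_;
};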
@@ -1319,11 +1252,26 @@ enum LoggingAndProfiling {
};
-enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
+typedef void (*ScavengingCallback)(Map* map,
+ HeapObject** slot,
+ HeapObject* object);
+
+
+static Atomic32 scavenging_visitors_table_mode_;
+static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+
+INLINE(static void DoScavengeObject(Map* map,
+ HeapObject** slot,
+ HeapObject* obj));
-template<MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
+void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+ scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+}
+
+
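DoScavengeObject above is the entire hot path of evacuation dispatch: load the visitor id from the map, index the flat table, make one indirect call. Templating ScavengingVisitor over the logging mode then yields two pre-built tables that can be swapped wholesale. A stripped-down sketch of the pattern (names hypothetical):

// Dispatch-table sketch; real V8 derives visitor ids in StaticVisitorBase.
struct HeapObject;

enum VisitorId { kVisitDataObject, kVisitPointerObject, kVisitorIdCount };

struct Map { VisitorId visitor_id; };

typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
                                   HeapObject* object);

static ScavengingCallback scavenging_table[kVisitorIdCount];

inline void DispatchScavenge(Map* map, HeapObject** slot, HeapObject* obj) {
  scavenging_table[map->visitor_id](map, slot, obj);  // one indirect call
}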
+template<LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
public:
static void Initialize() {
@@ -1358,13 +1306,9 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
- if (marks_handling == IGNORE_MARKS) {
- table_.Register(kVisitJSFunction,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<JSFunction::kSize>);
- } else {
- table_.Register(kVisitJSFunction, &EvacuateJSFunction);
- }
+ table_.Register(kVisitJSFunction,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::
+ template VisitSpecialized<JSFunction::kSize>);
table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
kVisitDataObject,
@@ -1429,15 +1373,10 @@ class ScavengingVisitor : public StaticVisitorBase {
}
}
- if (marks_handling == TRANSFER_MARKS) {
- if (Marking::TransferColor(source, target)) {
- MemoryChunk::IncrementLiveBytes(target->address(), size);
- }
- }
-
return target;
}
+
template<ObjectContents object_contents, SizeRestriction size_restriction>
static inline void EvacuateObject(Map* map,
HeapObject** slot,
@@ -1447,14 +1386,13 @@ class ScavengingVisitor : public StaticVisitorBase {
(object_size <= Page::kMaxHeapObjectSize));
ASSERT(object->Size() == object_size);
- Heap* heap = map->GetHeap();
+ Heap* heap = map->heap();
if (heap->ShouldBePromoted(object->address(), object_size)) {
MaybeObject* maybe_result;
if ((size_restriction != SMALL) &&
(object_size > Page::kMaxHeapObjectSize)) {
- maybe_result = heap->lo_space()->AllocateRaw(object_size,
- NOT_EXECUTABLE);
+ maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
} else {
if (object_contents == DATA_OBJECT) {
maybe_result = heap->old_data_space()->AllocateRaw(object_size);
@@ -1476,36 +1414,13 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
}
- MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
- Object* result = allocation->ToObjectUnchecked();
-
+ Object* result =
+ heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
*slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
return;
}
- static inline void EvacuateJSFunction(Map* map,
- HeapObject** slot,
- HeapObject* object) {
- ObjectEvacuationStrategy<POINTER_OBJECT>::
- template VisitSpecialized<JSFunction::kSize>(map, slot, object);
-
- HeapObject* target = *slot;
- MarkBit mark_bit = Marking::MarkBitFrom(target);
- if (Marking::IsBlack(mark_bit)) {
- // This object is black and it might not be rescanned by marker.
- // We should explicitly record code entry slot for compaction because
- // promotion queue processing (IterateAndMarkPointersToFromSpace) will
- // miss it as it is not HeapObject-tagged.
- Address code_entry_slot =
- target->address() + JSFunction::kCodeEntryOffset;
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
- map->GetHeap()->mark_compact_collector()->
- RecordCodeEntrySlot(code_entry_slot, code);
- }
- }
-
-
static inline void EvacuateFixedArray(Map* map,
HeapObject** slot,
HeapObject* object) {
@@ -1564,17 +1479,14 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* object) {
ASSERT(IsShortcutCandidate(map->instance_type()));
- Heap* heap = map->GetHeap();
-
- if (marks_handling == IGNORE_MARKS &&
- ConsString::cast(object)->unchecked_second() ==
- heap->empty_string()) {
+ if (ConsString::cast(object)->unchecked_second() ==
+ map->heap()->empty_string()) {
HeapObject* first =
HeapObject::cast(ConsString::cast(object)->unchecked_first());
*slot = first;
- if (!heap->InNewSpace(first)) {
+ if (!map->heap()->InNewSpace(first)) {
object->set_map_word(MapWord::FromForwardingAddress(first));
return;
}
@@ -1588,7 +1500,7 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
- heap->DoScavengeObject(first->map(), slot, first);
+ DoScavengeObject(first->map(), slot, first);
object->set_map_word(MapWord::FromForwardingAddress(*slot));
return;
}
@@ -1619,60 +1531,45 @@ class ScavengingVisitor : public StaticVisitorBase {
};
-template<MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
+template<LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
- ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
+ ScavengingVisitor<logging_and_profiling_mode>::table_;
static void InitializeScavengingVisitorsTables() {
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
+ ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
+ scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
}
-void Heap::SelectScavengingVisitorsTable() {
- bool logging_and_profiling =
- isolate()->logger()->is_logging() ||
+void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
+ if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
+ // Table was already updated by some isolate.
+ return;
+ }
+
+ if (isolate()->logger()->is_logging() ||
CpuProfiler::is_profiling(isolate()) ||
(isolate()->heap_profiler() != NULL &&
- isolate()->heap_profiler()->is_profiling());
-
- if (!incremental_marking()->IsMarking()) {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
- } else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
- } else {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::GetTable());
- } else {
- scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
+ isolate()->heap_profiler()->is_profiling())) {
+ // If one of the isolates is doing scavenge at this moment of time
+ // it might see this table in an inconsistent state when
+ // some of the callbacks point to
+ // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
+ // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
+ // However this does not lead to any bugs as such isolate does not have
+ // profiling enabled and any isolate with enabled profiling is guaranteed
+ // to see the table in the consistent state.
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
- if (incremental_marking()->IsCompacting()) {
- // When compacting forbid short-circuiting of cons-strings.
- // Scavenging code relies on the fact that new space object
- // can't be evacuated into evacuation candidate but
- // short-circuiting violates this assumption.
- scavenging_visitors_table_.Register(
- StaticVisitorBase::kVisitShortcutCandidate,
- scavenging_visitors_table_.GetVisitorById(
- StaticVisitorBase::kVisitConsString));
- }
+ // We use Release_Store to prevent reordering of this write before writes
+ // to the table.
+ Release_Store(&scavenging_visitors_table_mode_,
+ LOGGING_AND_PROFILING_ENABLED);
}
}
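The Release_Store at the end is what makes this lazy one-way switch safe: the writes that fill the table must become visible before the mode flag that publishes them. The same argument in C++11 atomics (V8 here uses its own Atomic32 operations):

// Publication-ordering sketch with std::atomic in place of Release_Store.
#include <atomic>

enum TableMode { kProfilingDisabled, kProfilingEnabled };

static std::atomic<int> table_mode{kProfilingDisabled};

void PublishProfilingTable() {
  // ... copy every callback into the shared table first ...
  table_mode.store(kProfilingEnabled, std::memory_order_release);
}

bool SeesFullyPublishedTable() {
  // An acquire load that observes kProfilingEnabled also observes every
  // table write made before the release store above.
  return table_mode.load(std::memory_order_acquire) == kProfilingEnabled;
}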
@@ -1682,7 +1579,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
MapWord first_word = object->map_word();
ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
- map->GetHeap()->DoScavengeObject(map, p, object);
+ DoScavengeObject(map, p, object);
}
@@ -1708,9 +1605,7 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
}
-MaybeObject* Heap::AllocateMap(InstanceType instance_type,
- int instance_size,
- ElementsKind elements_kind) {
+MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
Object* result;
{ MaybeObject* maybe_result = AllocateRawMap();
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -1732,7 +1627,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
- map->set_elements_kind(elements_kind);
+ map->set_elements_kind(FAST_ELEMENTS);
// If the map object is aligned fill the padding area with Smi 0 objects.
if (Map::kPadStart < Map::kSize) {
@@ -1812,19 +1707,12 @@ bool Heap::CreateInitialMaps() {
}
set_empty_fixed_array(FixedArray::cast(obj));
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
+ { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_null_value(Oddball::cast(obj));
+ set_null_value(obj);
Oddball::cast(obj)->set_kind(Oddball::kNull);
- { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_undefined_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kUndefined);
- ASSERT(!InNewSpace(undefined_value()));
-
// Allocate the empty descriptor array.
{ MaybeObject* maybe_obj = AllocateEmptyFixedArray();
if (!maybe_obj->ToObject(&obj)) return false;
@@ -1910,12 +1798,6 @@ bool Heap::CreateInitialMaps() {
}
set_byte_array_map(Map::cast(obj));
- { MaybeObject* maybe_obj =
- AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_free_space_map(Map::cast(obj));
-
{ MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2116,7 +1998,7 @@ MaybeObject* Heap::CreateOddball(const char* to_string,
Object* to_number,
byte kind) {
Object* result;
- { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
+ { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
return Oddball::cast(result)->Initialize(to_string, to_number, kind);
@@ -2129,13 +2011,7 @@ bool Heap::CreateApiObjects() {
{ MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
- // Don't use Smi-only elements optimizations for objects with the neander
- // map. There are too many cases where element values are set directly with a
- // bottleneck to trap the Smi-only -> fast elements transition, and there
- // appears to be no benefit for optimize this case.
- Map* new_neander_map = Map::cast(obj);
- new_neander_map->set_elements_kind(FAST_ELEMENTS);
- set_neander_map(new_neander_map);
+ set_neander_map(Map::cast(obj));
{ MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2180,12 +2056,6 @@ void Heap::CreateFixedStubs() {
// To workaround the problem, make separate functions without inlining.
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
-
- // Create stubs that should be there, so we don't unexpectedly have to
- // create them if we need them during the creation of another stub.
- // Stub creation mixes raw pointers and handles in an unsafe manner so
- // we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime();
}
@@ -2196,18 +2066,20 @@ bool Heap::CreateInitialObjects() {
{ MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_minus_zero_value(HeapNumber::cast(obj));
+ set_minus_zero_value(obj);
ASSERT(signbit(minus_zero_value()->Number()) != 0);
{ MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_nan_value(HeapNumber::cast(obj));
+ set_nan_value(obj);
- { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
+ { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_infinity_value(HeapNumber::cast(obj));
+ set_undefined_value(obj);
+ Oddball::cast(obj)->set_kind(Oddball::kUndefined);
+ ASSERT(!InNewSpace(undefined_value()));
// Allocate initial symbol table.
{ MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
@@ -2216,17 +2088,19 @@ bool Heap::CreateInitialObjects() {
// Don't use set_symbol_table() due to asserts.
roots_[kSymbolTableRootIndex] = obj;
- // Finish initializing oddballs after creating symboltable.
- { MaybeObject* maybe_obj =
- undefined_value()->Initialize("undefined",
- nan_value(),
- Oddball::kUndefined);
- if (!maybe_obj->ToObject(&obj)) return false;
+ // Assign the print strings for oddballs after creating symboltable.
+ Object* symbol;
+ { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
+ if (!maybe_symbol->ToObject(&symbol)) return false;
}
+ Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
+ Oddball::cast(undefined_value())->set_to_number(nan_value());
- // Initialize the null_value.
+ // Allocate the null_value
{ MaybeObject* maybe_obj =
- null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
+ Oddball::cast(null_value())->Initialize("null",
+ Smi::FromInt(0),
+ Oddball::kNull);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2235,51 +2109,43 @@ bool Heap::CreateInitialObjects() {
Oddball::kTrue);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_true_value(Oddball::cast(obj));
+ set_true_value(obj);
{ MaybeObject* maybe_obj = CreateOddball("false",
Smi::FromInt(0),
Oddball::kFalse);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_false_value(Oddball::cast(obj));
+ set_false_value(obj);
{ MaybeObject* maybe_obj = CreateOddball("hole",
Smi::FromInt(-1),
Oddball::kTheHole);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_the_hole_value(Oddball::cast(obj));
+ set_the_hole_value(obj);
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
- Smi::FromInt(-2),
+ Smi::FromInt(-4),
Oddball::kArgumentMarker);
if (!maybe_obj->ToObject(&obj)) return false;
}
- set_arguments_marker(Oddball::cast(obj));
+ set_arguments_marker(obj);
{ MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
- Smi::FromInt(-3),
+ Smi::FromInt(-2),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_no_interceptor_result_sentinel(obj);
{ MaybeObject* maybe_obj = CreateOddball("termination_exception",
- Smi::FromInt(-4),
+ Smi::FromInt(-3),
Oddball::kOther);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_termination_exception(obj);
- { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
- Smi::FromInt(-5),
- Oddball::kOther);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_frame_alignment_marker(Oddball::cast(obj));
- STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
-
// Allocate the empty string.
{ MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
@@ -2556,15 +2422,6 @@ MaybeObject* Heap::NumberToString(Object* number,
}
-MaybeObject* Heap::Uint32ToString(uint32_t value,
- bool check_number_string_cache) {
- Object* number;
- MaybeObject* maybe = NumberFromUint32(value);
- if (!maybe->To<Object>(&number)) return maybe;
- return NumberToString(number, check_number_string_cache);
-}
-
-
Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}
@@ -2880,23 +2737,25 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
// Make an attempt to flatten the buffer to reduce access time.
buffer = buffer->TryFlattenGetString();
+ // TODO(1626): For now slicing external strings is not supported. However,
+ // a flat cons string can have an external string as first part in some cases.
+ // Therefore we have to single out this case as well.
if (!FLAG_string_slices ||
- !buffer->IsFlat() ||
+ (buffer->IsConsString() &&
+ (!buffer->IsFlat() ||
+ !ConsString::cast(buffer)->first()->IsSeqString())) ||
+ buffer->IsExternalString() ||
length < SlicedString::kMinLength ||
pretenure == TENURED) {
Object* result;
- // WriteToFlat takes care of the case when an indirect string has a
- // different encoding from its underlying string. These encodings may
- // differ because of externalization.
- bool is_ascii = buffer->IsAsciiRepresentation();
- { MaybeObject* maybe_result = is_ascii
- ? AllocateRawAsciiString(length, pretenure)
- : AllocateRawTwoByteString(length, pretenure);
+ { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
+ ? AllocateRawAsciiString(length, pretenure)
+ : AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
String* string_result = String::cast(result);
// Copy the characters into the new object.
- if (is_ascii) {
+ if (buffer->IsAsciiRepresentation()) {
ASSERT(string_result->IsAsciiRepresentation());
char* dest = SeqAsciiString::cast(string_result)->GetChars();
String::WriteToFlat(buffer, dest, start, end);
@@ -2909,17 +2768,12 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
}
ASSERT(buffer->IsFlat());
+ ASSERT(!buffer->IsExternalString());
#if DEBUG
buffer->StringVerify();
#endif
Object* result;
- // When slicing an indirect string we use its encoding for a newly created
- // slice and don't check the encoding of the underlying string. This is safe
- // even if the encodings are different because of externalization. If an
- // indirect ASCII string is pointing to a two-byte string, the two-byte char
- // codes of the underlying string must still fit into ASCII (because
- // externalization must not change char codes).
{ Map* map = buffer->IsAsciiRepresentation()
? sliced_ascii_string_map()
: sliced_string_map();
@@ -2945,14 +2799,13 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
sliced_string->set_parent(buffer);
sliced_string->set_offset(start);
}
- ASSERT(sliced_string->parent()->IsSeqString() ||
- sliced_string->parent()->IsExternalString());
+ ASSERT(sliced_string->parent()->IsSeqString());
return result;
}
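A sliced string, as restored here, is just a (parent, offset, length) view over a flat sequential parent, so a substring above SlicedString::kMinLength costs O(1) space instead of a character copy. Roughly, with hypothetical types:

// Sliced-string sketch (not V8's object layout).
struct SeqAsciiStr {
  const char* chars;
  int length;
};

struct SlicedStr {
  const SeqAsciiStr* parent;  // must stay flat and sequential (see ASSERT)
  int offset;
  int length;
};

inline char CharAt(const SlicedStr& s, int i) {
  return s.parent->chars[s.offset + i];  // no buffer copy at slice time
}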
MaybeObject* Heap::AllocateExternalStringFromAscii(
- const ExternalAsciiString::Resource* resource) {
+ ExternalAsciiString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
isolate()->context()->mark_out_of_memory();
@@ -2975,7 +2828,7 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
MaybeObject* Heap::AllocateExternalStringFromTwoByte(
- const ExternalTwoByteString::Resource* resource) {
+ ExternalTwoByteString::Resource* resource) {
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
isolate()->context()->mark_out_of_memory();
@@ -3039,7 +2892,7 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Object* result;
{ MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
? old_data_space_->AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
+ : lo_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -3075,8 +2928,8 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
} else if (size == 2 * kPointerSize) {
filler->set_map(two_pointer_filler_map());
} else {
- filler->set_map(free_space_map());
- FreeSpace::cast(filler)->set_size(size);
+ filler->set_map(byte_array_map());
+ ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
}
}
@@ -3122,7 +2975,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
// Large code objects and code objects which should stay at a fixed address
// are allocated in large object space.
if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
- maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+ maybe_result = lo_space_->AllocateRawCode(obj_size);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
@@ -3167,7 +3020,7 @@ MaybeObject* Heap::CopyCode(Code* code) {
int obj_size = code->Size();
MaybeObject* maybe_result;
if (obj_size > MaxObjectSizeInPagedSpace()) {
- maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+ maybe_result = lo_space_->AllocateRawCode(obj_size);
} else {
maybe_result = code_space_->AllocateRaw(obj_size);
}
@@ -3210,7 +3063,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
MaybeObject* maybe_result;
if (new_obj_size > MaxObjectSizeInPagedSpace()) {
- maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
+ maybe_result = lo_space_->AllocateRawCode(new_obj_size);
} else {
maybe_result = code_space_->AllocateRaw(new_obj_size);
}
@@ -3259,9 +3112,9 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
}
-void Heap::InitializeFunction(JSFunction* function,
- SharedFunctionInfo* shared,
- Object* prototype) {
+MaybeObject* Heap::InitializeFunction(JSFunction* function,
+ SharedFunctionInfo* shared,
+ Object* prototype) {
ASSERT(!prototype->IsMap());
function->initialize_properties();
function->initialize_elements();
@@ -3271,6 +3124,7 @@ void Heap::InitializeFunction(JSFunction* function,
function->set_context(undefined_value());
function->set_literals(empty_fixed_array());
function->set_next_function_link(undefined_value());
+ return function;
}
@@ -3280,18 +3134,8 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
// different context.
JSFunction* object_function =
function->context()->global_context()->object_function();
-
- // Each function prototype gets a copy of the object function map.
- // This avoid unwanted sharing of maps between prototypes of different
- // constructors.
- Map* new_map;
- ASSERT(object_function->has_initial_map());
- { MaybeObject* maybe_map =
- object_function->initial_map()->CopyDropTransitions();
- if (!maybe_map->To<Map>(&new_map)) return maybe_map;
- }
Object* prototype;
- { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
+ { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
// When creating the prototype for the function we must set its
@@ -3316,8 +3160,7 @@ MaybeObject* Heap::AllocateFunction(Map* function_map,
{ MaybeObject* maybe_result = Allocate(function_map, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- InitializeFunction(JSFunction::cast(result), shared, prototype);
- return result;
+ return InitializeFunction(JSFunction::cast(result), shared, prototype);
}
@@ -3487,9 +3330,6 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
// We cannot always fill with one_pointer_filler_map because objects
// created from API functions expect their internal fields to be initialized
// with undefined_value.
- // Pre-allocated fields need to be initialized with undefined_value as well
- // so that object accesses before the constructor completes (e.g. in the
- // debugger) will not cause a crash.
if (map->constructor()->IsJSFunction() &&
JSFunction::cast(map->constructor())->shared()->
IsInobjectSlackTrackingInProgress()) {
@@ -3499,7 +3339,7 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
} else {
filler = Heap::undefined_value();
}
- obj->InitializeBody(map, Heap::undefined_value(), filler);
+ obj->InitializeBody(map->instance_size(), filler);
}
@@ -3537,8 +3377,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
- ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
- JSObject::cast(obj)->HasFastElements());
+ ASSERT(JSObject::cast(obj)->HasFastElements());
return obj;
}
@@ -3581,7 +3420,6 @@ MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
result->InitializeBody(map->instance_size(), Smi::FromInt(0));
result->set_handler(handler);
- result->set_hash(undefined_value());
return result;
}
@@ -3605,7 +3443,6 @@ MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
result->InitializeBody(map->instance_size(), Smi::FromInt(0));
result->set_handler(handler);
- result->set_hash(undefined_value());
result->set_call_trap(call_trap);
result->set_construct_trap(construct_trap);
return result;
@@ -3722,7 +3559,6 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
object_size);
}
- ASSERT(JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
@@ -3755,13 +3591,13 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
MaybeObject* Heap::ReinitializeJSReceiver(
JSReceiver* object, InstanceType type, int size) {
- ASSERT(type >= FIRST_JS_OBJECT_TYPE);
+ ASSERT(type >= FIRST_JS_RECEIVER_TYPE);
// Allocate fresh map.
// TODO(rossberg): Once we optimize proxies, cache these maps.
Map* map;
- MaybeObject* maybe = AllocateMap(type, size);
- if (!maybe->To<Map>(&map)) return maybe;
+ MaybeObject* maybe_map_obj = AllocateMap(type, size);
+ if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
// Check that the receiver has at least the size of the fresh object.
int size_difference = object->map()->instance_size() - map->instance_size();
@@ -3772,35 +3608,30 @@ MaybeObject* Heap::ReinitializeJSReceiver(
// Allocate the backing storage for the properties.
int prop_size = map->unused_property_fields() - map->inobject_properties();
Object* properties;
- maybe = AllocateFixedArray(prop_size, TENURED);
- if (!maybe->ToObject(&properties)) return maybe;
-
- // Functions require some allocation, which might fail here.
- SharedFunctionInfo* shared = NULL;
- if (type == JS_FUNCTION_TYPE) {
- String* name;
- maybe = LookupAsciiSymbol("<freezing call trap>");
- if (!maybe->To<String>(&name)) return maybe;
- maybe = AllocateSharedFunctionInfo(name);
- if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
+ { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
+ if (!maybe_properties->ToObject(&properties)) return maybe_properties;
}
- // Because of possible retries of this function after failure,
- // we must NOT fail after this point, where we have changed the type!
-
// Reset the map for the object.
object->set_map(map);
- JSObject* jsobj = JSObject::cast(object);
// Reinitialize the object from the constructor map.
- InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
+ InitializeJSObjectFromMap(JSObject::cast(object),
+ FixedArray::cast(properties), map);
// Functions require some minimal initialization.
if (type == JS_FUNCTION_TYPE) {
- map->set_function_with_prototype(true);
- InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
- JSFunction::cast(object)->set_context(
- isolate()->context()->global_context());
+ String* name;
+ MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>");
+ if (!maybe_name->To<String>(&name)) return maybe_name;
+ SharedFunctionInfo* shared;
+ MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name);
+ if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared;
+ JSFunction* func;
+ MaybeObject* maybe_func =
+ InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
+ if (!maybe_func->To<JSFunction>(&func)) return maybe_func;
+ func->set_context(isolate()->context()->global_context());
}
// Put in filler if the new object is smaller than the old.
@@ -3983,7 +3814,7 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
+ ? lo_space_->AllocateRaw(size)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -4100,7 +3931,7 @@ MaybeObject* Heap::AllocateRawFixedArray(int length) {
int size = FixedArray::SizeFor(length);
return size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
+ : lo_space_->AllocateRawFixedArray(size);
}
@@ -4431,21 +4262,6 @@ STRUCT_LIST(MAKE_CASE)
}
-bool Heap::IsHeapIterable() {
- return (!old_pointer_space()->was_swept_conservatively() &&
- !old_data_space()->was_swept_conservatively());
-}
-
-
-void Heap::EnsureHeapIsIterable() {
- ASSERT(IsAllocationAllowed());
- if (!IsHeapIterable()) {
- CollectAllGarbage(kMakeHeapIterableMask);
- }
- ASSERT(IsHeapIterable());
-}
-
-
bool Heap::IdleNotification() {
static const int kIdlesBeforeScavenge = 4;
static const int kIdlesBeforeMarkSweep = 7;
@@ -4476,7 +4292,7 @@ bool Heap::IdleNotification() {
if (number_idle_notifications_ == kIdlesBeforeScavenge) {
if (contexts_disposed_ > 0) {
HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(false);
} else {
CollectGarbage(NEW_SPACE);
}
@@ -4488,12 +4304,12 @@ bool Heap::IdleNotification() {
// generated code for cached functions.
isolate_->compilation_cache()->Clear();
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(false);
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
} else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(true);
new_space_.Shrink();
last_idle_notification_gc_count_ = gc_count_;
number_idle_notifications_ = 0;
@@ -4503,7 +4319,7 @@ bool Heap::IdleNotification() {
contexts_disposed_ = 0;
} else {
HistogramTimerScope scope(isolate_->counters()->gc_context());
- CollectAllGarbage(kNoGCFlags);
+ CollectAllGarbage(false);
last_idle_notification_gc_count_ = gc_count_;
}
// If this is the first idle notification, we reset the
@@ -4523,11 +4339,8 @@ bool Heap::IdleNotification() {
// Make sure that we have no pending context disposals and
// conditionally uncommit from space.
- // Take into account that we might have decided to delay full collection
- // because incremental marking is in progress.
- ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
+ ASSERT(contexts_disposed_ == 0);
if (uncommit) UncommitFromSpace();
-
return finished;
}
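
The idle-notification hunk above encodes an escalation ladder: repeated idle calls first trigger a scavenge, then a full mark-sweep, and finally the most expensive collection before the counter resets. A minimal sketch of that ladder with the collectors stubbed out; the first two constants mirror the ones named above, while kIdlesBeforeMarkCompact's value is assumed, since its definition lies outside this hunk:

    #include <cstdio>

    static const int kIdlesBeforeScavenge = 4;
    static const int kIdlesBeforeMarkSweep = 7;
    static const int kIdlesBeforeMarkCompact = 8;  // assumed; not shown in the hunk

    void Scavenge()    { puts("scavenge"); }
    void MarkSweep()   { puts("mark-sweep"); }
    void MarkCompact() { puts("mark-compact"); }

    // Returns true once the heaviest collection has run, mirroring the way
    // IdleNotification() reports that it is finished.
    bool IdleNotification(int* idle_count) {
      ++*idle_count;
      if (*idle_count == kIdlesBeforeScavenge) {
        Scavenge();
      } else if (*idle_count == kIdlesBeforeMarkSweep) {
        MarkSweep();
      } else if (*idle_count >= kIdlesBeforeMarkCompact) {
        MarkCompact();
        *idle_count = 0;
        return true;
      }
      return false;
    }

    int main() {
      int idles = 0;
      while (!IdleNotification(&idles)) {}
      return 0;
    }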
@@ -4561,11 +4374,11 @@ void Heap::ReportHeapStatistics(const char* title) {
USE(title);
PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
title, gc_count_);
+ PrintF("mark-compact GC : %d\n", mc_count_);
PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
old_gen_promotion_limit_);
PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
old_gen_allocation_limit_);
- PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -4642,18 +4455,69 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
#ifdef DEBUG
+static void DummyScavengePointer(HeapObject** p) {
+}
+
+
+static void VerifyPointersUnderWatermark(
+ PagedSpace* space,
+ DirtyRegionCallback visit_dirty_region) {
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+ while (it.has_next()) {
+ Page* page = it.next();
+ Address start = page->ObjectAreaStart();
+ Address end = page->AllocationWatermark();
+
+ HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
+ start,
+ end,
+ visit_dirty_region,
+ &DummyScavengePointer);
+ }
+}
+
+
+static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
+ LargeObjectIterator it(space);
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+ if (object->IsFixedArray()) {
+ Address slot_address = object->address();
+ Address end = object->address() + object->Size();
+
+ while (slot_address < end) {
+ HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+ // When we are not in GC the Heap::InNewSpace() predicate
+ // checks that pointers which satisfy the predicate point into
+ // the active semispace.
+ HEAP->InNewSpace(*slot);
+ slot_address += kPointerSize;
+ }
+ }
+ }
+}
+
+
void Heap::Verify() {
ASSERT(HasBeenSetup());
- store_buffer()->Verify();
-
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);
new_space_.Verify();
- old_pointer_space_->Verify(&visitor);
- map_space_->Verify(&visitor);
+ VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
+ old_pointer_space_->Verify(&dirty_regions_visitor);
+ map_space_->Verify(&dirty_regions_visitor);
+
+ VerifyPointersUnderWatermark(old_pointer_space_,
+ &IteratePointersInDirtyRegion);
+ VerifyPointersUnderWatermark(map_space_,
+ &IteratePointersInDirtyMapsRegion);
+ VerifyPointersUnderWatermark(lo_space_);
+
+ VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
+ VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
VerifyPointersVisitor no_dirty_regions_visitor;
old_data_space_->Verify(&no_dirty_regions_visitor);
@@ -4662,7 +4526,6 @@ void Heap::Verify() {
lo_space_->Verify();
}
-
#endif // DEBUG
@@ -4758,221 +4621,275 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
#ifdef DEBUG
void Heap::ZapFromSpace() {
- NewSpacePageIterator it(new_space_.FromSpaceStart(),
- new_space_.FromSpaceEnd());
- while (it.has_next()) {
- NewSpacePage* page = it.next();
- for (Address cursor = page->body(), limit = page->body_limit();
- cursor < limit;
- cursor += kPointerSize) {
- Memory::Address_at(cursor) = kFromSpaceZapValue;
- }
+ ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
+ for (Address a = new_space_.FromSpaceLow();
+ a < new_space_.FromSpaceHigh();
+ a += kPointerSize) {
+ Memory::Address_at(a) = kFromSpaceZapValue;
}
}
#endif // DEBUG
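
Both versions of ZapFromSpace() implement the same debugging aid: fill the inactive semispace with a poison word so any stale pointer into it is immediately recognizable. A standalone sketch, with an arbitrary stand-in for kFromSpaceZapValue:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Arbitrary poison pattern; V8 uses its own kFromSpaceZapValue constant.
    constexpr uintptr_t kZapValue = 0xdeadbeef;

    // Overwrite every pointer-sized slot in [start, end) with the poison word.
    void Zap(uintptr_t* start, uintptr_t* end) {
      for (uintptr_t* p = start; p < end; ++p) *p = kZapValue;
    }

    int main() {
      std::vector<uintptr_t> from_space(64, 0);
      Zap(from_space.data(), from_space.data() + from_space.size());
      printf("%#llx\n", static_cast<unsigned long long>(from_space[0]));  // 0xdeadbeef
      return 0;
    }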
-void Heap::IterateAndMarkPointersToFromSpace(Address start,
- Address end,
- ObjectSlotCallback callback) {
+bool Heap::IteratePointersInDirtyRegion(Heap* heap,
+ Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
Address slot_address = start;
-
- // We are not collecting slots on new space objects during mutation,
- // thus we have to scan for pointers to evacuation candidates when we
- // promote objects. But we should not record any slots in non-black
- // objects. Grey objects' slots would be rescanned.
- // A white object might not survive until the end of the collection;
- // it would be a violation of the invariant to record its slots.
- bool record_slots = false;
- if (incremental_marking()->IsCompacting()) {
- MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
- record_slots = Marking::IsBlack(mark_bit);
- }
+ bool pointers_to_new_space_found = false;
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* object = *slot;
- // If the store buffer becomes overfull we mark pages as being exempt from
- // the store buffer. These pages are scanned to find pointers that point
- // to the new space. In that case we may hit newly promoted objects and
- // fix the pointers before the promotion queue gets to them. Thus the 'if'.
- if (object->IsHeapObject()) {
- if (Heap::InFromSpace(object)) {
- callback(reinterpret_cast<HeapObject**>(slot),
- HeapObject::cast(object));
- Object* new_object = *slot;
- if (InNewSpace(new_object)) {
- ASSERT(Heap::InToSpace(new_object));
- ASSERT(new_object->IsHeapObject());
- store_buffer_.EnterDirectlyIntoStoreBuffer(
- reinterpret_cast<Address>(slot));
- }
- ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
- } else if (record_slots &&
- MarkCompactCollector::IsOnEvacuationCandidate(object)) {
- mark_compact_collector()->RecordSlot(slot, slot, object);
+ if (heap->InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ copy_object_func(reinterpret_cast<HeapObject**>(slot));
+ if (heap->InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ pointers_to_new_space_found = true;
}
}
slot_address += kPointerSize;
}
+ return pointers_to_new_space_found;
}
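
The old visitor above is the basic scavenging primitive: walk a run of slots, forward anything that points into new space, and report whether a new-space pointer is still there afterwards, i.e. whether the region must stay dirty. A sketch with the heap predicate reduced to a plain address range:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for Heap::InNewSpace(): new space is just an address range here.
    struct FakeHeap {
      uintptr_t young_lo, young_hi;
      bool InNewSpace(uintptr_t p) const { return p >= young_lo && p < young_hi; }
    };

    using SlotCallback = void (*)(uintptr_t* slot);

    // Mirrors IteratePointersInDirtyRegion: the callback may rewrite the slot
    // (promotion moves the referent out of new space); the region stays dirty
    // only if a new-space pointer survives the callback.
    bool IterateRegion(const FakeHeap& heap, uintptr_t* start, uintptr_t* end,
                       SlotCallback copy_object) {
      bool found = false;
      for (uintptr_t* slot = start; slot < end; ++slot) {
        if (heap.InNewSpace(*slot)) {
          copy_object(slot);
          if (heap.InNewSpace(*slot)) found = true;
        }
      }
      return found;
    }

    int main() {
      FakeHeap heap{0x1000, 0x2000};
      uintptr_t slots[] = {0x1234, 0x4000};
      // A "promoting" callback that relocates the referent out of new space,
      // so the region comes back clean (prints 0).
      printf("%d\n", IterateRegion(heap, slots, slots + 2,
                                   [](uintptr_t* s) { *s += 0x3000; }));
      return 0;
    }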
-#ifdef DEBUG
-typedef bool (*CheckStoreBufferFilter)(Object** addr);
+// Compute the start address of the first map following the given addr.
+static inline Address MapStartAlign(Address addr) {
+ Address page = Page::FromAddress(addr)->ObjectAreaStart();
+ return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
+}
-bool IsAMapPointerAddress(Object** addr) {
- uintptr_t a = reinterpret_cast<uintptr_t>(addr);
- int mod = a % Map::kSize;
- return mod >= Map::kPointerFieldsBeginOffset &&
- mod < Map::kPointerFieldsEndOffset;
+// Compute the end address of the first map preceding the given addr.
+static inline Address MapEndAlign(Address addr) {
+ Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+ return page + ((addr - page) / Map::kSize * Map::kSize);
}
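
The two helpers above are round-up and round-down to a Map::kSize grid anchored at the page's object area start. A standalone sketch of the arithmetic with plain integers (both constants are stand-ins):

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kMapSize = 88;        // stand-in for Map::kSize
    constexpr uintptr_t kAreaStart = 0x1000;  // stand-in for page->ObjectAreaStart()

    // Round addr up to the next map boundary at or after it.
    uintptr_t MapStartAlign(uintptr_t addr) {
      return kAreaStart +
             ((addr - kAreaStart) + kMapSize - 1) / kMapSize * kMapSize;
    }

    // Round addr down to the last map boundary at or before it.
    uintptr_t MapEndAlign(uintptr_t addr) {
      return kAreaStart + (addr - kAreaStart) / kMapSize * kMapSize;
    }

    int main() {
      assert(MapStartAlign(kAreaStart) == kAreaStart);                 // already aligned
      assert(MapStartAlign(kAreaStart + 1) == kAreaStart + kMapSize);  // rounds up
      assert(MapEndAlign(kAreaStart + kMapSize + 7) == kAreaStart + kMapSize);  // down
      return 0;
    }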
-bool EverythingsAPointer(Object** addr) {
- return true;
-}
+static bool IteratePointersInDirtyMaps(Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
+ ASSERT(MapStartAlign(start) == start);
+ ASSERT(MapEndAlign(end) == end);
+ Address map_address = start;
+ bool pointers_to_new_space_found = false;
-static void CheckStoreBuffer(Heap* heap,
- Object** current,
- Object** limit,
- Object**** store_buffer_position,
- Object*** store_buffer_top,
- CheckStoreBufferFilter filter,
- Address special_garbage_start,
- Address special_garbage_end) {
- Map* free_space_map = heap->free_space_map();
- for ( ; current < limit; current++) {
- Object* o = *current;
- Address current_address = reinterpret_cast<Address>(current);
- // Skip free space.
- if (o == free_space_map) {
- Address current_address = reinterpret_cast<Address>(current);
- FreeSpace* free_space =
- FreeSpace::cast(HeapObject::FromAddress(current_address));
- int skip = free_space->Size();
- ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
- ASSERT(skip > 0);
- current_address += skip - kPointerSize;
- current = reinterpret_cast<Object**>(current_address);
- continue;
- }
- // Skip the current linear allocation space between top and limit, which
- // is not marked with the free space map but can contain junk.
- if (current_address == special_garbage_start &&
- special_garbage_end != special_garbage_start) {
- current_address = special_garbage_end - kPointerSize;
- current = reinterpret_cast<Object**>(current_address);
- continue;
- }
- if (!(*filter)(current)) continue;
- ASSERT(current_address < special_garbage_start ||
- current_address >= special_garbage_end);
- ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
- // We have to check that the pointer does not point into new space
- // without trying to cast it to a heap object since the hash field of
- // a string can contain values like 1 and 3 which are tagged null
- // pointers.
- if (!heap->InNewSpace(o)) continue;
- while (**store_buffer_position < current &&
- *store_buffer_position < store_buffer_top) {
- (*store_buffer_position)++;
- }
- if (**store_buffer_position != current ||
- *store_buffer_position == store_buffer_top) {
- Object** obj_start = current;
- while (!(*obj_start)->IsMap()) obj_start--;
- UNREACHABLE();
+ Heap* heap = HEAP;
+ while (map_address < end) {
+ ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
+ ASSERT(Memory::Object_at(map_address)->IsMap());
+
+ Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
+ Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
+
+ if (Heap::IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)) {
+ pointers_to_new_space_found = true;
}
+
+ map_address += Map::kSize;
}
+
+ return pointers_to_new_space_found;
}
-// Check that the store buffer contains all intergenerational pointers by
-// scanning a page and ensuring that all pointers to young space are in the
-// store buffer.
-void Heap::OldPointerSpaceCheckStoreBuffer() {
- OldSpace* space = old_pointer_space();
- PageIterator pages(space);
+bool Heap::IteratePointersInDirtyMapsRegion(
+ Heap* heap,
+ Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
+ Address map_aligned_start = MapStartAlign(start);
+ Address map_aligned_end = MapEndAlign(end);
- store_buffer()->SortUniq();
+ bool contains_pointers_to_new_space = false;
- while (pages.has_next()) {
- Page* page = pages.next();
- Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+ if (map_aligned_start != start) {
+ Address prev_map = map_aligned_start - Map::kSize;
+ ASSERT(Memory::Object_at(prev_map)->IsMap());
- Address end = page->ObjectAreaEnd();
+ Address pointer_fields_start =
+ Max(start, prev_map + Map::kPointerFieldsBeginOffset);
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
+ Address pointer_fields_end =
+ Min(prev_map + Map::kPointerFieldsEndOffset, end);
- Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &EverythingsAPointer,
- space->top(),
- space->limit());
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
}
+
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyMaps(map_aligned_start,
+ map_aligned_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
+
+ if (map_aligned_end != end) {
+ ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
+
+ Address pointer_fields_start =
+ map_aligned_end + Map::kPointerFieldsBeginOffset;
+
+ Address pointer_fields_end =
+ Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
+
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyRegion(heap,
+ pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
+ }
+
+ return contains_pointers_to_new_space;
}
-void Heap::MapSpaceCheckStoreBuffer() {
- MapSpace* space = map_space();
- PageIterator pages(space);
+void Heap::IterateAndMarkPointersToFromSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback) {
+ Address slot_address = start;
+ Page* page = Page::FromAddress(start);
- store_buffer()->SortUniq();
+ uint32_t marks = page->GetRegionMarks();
- while (pages.has_next()) {
- Page* page = pages.next();
- Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+ while (slot_address < end) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ if (InFromSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ callback(reinterpret_cast<HeapObject**>(slot));
+ if (InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ marks |= page->GetRegionMaskForAddress(slot_address);
+ }
+ }
+ slot_address += kPointerSize;
+ }
+
+ page->SetRegionMarks(marks);
+}
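
The replacement function above re-derives the page's dirty-region bitmap as it scans: every slot that still holds a new-space pointer ORs its region's bit back into the mask. A sketch of the per-address mask computation, with assumed region and page sizes:

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kRegionSizeLog2 = 8;                  // assume 256-byte regions
    constexpr uintptr_t kPageAlignmentMask = (1u << 13) - 1;  // assume 8 KB pages

    // One bit per region: the bit index is the slot's region number on its
    // page, analogous to Page::GetRegionMaskForAddress().
    uint32_t RegionMaskForAddress(uintptr_t addr) {
      uintptr_t offset = addr & kPageAlignmentMask;
      return 1u << (offset >> kRegionSizeLog2);
    }

    int main() {
      uint32_t marks = 0;
      marks |= RegionMaskForAddress(0x1008);  // slot in region 16 of its page
      marks |= RegionMaskForAddress(0x1108);  // slot in region 17
      printf("%#x\n", marks);                 // 0x30000
      return 0;
    }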
- Address end = page->ObjectAreaEnd();
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
+uint32_t Heap::IterateDirtyRegions(
+ uint32_t marks,
+ Address area_start,
+ Address area_end,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback copy_object_func) {
+ uint32_t newmarks = 0;
+ uint32_t mask = 1;
- Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &IsAMapPointerAddress,
- space->top(),
- space->limit());
+ if (area_start >= area_end) {
+ return newmarks;
}
+
+ Address region_start = area_start;
+
+ // area_start does not necessarily coincide with the start of the first
+ // region. Thus to calculate the beginning of the next region we have to
+ // align area_start to Page::kRegionSize.
+ Address second_region =
+ reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
+ ~Page::kRegionAlignmentMask);
+
+ // Next region might be beyond area_end.
+ Address region_end = Min(second_region, area_end);
+
+ if (marks & mask) {
+ if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
+ newmarks |= mask;
+ }
+ }
+ mask <<= 1;
+
+ // Iterate subsequent regions which lie fully inside [area_start, area_end).
+ region_start = region_end;
+ region_end = region_start + Page::kRegionSize;
+
+ while (region_end <= area_end) {
+ if (marks & mask) {
+ if (visit_dirty_region(this,
+ region_start,
+ region_end,
+ copy_object_func)) {
+ newmarks |= mask;
+ }
+ }
+
+ region_start = region_end;
+ region_end = region_start + Page::kRegionSize;
+
+ mask <<= 1;
+ }
+
+ if (region_start != area_end) {
+ // A small piece of the area is left unvisited because area_end does not
+ // coincide with a region boundary. Check whether the region covering the
+ // last part of the area is dirty.
+ if (marks & mask) {
+ if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
+ newmarks |= mask;
+ }
+ }
+ }
+
+ return newmarks;
}
-void Heap::LargeObjectSpaceCheckStoreBuffer() {
- LargeObjectIterator it(lo_space());
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- // We only have code, sequential strings, or fixed arrays in large
- // object space, and only fixed arrays can possibly contain pointers to
- // the young generation.
- if (object->IsFixedArray()) {
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
- Object** current = reinterpret_cast<Object**>(object->address());
- Object** limit =
- reinterpret_cast<Object**>(object->address() + object->Size());
- CheckStoreBuffer(this,
- current,
- limit,
- &store_buffer_position,
- store_buffer_top,
- &EverythingsAPointer,
- NULL,
- NULL);
+
+void Heap::IterateDirtyRegions(
+ PagedSpace* space,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback copy_object_func,
+ ExpectedPageWatermarkState expected_page_watermark_state) {
+
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+ while (it.has_next()) {
+ Page* page = it.next();
+ uint32_t marks = page->GetRegionMarks();
+
+ if (marks != Page::kAllRegionsCleanMarks) {
+ Address start = page->ObjectAreaStart();
+
+ // Do not try to visit pointers beyond the page allocation watermark.
+ // The page can contain garbage pointers there.
+ Address end;
+
+ if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
+ page->IsWatermarkValid()) {
+ end = page->AllocationWatermark();
+ } else {
+ end = page->CachedAllocationWatermark();
+ }
+
+ ASSERT(space == old_pointer_space_ ||
+ (space == map_space_ &&
+ ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
+
+ page->SetRegionMarks(IterateDirtyRegions(marks,
+ start,
+ end,
+ visit_dirty_region,
+ copy_object_func));
}
+
+ // Mark page watermark as invalid to maintain watermark validity invariant.
+ // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
+ page->InvalidateWatermark(true);
}
}
-#endif
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
@@ -5024,7 +4941,8 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
// on scavenge collections.
- if (mode != VISIT_ALL_IN_SCAVENGE) {
+ if (mode != VISIT_ALL_IN_SCAVENGE &&
+ mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
isolate_->builtins()->IterateBuiltins(v);
}
v->Synchronize("builtins");
@@ -5068,20 +4986,11 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semispace_size,
- intptr_t max_old_gen_size,
- intptr_t max_executable_size) {
+ int max_old_gen_size,
+ int max_executable_size) {
if (HasBeenSetup()) return false;
- if (max_semispace_size > 0) {
- if (max_semispace_size < Page::kPageSize) {
- max_semispace_size = Page::kPageSize;
- if (FLAG_trace_gc) {
- PrintF("Max semispace size cannot be less than %dkbytes\n",
- Page::kPageSize >> 10);
- }
- }
- max_semispace_size_ = max_semispace_size;
- }
+ if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
if (Snapshot::IsEnabled()) {
// If we are using a snapshot we always reserve the default amount
@@ -5091,10 +5000,6 @@ bool Heap::ConfigureHeap(int max_semispace_size,
// than the default reserved semispace size.
if (max_semispace_size_ > reserved_semispace_size_) {
max_semispace_size_ = reserved_semispace_size_;
- if (FLAG_trace_gc) {
- PrintF("Max semispace size cannot be more than %dkbytes\n",
- reserved_semispace_size_ >> 10);
- }
}
} else {
// If we are not using snapshots we reserve space for the actual
@@ -5120,12 +5025,8 @@ bool Heap::ConfigureHeap(int max_semispace_size,
initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
external_allocation_limit_ = 10 * max_semispace_size_;
- // The old generation is paged and needs at least one page for each space.
- int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
- max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
- Page::kPageSize),
- RoundUp(max_old_generation_size_,
- Page::kPageSize));
+ // The old generation is paged.
+ max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
configured_ = true;
return true;
@@ -5133,9 +5034,9 @@ bool Heap::ConfigureHeap(int max_semispace_size,
bool Heap::ConfigureHeapDefault() {
- return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
- static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
- static_cast<intptr_t>(FLAG_max_executable_size) * MB);
+ return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
+ FLAG_max_old_space_size * MB,
+ FLAG_max_executable_size * MB);
}
@@ -5163,7 +5064,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->os_error = OS::GetLastError();
isolate()->memory_allocator()->Available();
if (take_snapshot) {
- HeapIterator iterator;
+ HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
@@ -5379,21 +5280,31 @@ bool Heap::Setup(bool create_heap_objects) {
gc_initializer_mutex->Lock();
static bool initialized_gc = false;
if (!initialized_gc) {
- initialized_gc = true;
- InitializeScavengingVisitorsTables();
- NewSpaceScavenger::Initialize();
- MarkCompactCollector::Initialize();
+ initialized_gc = true;
+ InitializeScavengingVisitorsTables();
+ NewSpaceScavenger::Initialize();
+ MarkCompactCollector::Initialize();
}
gc_initializer_mutex->Unlock();
MarkMapPointersAsEncoded(false);
- // Setup memory allocator.
+ // Set up the memory allocator and reserve a chunk of memory for new
+ // space. The chunk is double the requested reserved new space size to
+ // ensure that we can find a pair of semispaces that are contiguous and
+ // aligned to their size.
if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
return false;
-
- // Setup new space.
- if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
+ void* chunk =
+ isolate_->memory_allocator()->ReserveInitialChunk(
+ 4 * reserved_semispace_size_);
+ if (chunk == NULL) return false;
+
+ // Align the pair of semispaces to their size, which must be a power
+ // of 2.
+ Address new_space_start =
+ RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
+ if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
return false;
}
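
The replacement setup path relies on a classic trick: to obtain a block of size S aligned to S (a power of two) from an unaligned reservation, reserve 2*S and round the start up. A standalone sketch of that over-reserve-and-align step:

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    // Round p up to the next multiple of alignment (alignment a power of two).
    uintptr_t RoundUp(uintptr_t p, uintptr_t alignment) {
      return (p + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t size = 1 << 20;     // stands in for 2 * reserved_semispace_size_
      void* chunk = malloc(2 * size);  // over-reserve, as Setup() does
      uintptr_t start = RoundUp(reinterpret_cast<uintptr_t>(chunk), size);
      assert(start % size == 0);                                              // aligned
      assert(start + size <= reinterpret_cast<uintptr_t>(chunk) + 2 * size);  // in bounds
      free(chunk);
      return 0;
    }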
@@ -5404,7 +5315,7 @@ bool Heap::Setup(bool create_heap_objects) {
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
- if (!old_pointer_space_->Setup()) return false;
+ if (!old_pointer_space_->Setup(NULL, 0)) return false;
// Initialize old data space.
old_data_space_ =
@@ -5413,7 +5324,7 @@ bool Heap::Setup(bool create_heap_objects) {
OLD_DATA_SPACE,
NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
- if (!old_data_space_->Setup()) return false;
+ if (!old_data_space_->Setup(NULL, 0)) return false;
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
@@ -5428,20 +5339,21 @@ bool Heap::Setup(bool create_heap_objects) {
code_space_ =
new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
- if (!code_space_->Setup()) return false;
+ if (!code_space_->Setup(NULL, 0)) return false;
// Initialize map space.
- map_space_ = new MapSpace(this,
- max_old_generation_size_,
- FLAG_max_map_space_pages,
- MAP_SPACE);
+ map_space_ = new MapSpace(this, FLAG_use_big_map_space
+ ? max_old_generation_size_
+ : MapSpace::kMaxMapPageIndex * Page::kPageSize,
+ FLAG_max_map_space_pages,
+ MAP_SPACE);
if (map_space_ == NULL) return false;
- if (!map_space_->Setup()) return false;
+ if (!map_space_->Setup(NULL, 0)) return false;
// Initialize global property cell space.
cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
- if (!cell_space_->Setup()) return false;
+ if (!cell_space_->Setup(NULL, 0)) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
@@ -5449,6 +5361,7 @@ bool Heap::Setup(bool create_heap_objects) {
lo_space_ = new LargeObjectSpace(this, LO_SPACE);
if (lo_space_ == NULL) return false;
if (!lo_space_->Setup()) return false;
+
if (create_heap_objects) {
// Create initial maps.
if (!CreateInitialMaps()) return false;
@@ -5463,8 +5376,6 @@ bool Heap::Setup(bool create_heap_objects) {
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
- store_buffer()->Setup();
-
return true;
}
@@ -5491,6 +5402,7 @@ void Heap::TearDown() {
PrintF("\n\n");
PrintF("gc_count=%d ", gc_count_);
PrintF("mark_sweep_count=%d ", ms_count_);
+ PrintF("mark_compact_count=%d ", mc_count_);
PrintF("max_gc_pause=%d ", get_max_gc_pause());
PrintF("min_in_mutator=%d ", get_min_in_mutator());
PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
@@ -5540,9 +5452,6 @@ void Heap::TearDown() {
lo_space_ = NULL;
}
- store_buffer()->TearDown();
- incremental_marking()->TearDown();
-
isolate_->memory_allocator()->TearDown();
#ifdef DEBUG
@@ -5556,7 +5465,7 @@ void Heap::Shrink() {
// Try to shrink all paged spaces.
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
- space->ReleaseAllUnusedPages();
+ space->Shrink();
}
@@ -5759,6 +5668,45 @@ class HeapObjectsFilter {
};
+class FreeListNodesFilter : public HeapObjectsFilter {
+ public:
+ FreeListNodesFilter() {
+ MarkFreeListNodes();
+ }
+
+ bool SkipObject(HeapObject* object) {
+ if (object->IsMarked()) {
+ object->ClearMark();
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ private:
+ void MarkFreeListNodes() {
+ Heap* heap = HEAP;
+ heap->old_pointer_space()->MarkFreeListNodes();
+ heap->old_data_space()->MarkFreeListNodes();
+ MarkCodeSpaceFreeListNodes(heap);
+ heap->map_space()->MarkFreeListNodes();
+ heap->cell_space()->MarkFreeListNodes();
+ }
+
+ void MarkCodeSpaceFreeListNodes(Heap* heap) {
+ // For code space, using FreeListNode::IsFreeListNode is OK.
+ HeapObjectIterator iter(heap->code_space());
+ for (HeapObject* obj = iter.next_object();
+ obj != NULL;
+ obj = iter.next_object()) {
+ if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
+ }
+ }
+
+ AssertNoAllocation no_alloc;
+};
+
+
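
The filter classes above and below use an intrusive mark bit as a one-shot membership test: everything to be skipped is marked up front, and SkipObject() consumes the mark on first sight. A minimal sketch of the same pattern, with a set standing in for the mark bit:

    #include <cstdio>
    #include <unordered_set>

    class SkipFilter {
     public:
      // Pre-mark an object the iterator should skip.
      void Mark(const void* obj) { marked_.insert(obj); }
      // Returns true and clears the mark if obj was pre-marked, so a second
      // query for the same object reports false -- like ClearMark() above.
      bool SkipObject(const void* obj) { return marked_.erase(obj) > 0; }

     private:
      std::unordered_set<const void*> marked_;
    };

    int main() {
      int a = 0, b = 0;
      SkipFilter filter;
      filter.Mark(&a);
      printf("%d %d %d\n", filter.SkipObject(&a), filter.SkipObject(&a),
             filter.SkipObject(&b));  // 1 0 0
      return 0;
    }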
class UnreachableObjectsFilter : public HeapObjectsFilter {
public:
UnreachableObjectsFilter() {
@@ -5766,8 +5714,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
bool SkipObject(HeapObject* object) {
- if (IntrusiveMarking::IsMarked(object)) {
- IntrusiveMarking::ClearMark(object);
+ if (object->IsMarked()) {
+ object->ClearMark();
return true;
} else {
return false;
@@ -5783,8 +5731,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
- if (IntrusiveMarking::IsMarked(obj)) {
- IntrusiveMarking::ClearMark(obj);
+ if (obj->IsMarked()) {
+ obj->ClearMark();
list_.Add(obj);
}
}
@@ -5806,7 +5754,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
- IntrusiveMarking::SetMark(obj);
+ obj->SetMark();
}
UnmarkingVisitor visitor;
HEAP->IterateRoots(&visitor, VISIT_ALL);
@@ -5840,11 +5788,10 @@ HeapIterator::~HeapIterator() {
void HeapIterator::Init() {
// Start the iteration.
space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
- new SpaceIterator(Isolate::Current()->heap()->
- GcSafeSizeOfOldObjectFunction());
+ new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
switch (filtering_) {
case kFilterFreeListNodes:
- // TODO(gc): Not handled.
+ filter_ = new FreeListNodesFilter;
break;
case kFilterUnreachable:
filter_ = new UnreachableObjectsFilter;
@@ -5981,11 +5928,6 @@ void PathTracer::TracePathFrom(Object** root) {
}
-static bool SafeIsGlobalContext(HeapObject* obj) {
- return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
-}
-
-
void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
if (!(*p)->IsHeapObject()) return;
@@ -6004,7 +5946,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
return;
}
- bool is_global_context = SafeIsGlobalContext(obj);
+ bool is_global_context = obj->IsGlobalContext();
// not visited yet
Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -6112,7 +6054,7 @@ static intptr_t CountTotalHolesSize() {
for (OldSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
- holes_size += space->Waste() + space->Available();
+ holes_size += space->Waste() + space->AvailableFree();
}
return holes_size;
}
@@ -6123,10 +6065,17 @@ GCTracer::GCTracer(Heap* heap)
start_size_(0),
gc_count_(0),
full_gc_count_(0),
+ is_compacting_(false),
+ marked_count_(0),
allocated_since_last_gc_(0),
spent_in_mutator_(0),
promoted_objects_size_(0),
heap_(heap) {
+ // These two fields reflect the state of the previous full collection.
+ // Set them before they are changed by the collector.
+ previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
+ previous_marked_count_ =
+ heap_->mark_compact_collector_.previous_marked_count();
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
start_time_ = OS::TimeCurrentMillis();
start_size_ = heap_->SizeOfObjects();
@@ -6143,14 +6092,6 @@ GCTracer::GCTracer(Heap* heap)
if (heap_->last_gc_end_timestamp_ > 0) {
spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
}
-
- steps_count_ = heap_->incremental_marking()->steps_count();
- steps_took_ = heap_->incremental_marking()->steps_took();
- longest_step_ = heap_->incremental_marking()->longest_step();
- steps_count_since_last_gc_ =
- heap_->incremental_marking()->steps_count_since_last_gc();
- steps_took_since_last_gc_ =
- heap_->incremental_marking()->steps_took_since_last_gc();
}
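
The tracer's mutator-time bookkeeping above is just the gap between the previous collection's end timestamp and the current collection's start. A sketch of that measurement:

    #include <algorithm>
    #include <cstdio>

    // Stand-in timestamp in milliseconds; 0 means no GC has finished yet.
    double last_gc_end_timestamp = 0.0;

    // Called at the start of a GC: time spent in the mutator is everything
    // since the previous GC ended, clamped at zero like the Max() above.
    double SpentInMutator(double gc_start_time) {
      if (last_gc_end_timestamp <= 0) return 0.0;
      return std::max(gc_start_time - last_gc_end_timestamp, 0.0);
    }

    int main() {
      last_gc_end_timestamp = 1000.0;
      printf("%.0f ms in mutator\n", SpentInMutator(1250.0));  // 250 ms
      return 0;
    }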
@@ -6185,21 +6126,7 @@ GCTracer::~GCTracer() {
SizeOfHeapObjects());
if (external_time > 0) PrintF("%d / ", external_time);
- PrintF("%d ms", time);
- if (steps_count_ > 0) {
- if (collector_ == SCAVENGER) {
- PrintF(" (+ %d ms in %d steps since last GC)",
- static_cast<int>(steps_took_since_last_gc_),
- steps_count_since_last_gc_);
- } else {
- PrintF(" (+ %d ms in %d steps since start of marking, "
- "biggest step %f ms)",
- static_cast<int>(steps_took_),
- steps_count_,
- longest_step_);
- }
- }
- PrintF(".\n");
+ PrintF("%d ms.\n", time);
} else {
PrintF("pause=%d ", time);
PrintF("mutator=%d ",
@@ -6211,7 +6138,8 @@ GCTracer::~GCTracer() {
PrintF("s");
break;
case MARK_COMPACTOR:
- PrintF("ms");
+ PrintF("%s",
+ heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
break;
default:
UNREACHABLE();
@@ -6233,14 +6161,6 @@ GCTracer::~GCTracer() {
PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
- if (collector_ == SCAVENGER) {
- PrintF("stepscount=%d ", steps_count_since_last_gc_);
- PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
- } else {
- PrintF("stepscount=%d ", steps_count_);
- PrintF("stepstook=%d ", static_cast<int>(steps_took_));
- }
-
PrintF("\n");
}
@@ -6253,7 +6173,8 @@ const char* GCTracer::CollectorString() {
case SCAVENGER:
return "Scavenge";
case MARK_COMPACTOR:
- return "Mark-sweep";
+ return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
+ : "Mark-sweep";
}
return "Unknown GC";
}
@@ -6360,52 +6281,4 @@ void ExternalStringTable::TearDown() {
}
-void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
- chunk->set_next_chunk(chunks_queued_for_free_);
- chunks_queued_for_free_ = chunk;
-}
-
-
-void Heap::FreeQueuedChunks() {
- if (chunks_queued_for_free_ == NULL) return;
- MemoryChunk* next;
- MemoryChunk* chunk;
- for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
- next = chunk->next_chunk();
- chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
-
- if (chunk->owner()->identity() == LO_SPACE) {
- // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
- // If FromAnyPointerAddress encounters a slot that belongs to a large
- // chunk queued for deletion, it will fail to find the chunk because
- // it tries to search the list of pages owned by the large object
- // space, and queued chunks were detached from that list.
- // To work around this we split the large chunk into normal kPageSize
- // aligned pieces and initialize the owner field and flags of every piece.
- // If FromAnyPointerAddress encounters a slot that belongs to one of
- // these smaller pieces it will treat it as a slot on a normal Page.
- MemoryChunk* inner = MemoryChunk::FromAddress(
- chunk->address() + Page::kPageSize);
- MemoryChunk* inner_last = MemoryChunk::FromAddress(
- chunk->address() + chunk->size() - 1);
- while (inner <= inner_last) {
- // The size of a large chunk is always a multiple of
- // OS::AllocationAlignment(), so there is always
- // enough space for a fake MemoryChunk header.
- inner->set_owner(lo_space());
- inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
- inner = MemoryChunk::FromAddress(
- inner->address() + Page::kPageSize);
- }
- }
- }
- isolate_->heap()->store_buffer()->Compact();
- isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
- for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
- next = chunk->next_chunk();
- isolate_->memory_allocator()->Free(chunk);
- }
- chunks_queued_for_free_ = NULL;
-}
-
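
The deleted pair above implements deferred freeing as an intrusive singly linked list threaded through the chunks themselves, so queuing needs no extra allocation. A standalone sketch of that pattern (the store-buffer filtering and fake-header splitting are omitted):

    #include <cstdio>
    #include <cstdlib>

    struct Chunk {
      Chunk* next;  // intrusive link, like MemoryChunk::next_chunk()
    };

    Chunk* chunks_queued_for_free = nullptr;

    // Push a chunk onto the free queue; no extra memory is needed.
    void QueueChunkForFree(Chunk* chunk) {
      chunk->next = chunks_queued_for_free;
      chunks_queued_for_free = chunk;
    }

    // Release everything queued so far, reading each link before freeing.
    void FreeQueuedChunks() {
      for (Chunk* c = chunks_queued_for_free; c != nullptr;) {
        Chunk* next = c->next;
        free(c);
        c = next;
      }
      chunks_queued_for_free = nullptr;
    }

    int main() {
      QueueChunkForFree(static_cast<Chunk*>(malloc(sizeof(Chunk))));
      QueueChunkForFree(static_cast<Chunk*>(malloc(sizeof(Chunk))));
      FreeQueuedChunks();
      puts("queued chunks freed");
      return 0;
    }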
} } // namespace v8::internal