Diffstat (limited to 'deps/v8/src/heap.h')
-rw-r--r-- | deps/v8/src/heap.h | 648
1 file changed, 451 insertions, 197 deletions
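The diff below swaps V8's old page-watermark / dirty-region write barrier for a store buffer (note the new store-buffer.h include, the StoreBufferTop root, the StoreBufferRebuilder class, and the removal of the IterateDirtyRegions family) and wires in incremental marking. For orientation, here is a minimal sketch of the store-buffer idea in isolation; the class and method names (SimpleStoreBuffer, Record, IteratePointersToNewSpace) are invented for illustration and this is not V8's actual implementation, which lives in store-buffer.h:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for a slot address inside an old-space object.
using Address = std::uintptr_t;

// A store buffer records the addresses of old-space slots that were written
// with (possibly) new-space pointers. The scavenger later visits only these
// recorded slots instead of scanning whole pages for old-to-new pointers.
class SimpleStoreBuffer {
 public:
  explicit SimpleStoreBuffer(std::size_t capacity)
      : slots_(capacity), top_(slots_.data()) {}

  // Called by the write barrier after "*slot = value" when value may live in
  // new space. In V8, generated code performs this as a bump of the
  // store-buffer-top root rather than a runtime call.
  void Record(Address slot) {
    if (top_ == slots_.data() + slots_.size()) {
      Compact();  // V8 deduplicates or marks whole pages scan-on-scavenge.
    }
    *top_++ = slot;
  }

  // During scavenge, visit every recorded slot exactly once.
  template <typename Visitor>
  void IteratePointersToNewSpace(Visitor visit) {
    for (Address* p = slots_.data(); p != top_; ++p) visit(*p);
    top_ = slots_.data();  // Rebuilt as surviving objects are promoted.
  }

 private:
  void Compact() { /* deduplicate, or exempt overflowing pages */ }

  std::vector<Address> slots_;
  Address* top_;  // In V8 this top pointer is exposed via the roots array.
};
```

Keeping the buffer top in the roots array (see store_buffer_top_address() further down) is presumably what lets compiled code implement the barrier as a single store-and-increment without calling into the runtime.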
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index d81ff6cad..741e3d977 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -32,11 +32,15 @@ #include "allocation.h" #include "globals.h" +#include "incremental-marking.h" #include "list.h" #include "mark-compact.h" +#include "objects-visiting.h" #include "spaces.h" #include "splay-tree-inl.h" +#include "store-buffer.h" #include "v8-counters.h" +#include "v8globals.h" namespace v8 { namespace internal { @@ -48,30 +52,43 @@ inline Heap* _inline_get_heap_(); // Defines all the roots in Heap. -#define STRONG_ROOT_LIST(V) \ - /* Put the byte array map early. We need it to be in place by the time */ \ - /* the deserializer hits the next page, since it wants to put a byte */ \ - /* array in the unused space at the end of the page. */ \ +#define STRONG_ROOT_LIST(V) \ V(Map, byte_array_map, ByteArrayMap) \ + V(Map, free_space_map, FreeSpaceMap) \ V(Map, one_pointer_filler_map, OnePointerFillerMap) \ V(Map, two_pointer_filler_map, TwoPointerFillerMap) \ /* Cluster the most popular ones in a few cache lines here at the top. */ \ - V(Object, undefined_value, UndefinedValue) \ - V(Object, the_hole_value, TheHoleValue) \ - V(Object, null_value, NullValue) \ - V(Object, true_value, TrueValue) \ - V(Object, false_value, FalseValue) \ - V(Object, arguments_marker, ArgumentsMarker) \ + V(Smi, store_buffer_top, StoreBufferTop) \ + V(Oddball, undefined_value, UndefinedValue) \ + V(Oddball, the_hole_value, TheHoleValue) \ + V(Oddball, null_value, NullValue) \ + V(Oddball, true_value, TrueValue) \ + V(Oddball, false_value, FalseValue) \ + V(Map, global_property_cell_map, GlobalPropertyCellMap) \ + V(Map, shared_function_info_map, SharedFunctionInfoMap) \ + V(Map, meta_map, MetaMap) \ + V(Map, ascii_symbol_map, AsciiSymbolMap) \ + V(Map, ascii_string_map, AsciiStringMap) \ V(Map, heap_number_map, HeapNumberMap) \ V(Map, global_context_map, GlobalContextMap) \ V(Map, fixed_array_map, FixedArrayMap) \ - V(Map, serialized_scope_info_map, SerializedScopeInfoMap) \ + V(Map, code_map, CodeMap) \ + V(Map, scope_info_map, ScopeInfoMap) \ V(Map, fixed_cow_array_map, FixedCOWArrayMap) \ V(Map, fixed_double_array_map, FixedDoubleArrayMap) \ V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \ - V(Map, meta_map, MetaMap) \ V(Map, hash_table_map, HashTableMap) \ + V(FixedArray, empty_fixed_array, EmptyFixedArray) \ + V(ByteArray, empty_byte_array, EmptyByteArray) \ + V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \ + V(String, empty_string, EmptyString) \ + V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ V(Smi, stack_limit, StackLimit) \ + V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \ + V(Oddball, arguments_marker, ArgumentsMarker) \ + /* The first 32 roots above this line should be boring from a GC point of */ \ + /* view. This means they are never in new space and never on a page that */ \ + /* is being compacted. 
*/ \ V(FixedArray, number_string_cache, NumberStringCache) \ V(Object, instanceof_cache_function, InstanceofCacheFunction) \ V(Object, instanceof_cache_map, InstanceofCacheMap) \ @@ -79,19 +96,12 @@ inline Heap* _inline_get_heap_(); V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ V(FixedArray, string_split_cache, StringSplitCache) \ V(Object, termination_exception, TerminationException) \ - V(FixedArray, empty_fixed_array, EmptyFixedArray) \ - V(ByteArray, empty_byte_array, EmptyByteArray) \ - V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray) \ - V(String, empty_string, EmptyString) \ - V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ V(Map, string_map, StringMap) \ - V(Map, ascii_string_map, AsciiStringMap) \ V(Map, symbol_map, SymbolMap) \ V(Map, cons_string_map, ConsStringMap) \ V(Map, cons_ascii_string_map, ConsAsciiStringMap) \ V(Map, sliced_string_map, SlicedStringMap) \ V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \ - V(Map, ascii_symbol_map, AsciiSymbolMap) \ V(Map, cons_symbol_map, ConsSymbolMap) \ V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \ V(Map, external_symbol_map, ExternalSymbolMap) \ @@ -100,6 +110,16 @@ inline Heap* _inline_get_heap_(); V(Map, external_string_map, ExternalStringMap) \ V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \ V(Map, external_ascii_string_map, ExternalAsciiStringMap) \ + V(Map, short_external_symbol_map, ShortExternalSymbolMap) \ + V(Map, \ + short_external_symbol_with_ascii_data_map, \ + ShortExternalSymbolWithAsciiDataMap) \ + V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap) \ + V(Map, short_external_string_map, ShortExternalStringMap) \ + V(Map, \ + short_external_string_with_ascii_data_map, \ + ShortExternalStringWithAsciiDataMap) \ + V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \ V(Map, undetectable_string_map, UndetectableStringMap) \ V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \ V(Map, external_pixel_array_map, ExternalPixelArrayMap) \ @@ -116,14 +136,12 @@ inline Heap* _inline_get_heap_(); V(Map, catch_context_map, CatchContextMap) \ V(Map, with_context_map, WithContextMap) \ V(Map, block_context_map, BlockContextMap) \ - V(Map, code_map, CodeMap) \ V(Map, oddball_map, OddballMap) \ - V(Map, global_property_cell_map, GlobalPropertyCellMap) \ - V(Map, shared_function_info_map, SharedFunctionInfoMap) \ V(Map, message_object_map, JSMessageObjectMap) \ V(Map, foreign_map, ForeignMap) \ - V(Object, nan_value, NanValue) \ - V(Object, minus_zero_value, MinusZeroValue) \ + V(HeapNumber, nan_value, NanValue) \ + V(HeapNumber, infinity_value, InfinityValue) \ + V(HeapNumber, minus_zero_value, MinusZeroValue) \ V(Map, neander_map, NeanderMap) \ V(JSObject, message_listeners, MessageListeners) \ V(Foreign, prototype_accessors, PrototypeAccessors) \ @@ -226,7 +244,9 @@ inline Heap* _inline_get_heap_(); V(closure_symbol, "(closure)") \ V(use_strict, "use strict") \ V(dot_symbol, ".") \ - V(anonymous_function_symbol, "(anonymous function)") + V(anonymous_function_symbol, "(anonymous function)") \ + V(infinity_symbol, "Infinity") \ + V(minus_infinity_symbol, "-Infinity") // Forward declarations. 
class GCTracer; @@ -238,10 +258,26 @@ class WeakObjectRetainer; typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap, Object** pointer); -typedef bool (*DirtyRegionCallback)(Heap* heap, - Address start, - Address end, - ObjectSlotCallback copy_object_func); +class StoreBufferRebuilder { + public: + explicit StoreBufferRebuilder(StoreBuffer* store_buffer) + : store_buffer_(store_buffer) { + } + + void Callback(MemoryChunk* page, StoreBufferEvent event); + + private: + StoreBuffer* store_buffer_; + + // We record in this variable how full the store buffer was when we started + // iterating over the current page, finding pointers to new space. If the + // store buffer overflows again we can exempt the page from the store buffer + // by rewinding to this point instead of having to search the store buffer. + Object*** start_of_current_page_; + // The current page we are scanning in the store buffer iterator. + MemoryChunk* current_page_; +}; + // The all static Heap captures the interface to the global object heap. @@ -256,32 +292,103 @@ class HeapDebugUtils; // by it's size to avoid dereferencing a map pointer for scanning. class PromotionQueue { public: - PromotionQueue() : front_(NULL), rear_(NULL) { } + explicit PromotionQueue(Heap* heap) + : front_(NULL), + rear_(NULL), + limit_(NULL), + emergency_stack_(0), + heap_(heap) { } + + void Initialize(); + + void Destroy() { + ASSERT(is_empty()); + delete emergency_stack_; + emergency_stack_ = NULL; + } - void Initialize(Address start_address) { - front_ = rear_ = reinterpret_cast<intptr_t*>(start_address); + inline void ActivateGuardIfOnTheSamePage(); + + Page* GetHeadPage() { + return Page::FromAllocationTop(reinterpret_cast<Address>(rear_)); } - bool is_empty() { return front_ <= rear_; } + void SetNewLimit(Address limit) { + if (!guard_) { + return; + } + + ASSERT(GetHeadPage() == Page::FromAllocationTop(limit)); + limit_ = reinterpret_cast<intptr_t*>(limit); + + if (limit_ <= rear_) { + return; + } + + RelocateQueueHead(); + } + + bool is_empty() { + return (front_ == rear_) && + (emergency_stack_ == NULL || emergency_stack_->length() == 0); + } inline void insert(HeapObject* target, int size); void remove(HeapObject** target, int* size) { + ASSERT(!is_empty()); + if (front_ == rear_) { + Entry e = emergency_stack_->RemoveLast(); + *target = e.obj_; + *size = e.size_; + return; + } + + if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) { + NewSpacePage* front_page = + NewSpacePage::FromAddress(reinterpret_cast<Address>(front_)); + ASSERT(!front_page->prev_page()->is_anchor()); + front_ = + reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit()); + } *target = reinterpret_cast<HeapObject*>(*(--front_)); *size = static_cast<int>(*(--front_)); // Assert no underflow. - ASSERT(front_ >= rear_); + SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_), + reinterpret_cast<Address>(front_)); } private: - // The front of the queue is higher in memory than the rear. + // The front of the queue is higher in the memory page chain than the rear. 
intptr_t* front_; intptr_t* rear_; + intptr_t* limit_; + + bool guard_; + + static const int kEntrySizeInWords = 2; + + struct Entry { + Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { } + + HeapObject* obj_; + int size_; + }; + List<Entry>* emergency_stack_; + + Heap* heap_; + + void RelocateQueueHead(); DISALLOW_COPY_AND_ASSIGN(PromotionQueue); }; +typedef void (*ScavengingCallback)(Map* map, + HeapObject** slot, + HeapObject* object); + + // External strings table is a place where all external strings are // registered. We need to keep track of such strings to properly // finalize them. @@ -327,8 +434,8 @@ class Heap { // Configure heap size before setup. Return false if the heap has been // setup already. bool ConfigureHeap(int max_semispace_size, - int max_old_gen_size, - int max_executable_size); + intptr_t max_old_gen_size, + intptr_t max_executable_size); bool ConfigureHeapDefault(); // Initializes the global object heap. If create_heap_objects is true, @@ -456,6 +563,7 @@ class Heap { // size, but keeping the original prototype. The receiver must have at least // the size of the new object. The object is reinitialized and behaves as an // object that has been freshly allocated. + // Returns failure if an error occured, otherwise object. MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object, InstanceType type, int size); @@ -484,8 +592,10 @@ class Heap { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this function does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type, - int instance_size); + MUST_USE_RESULT MaybeObject* AllocateMap( + InstanceType instance_type, + int instance_size, + ElementsKind elements_kind = FAST_ELEMENTS); // Allocates a partial map for bootstrapping. MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type, @@ -498,7 +608,7 @@ class Heap { MUST_USE_RESULT MaybeObject* AllocateCodeCache(); // Allocates a serialized scope info. - MUST_USE_RESULT MaybeObject* AllocateSerializedScopeInfo(int length); + MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length); // Allocates an empty PolymorphicCodeCache. MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache(); @@ -688,7 +798,7 @@ class Heap { // Allocate a block context. MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function, Context* previous, - SerializedScopeInfo* info); + ScopeInfo* info); // Allocates a new utility object in the old generation. MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type); @@ -737,13 +847,15 @@ class Heap { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - MUST_USE_RESULT inline MaybeObject* NumberFromInt32(int32_t value); + MUST_USE_RESULT inline MaybeObject* NumberFromInt32( + int32_t value, PretenureFlag pretenure = NOT_TENURED); // Converts an int into either a Smi or a HeapNumber object. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value); + MUST_USE_RESULT inline MaybeObject* NumberFromUint32( + uint32_t value, PretenureFlag pretenure = NOT_TENURED); // Allocates a new foreign object. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation @@ -796,9 +908,9 @@ class Heap { // failed. 
// Please note this does not perform a garbage collection. MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii( - ExternalAsciiString::Resource* resource); + const ExternalAsciiString::Resource* resource); MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte( - ExternalTwoByteString::Resource* resource); + const ExternalTwoByteString::Resource* resource); // Finalizes an external string by deleting the associated external // data and clearing the resource pointer. @@ -885,13 +997,24 @@ class Heap { // collect more garbage. inline bool CollectGarbage(AllocationSpace space); - // Performs a full garbage collection. Force compaction if the - // parameter is true. - void CollectAllGarbage(bool force_compaction); + static const int kNoGCFlags = 0; + static const int kMakeHeapIterableMask = 1; + + // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is + // non-zero, then the slower precise sweeper is used, which leaves the heap + // in a state where we can iterate over the heap visiting all objects. + void CollectAllGarbage(int flags); // Last hope GC, should try to squeeze as much as possible. void CollectAllAvailableGarbage(); + // Check whether the heap is currently iterable. + bool IsHeapIterable(); + + // Ensure that we have swept all spaces in such a way that we can iterate + // over all objects. May cause a GC. + void EnsureHeapIsIterable(); + // Notify the heap that a context has been disposed. int NotifyContextDisposed() { return ++contexts_disposed_; } @@ -899,6 +1022,20 @@ class Heap { // ensure correct callback for weak global handles. void PerformScavenge(); + inline void increment_scan_on_scavenge_pages() { + scan_on_scavenge_pages_++; + if (FLAG_gc_verbose) { + PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); + } + } + + inline void decrement_scan_on_scavenge_pages() { + scan_on_scavenge_pages_--; + if (FLAG_gc_verbose) { + PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); + } + } + PromotionQueue* promotion_queue() { return &promotion_queue_; } #ifdef DEBUG @@ -925,6 +1062,8 @@ class Heap { // Heap root getters. We have versions with and without type::cast() here. // You can't use type::cast during GC because the assert fails. + // TODO(1490): Try removing the unchecked accessors, now that GC marking does + // not corrupt the stack. #define ROOT_ACCESSOR(type, name, camel_name) \ type* name() { \ return type::cast(roots_[k##camel_name##RootIndex]); \ @@ -958,6 +1097,9 @@ class Heap { } Object* global_contexts_list() { return global_contexts_list_; } + // Number of mark-sweeps. + int ms_count() { return ms_count_; } + // Iterates over all roots in the heap. void IterateRoots(ObjectVisitor* v, VisitMode mode); // Iterates over all strong roots in the heap. @@ -965,60 +1107,16 @@ class Heap { // Iterates over all the other roots in the heap. void IterateWeakRoots(ObjectVisitor* v, VisitMode mode); - enum ExpectedPageWatermarkState { - WATERMARK_SHOULD_BE_VALID, - WATERMARK_CAN_BE_INVALID - }; - - // For each dirty region on a page in use from an old space call - // visit_dirty_region callback. - // If either visit_dirty_region or callback can cause an allocation - // in old space and changes in allocation watermark then - // can_preallocate_during_iteration should be set to true. - // All pages will be marked as having invalid watermark upon - // iteration completion. 
- void IterateDirtyRegions( - PagedSpace* space, - DirtyRegionCallback visit_dirty_region, - ObjectSlotCallback callback, - ExpectedPageWatermarkState expected_page_watermark_state); - - // Interpret marks as a bitvector of dirty marks for regions of size - // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering - // memory interval from start to top. For each dirty region call a - // visit_dirty_region callback. Return updated bitvector of dirty marks. - uint32_t IterateDirtyRegions(uint32_t marks, - Address start, - Address end, - DirtyRegionCallback visit_dirty_region, - ObjectSlotCallback callback); - // Iterate pointers to from semispace of new space found in memory interval // from start to end. - // Update dirty marks for page containing start address. void IterateAndMarkPointersToFromSpace(Address start, Address end, ObjectSlotCallback callback); - // Iterate pointers to new space found in memory interval from start to end. - // Return true if pointers to new space was found. - static bool IteratePointersInDirtyRegion(Heap* heap, - Address start, - Address end, - ObjectSlotCallback callback); - - - // Iterate pointers to new space found in memory interval from start to end. - // This interval is considered to belong to the map space. - // Return true if pointers to new space was found. - static bool IteratePointersInDirtyMapsRegion(Heap* heap, - Address start, - Address end, - ObjectSlotCallback callback); - - // Returns whether the object resides in new space. inline bool InNewSpace(Object* object); + inline bool InNewSpace(Address addr); + inline bool InNewSpacePage(Address addr); inline bool InFromSpace(Object* object); inline bool InToSpace(Object* object); @@ -1057,11 +1155,19 @@ class Heap { roots_[kEmptyScriptRootIndex] = script; } + void public_set_store_buffer_top(Address* top) { + roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top); + } + // Update the next script id. inline void SetLastScriptId(Object* last_script_id); // Generated code can embed this address to get access to the roots. - Object** roots_address() { return roots_; } + Object** roots_array_start() { return roots_; } + + Address* store_buffer_top_address() { + return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]); + } // Get address of global contexts list for serialization support. Object** global_contexts_list_address() { @@ -1075,6 +1181,10 @@ class Heap { // Verify the heap is in its normal state before or after a GC. void Verify(); + void OldPointerSpaceCheckStoreBuffer(); + void MapSpaceCheckStoreBuffer(); + void LargeObjectSpaceCheckStoreBuffer(); + // Report heap statistics. void ReportHeapStatistics(const char* title); void ReportCodeStatistics(const char* title); @@ -1170,26 +1280,59 @@ class Heap { MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length, PretenureFlag pretenure); + inline intptr_t PromotedTotalSize() { + return PromotedSpaceSize() + PromotedExternalMemorySize(); + } + // True if we have reached the allocation limit in the old generation that // should force the next GC (caused normally) to be a full one. 
- bool OldGenerationPromotionLimitReached() { - return (PromotedSpaceSize() + PromotedExternalMemorySize()) - > old_gen_promotion_limit_; + inline bool OldGenerationPromotionLimitReached() { + return PromotedTotalSize() > old_gen_promotion_limit_; } - intptr_t OldGenerationSpaceAvailable() { - return old_gen_allocation_limit_ - - (PromotedSpaceSize() + PromotedExternalMemorySize()); + inline intptr_t OldGenerationSpaceAvailable() { + return old_gen_allocation_limit_ - PromotedTotalSize(); } - // True if we have reached the allocation limit in the old generation that - // should artificially cause a GC right now. - bool OldGenerationAllocationLimitReached() { - return OldGenerationSpaceAvailable() < 0; + static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize; + static const intptr_t kMinimumAllocationLimit = + 8 * (Page::kPageSize > MB ? Page::kPageSize : MB); + + // When we sweep lazily we initially guess that there is no garbage on the + // heap and set the limits for the next GC accordingly. As we sweep we find + // out that some of the pages contained garbage and we have to adjust + // downwards the size of the heap. This means the limits that control the + // timing of the next GC also need to be adjusted downwards. + void LowerOldGenLimits(intptr_t adjustment) { + size_of_old_gen_at_last_old_space_gc_ -= adjustment; + old_gen_promotion_limit_ = + OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_); + old_gen_allocation_limit_ = + OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_); } - // Can be called when the embedding application is idle. - bool IdleNotification(); + intptr_t OldGenPromotionLimit(intptr_t old_gen_size) { + const int divisor = FLAG_stress_compaction ? 10 : 3; + intptr_t limit = + Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit); + limit += new_space_.Capacity(); + limit *= old_gen_limit_factor_; + intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2; + return Min(limit, halfway_to_the_max); + } + + intptr_t OldGenAllocationLimit(intptr_t old_gen_size) { + const int divisor = FLAG_stress_compaction ? 8 : 2; + intptr_t limit = + Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit); + limit += new_space_.Capacity(); + limit *= old_gen_limit_factor_; + intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2; + return Min(limit, halfway_to_the_max); + } + + // Implements the corresponding V8 API function. + bool IdleNotification(int hint); // Declare all the root indices. enum RootListIndex { @@ -1213,6 +1356,8 @@ class Heap { MUST_USE_RESULT MaybeObject* NumberToString( Object* number, bool check_number_string_cache = true); + MUST_USE_RESULT MaybeObject* Uint32ToString( + uint32_t value, bool check_number_string_cache = true); Map* MapForExternalArrayType(ExternalArrayType array_type); RootListIndex RootIndexForExternalArrayType( @@ -1224,18 +1369,10 @@ class Heap { // by pointer size. static inline void CopyBlock(Address dst, Address src, int byte_size); - inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst, - Address src, - int byte_size); - // Optimized version of memmove for blocks with pointer size aligned sizes and // pointer size aligned addresses. static inline void MoveBlock(Address dst, Address src, int byte_size); - inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst, - Address src, - int byte_size); - // Check new space expansion criteria and expand semispaces if it was hit. 
void CheckNewSpaceExpansionCriteria(); @@ -1244,9 +1381,31 @@ class Heap { survived_since_last_expansion_ += survived; } + inline bool NextGCIsLikelyToBeFull() { + if (FLAG_gc_global) return true; + + intptr_t total_promoted = PromotedTotalSize(); + + intptr_t adjusted_promotion_limit = + old_gen_promotion_limit_ - new_space_.Capacity(); + + if (total_promoted >= adjusted_promotion_limit) return true; + + intptr_t adjusted_allocation_limit = + old_gen_allocation_limit_ - new_space_.Capacity() / 5; + + if (PromotedSpaceSize() >= adjusted_allocation_limit) return true; + + return false; + } + + void UpdateNewSpaceReferencesInExternalStringTable( ExternalStringTableUpdaterCallback updater_func); + void UpdateReferencesInExternalStringTable( + ExternalStringTableUpdaterCallback updater_func); + void ProcessWeakReferences(WeakObjectRetainer* retainer); // Helper function that governs the promotion policy from new space to @@ -1263,6 +1422,9 @@ class Heap { GCTracer* tracer() { return tracer_; } + // Returns the size of objects residing in non new spaces. + intptr_t PromotedSpaceSize(); + double total_regexp_code_generated() { return total_regexp_code_generated_; } void IncreaseTotalRegexpCodeGenerated(int size) { total_regexp_code_generated_ += size; @@ -1281,6 +1443,29 @@ class Heap { return &mark_compact_collector_; } + StoreBuffer* store_buffer() { + return &store_buffer_; + } + + Marking* marking() { + return &marking_; + } + + IncrementalMarking* incremental_marking() { + return &incremental_marking_; + } + + bool IsSweepingComplete() { + return old_data_space()->IsSweepingComplete() && + old_pointer_space()->IsSweepingComplete(); + } + + bool AdvanceSweepers(int step_size) { + bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size); + sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size); + return sweeping_complete; + } + ExternalStringTable* external_string_table() { return &external_string_table_; } @@ -1291,16 +1476,35 @@ class Heap { } inline Isolate* isolate(); - bool is_safe_to_read_maps() { return is_safe_to_read_maps_; } - void CallGlobalGCPrologueCallback() { + inline void CallGlobalGCPrologueCallback() { if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_(); } - void CallGlobalGCEpilogueCallback() { + inline void CallGlobalGCEpilogueCallback() { if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_(); } + inline bool OldGenerationAllocationLimitReached(); + + inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) { + scavenging_visitors_table_.GetVisitor(map)(map, slot, obj); + } + + void QueueMemoryChunkForFree(MemoryChunk* chunk); + void FreeQueuedChunks(); + + // Completely clear the Instanceof cache (to stop it keeping objects alive + // around a GC). + inline void CompletelyClearInstanceofCache(); + + // The roots that have an index less than this are always in old space. + static const int kOldSpaceRoots = 0x20; + + bool idle_notification_will_schedule_next_gc() { + return idle_notification_will_schedule_next_gc_; + } + private: Heap(); @@ -1308,12 +1512,12 @@ class Heap { // more expedient to get at the isolate directly from within Heap methods. Isolate* isolate_; + intptr_t code_range_size_; int reserved_semispace_size_; int max_semispace_size_; int initial_semispace_size_; intptr_t max_old_generation_size_; intptr_t max_executable_size_; - intptr_t code_range_size_; // For keeping track of how much data has survived // scavenge since last new space expansion. 
@@ -1328,6 +1532,8 @@ class Heap { // For keeping track of context disposals. int contexts_disposed_; + int scan_on_scavenge_pages_; + #if defined(V8_TARGET_ARCH_X64) static const int kMaxObjectSizeInNewSpace = 1024*KB; #else @@ -1344,13 +1550,9 @@ class Heap { HeapState gc_state_; int gc_post_processing_depth_; - // Returns the size of object residing in non new spaces. - intptr_t PromotedSpaceSize(); - // Returns the amount of external memory registered since last global gc. int PromotedExternalMemorySize(); - int mc_count_; // how many mark-compact collections happened int ms_count_; // how many mark-sweep collections happened unsigned int gc_count_; // how many gc happened @@ -1358,7 +1560,10 @@ class Heap { int unflattened_strings_length_; #define ROOT_ACCESSOR(type, name, camel_name) \ - inline void set_##name(type* value) { \ + inline void set_##name(type* value) { \ + /* The deserializer makes use of the fact that these common roots are */ \ + /* never in new space and never on a page that is being compacted. */ \ + ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \ roots_[k##camel_name##RootIndex] = value; \ } ROOT_LIST(ROOT_ACCESSOR) @@ -1379,6 +1584,10 @@ class Heap { HeapDebugUtils* debug_utils_; #endif // DEBUG + // Indicates that the new space should be kept small due to high promotion + // rates caused by the mutator allocating a lot of long-lived objects. + bool new_space_high_promotion_mode_active_; + // Limit that triggers a global GC on the next (normally caused) GC. This // is checked when we have already decided to do a GC to help determine // which collector to invoke. @@ -1389,6 +1598,13 @@ class Heap { // every allocation in large object space. intptr_t old_gen_allocation_limit_; + // Sometimes the heuristics dictate that those limits are increased. This + // variable records that fact. + int old_gen_limit_factor_; + + // Used to adjust the limits that control the timing of the next GC. + intptr_t size_of_old_gen_at_last_old_space_gc_; + // Limit on the amount of externally allocated memory allowed // between global GCs. If reached a global GC is forced. intptr_t external_allocation_limit_; @@ -1408,6 +1624,8 @@ class Heap { Object* global_contexts_list_; + StoreBufferRebuilder store_buffer_rebuilder_; + struct StringTypeTable { InstanceType type; int size; @@ -1465,13 +1683,11 @@ class Heap { // Support for computing object sizes during GC. HeapObjectCallback gc_safe_size_of_old_object_; static int GcSafeSizeOfOldObject(HeapObject* object); - static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object); // Update the GC state. Called from the mark-compact collector. void MarkMapPointersAsEncoded(bool encoded) { - gc_safe_size_of_old_object_ = encoded - ? &GcSafeSizeOfOldObjectWithEncodedMap - : &GcSafeSizeOfOldObject; + ASSERT(!encoded); + gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject; } // Checks whether a global GC is necessary @@ -1483,11 +1699,10 @@ class Heap { bool PerformGarbageCollection(GarbageCollector collector, GCTracer* tracer); - static const intptr_t kMinimumPromotionLimit = 2 * MB; - static const intptr_t kMinimumAllocationLimit = 8 * MB; inline void UpdateOldSpaceLimits(); + // Allocate an uninitialized object in map space. The behavior is identical // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't // have to test the allocation space argument and (b) can reduce code size @@ -1522,8 +1737,6 @@ class Heap { // Allocate empty fixed double array. 
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray(); - void SwitchScavengingVisitorsTableIfProfilingWasEnabled(); - // Performs a minor collection in new generation. void Scavenge(); @@ -1532,16 +1745,15 @@ class Heap { Object** pointer); Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); + static void ScavengeStoreBufferCallback(Heap* heap, + MemoryChunk* page, + StoreBufferEvent event); // Performs a major collection in the whole heap. void MarkCompact(GCTracer* tracer); // Code to be run before and after mark-compact. - void MarkCompactPrologue(bool is_compacting); - - // Completely clear the Instanceof cache (to stop it keeping objects alive - // around a GC). - inline void CompletelyClearInstanceofCache(); + void MarkCompactPrologue(); // Record statistics before and after garbage collection. void ReportStatisticsBeforeGC(); @@ -1551,12 +1763,11 @@ class Heap { static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); // Initializes a function with a shared part and prototype. - // Returns the function. // Note: this code was factored out of AllocateFunction such that // other parts of the VM could use it. Specifically, a function that creates // instances of type JS_FUNCTION_TYPE benefit from the use of this function. // Please note this does not perform a garbage collection. - MUST_USE_RESULT inline MaybeObject* InitializeFunction( + inline void InitializeFunction( JSFunction* function, SharedFunctionInfo* shared, Object* prototype); @@ -1617,10 +1828,40 @@ class Heap { return survival_rate_trend() == INCREASING; } + bool IsDecreasingSurvivalTrend() { + return survival_rate_trend() == DECREASING; + } + bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; } + void SelectScavengingVisitorsTable(); + + void StartIdleRound() { + mark_sweeps_since_idle_round_started_ = 0; + ms_count_at_last_idle_notification_ = ms_count_; + } + + void FinishIdleRound() { + mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound; + scavenges_since_last_idle_round_ = 0; + } + + bool EnoughGarbageSinceLastIdleRound() { + return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold); + } + + bool WorthStartingGCWhenIdle() { + if (contexts_disposed_ > 0) { + return true; + } + return incremental_marking()->WorthActivating(); + } + + // Returns true if no more GC work is left. + bool IdleGlobalGC(); + static const int kInitialSymbolTableSize = 2048; static const int kInitialEvalCacheSize = 64; @@ -1640,15 +1881,25 @@ class Heap { MarkCompactCollector mark_compact_collector_; - // This field contains the meaning of the WATERMARK_INVALIDATED flag. - // Instead of clearing this flag from all pages we just flip - // its meaning at the beginning of a scavenge. - intptr_t page_watermark_invalidated_mark_; + StoreBuffer store_buffer_; + + Marking marking_; + + IncrementalMarking incremental_marking_; int number_idle_notifications_; unsigned int last_idle_notification_gc_count_; bool last_idle_notification_gc_count_init_; + bool idle_notification_will_schedule_next_gc_; + int mark_sweeps_since_idle_round_started_; + int ms_count_at_last_idle_notification_; + unsigned int gc_count_at_last_idle_gc_; + int scavenges_since_last_idle_round_; + + static const int kMaxMarkSweepsInIdleRound = 7; + static const int kIdleScavengeThreshold = 5; + // Shared state read by the scavenge collector and set by ScavengeObject. 
PromotionQueue promotion_queue_; @@ -1658,7 +1909,9 @@ class Heap { ExternalStringTable external_string_table_; - bool is_safe_to_read_maps_; + VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; + + MemoryChunk* chunks_queued_for_free_; friend class Factory; friend class GCTracer; @@ -1757,29 +2010,6 @@ class VerifyPointersVisitor: public ObjectVisitor { } } }; - - -// Visitor class to verify interior pointers in spaces that use region marks -// to keep track of intergenerational references. -// As VerifyPointersVisitor but also checks that dirty marks are set -// for regions covering intergenerational references. -class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor { - public: - void VisitPointers(Object** start, Object** end) { - for (Object** current = start; current < end; current++) { - if ((*current)->IsHeapObject()) { - HeapObject* object = HeapObject::cast(*current); - ASSERT(HEAP->Contains(object)); - ASSERT(object->map()->IsMap()); - if (HEAP->InNewSpace(object)) { - ASSERT(HEAP->InToSpace(object)); - Address addr = reinterpret_cast<Address>(current); - ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr)); - } - } - } - } -}; #endif @@ -1854,7 +2084,6 @@ class HeapIterator BASE_EMBEDDED { public: enum HeapObjectsFiltering { kNoFiltering, - kFilterFreeListNodes, kFilterUnreachable }; @@ -2078,7 +2307,13 @@ class GCTracer BASE_EMBEDDED { MC_MARK, MC_SWEEP, MC_SWEEP_NEWSPACE, - MC_COMPACT, + MC_EVACUATE_PAGES, + MC_UPDATE_NEW_TO_NEW_POINTERS, + MC_UPDATE_ROOT_TO_NEW_POINTERS, + MC_UPDATE_OLD_TO_NEW_POINTERS, + MC_UPDATE_POINTERS_TO_EVACUATED, + MC_UPDATE_POINTERS_BETWEEN_EVACUATED, + MC_UPDATE_MISC_POINTERS, MC_FLUSH_CODE, kNumberOfScopes }; @@ -2112,16 +2347,6 @@ class GCTracer BASE_EMBEDDED { // Sets the full GC count. void set_full_gc_count(int count) { full_gc_count_ = count; } - // Sets the flag that this is a compacting full GC. - void set_is_compacting() { is_compacting_ = true; } - bool is_compacting() const { return is_compacting_; } - - // Increment and decrement the count of marked objects. - void increment_marked_count() { ++marked_count_; } - void decrement_marked_count() { --marked_count_; } - - int marked_count() { return marked_count_; } - void increment_promoted_objects_size(int object_size) { promoted_objects_size_ += object_size; } @@ -2146,23 +2371,6 @@ class GCTracer BASE_EMBEDDED { // A count (including this one) of the number of full garbage collections. int full_gc_count_; - // True if the current GC is a compacting full collection, false - // otherwise. - bool is_compacting_; - - // True if the *previous* full GC cwas a compacting collection (will be - // false if there has not been a previous full GC). - bool previous_has_compacted_; - - // On a full GC, a count of the number of marked objects. Incremented - // when an object is marked and decremented when an object's mark bit is - // cleared. Will be zero on a scavenge collection. - int marked_count_; - - // The count from the end of the previous full GC. Will be zero if there - // was no previous full GC. - int previous_marked_count_; - // Amounts of time spent in different scopes during GC. double scopes_[Scope::kNumberOfScopes]; @@ -2181,6 +2389,13 @@ class GCTracer BASE_EMBEDDED { // Size of objects promoted during the current collection. intptr_t promoted_objects_size_; + // Incremental marking steps counters. 
+ int steps_count_; + double steps_took_; + double longest_step_; + int steps_count_since_last_gc_; + double steps_took_since_last_gc_; + Heap* heap_; }; @@ -2292,6 +2507,46 @@ class WeakObjectRetainer { }; +// Intrusive object marking uses least significant bit of +// heap object's map word to mark objects. +// Normally all map words have least significant bit set +// because they contain tagged map pointer. +// If the bit is not set object is marked. +// All objects should be unmarked before resuming +// JavaScript execution. +class IntrusiveMarking { + public: + static bool IsMarked(HeapObject* object) { + return (object->map_word().ToRawValue() & kNotMarkedBit) == 0; + } + + static void ClearMark(HeapObject* object) { + uintptr_t map_word = object->map_word().ToRawValue(); + object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit)); + ASSERT(!IsMarked(object)); + } + + static void SetMark(HeapObject* object) { + uintptr_t map_word = object->map_word().ToRawValue(); + object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit)); + ASSERT(IsMarked(object)); + } + + static Map* MapOfMarkedObject(HeapObject* object) { + uintptr_t map_word = object->map_word().ToRawValue(); + return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap(); + } + + static int SizeOfMarkedObject(HeapObject* object) { + return object->SizeFromMap(MapOfMarkedObject(object)); + } + + private: + static const uintptr_t kNotMarkedBit = 0x1; + STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0); +}; + + #if defined(DEBUG) || defined(LIVE_OBJECT_LIST) // Helper class for tracing paths to a search target Object from all roots. // The TracePathFrom() method can be used to trace paths from a specific @@ -2350,7 +2605,6 @@ class PathTracer : public ObjectVisitor { }; #endif // DEBUG || LIVE_OBJECT_LIST - } } // namespace v8::internal #undef HEAP |
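To make the map-word bit trick in the new IntrusiveMarking class concrete, here is a small self-contained illustration that uses a plain integer in place of a real map word. The constant 0x12345679 is an arbitrary stand-in for a tagged map pointer (low bit set, as the STATIC_ASSERT on kHeapObjectTag guarantees):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const std::uintptr_t kNotMarkedBit = 0x1;
  std::uintptr_t map_word = 0x12345679;  // tagged map pointer: low bit is 1

  assert((map_word & kNotMarkedBit) != 0);        // object starts unmarked

  std::uintptr_t marked = map_word & ~kNotMarkedBit;   // SetMark()
  assert((marked & kNotMarkedBit) == 0);                // IsMarked()

  std::uintptr_t recovered = marked | kNotMarkedBit;    // MapOfMarkedObject()
  assert(recovered == map_word);   // the map pointer survives marking intact

  std::printf("map word %#lx -> marked %#lx -> recovered %#lx\n",
              (unsigned long)map_word, (unsigned long)marked,
              (unsigned long)recovered);
  return 0;
}
```

Because the mark only borrows the tag bit, every marked object must be unmarked again before JavaScript resumes, exactly as the class comment states.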
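The new GC-trigger heuristics (OldGenPromotionLimit / OldGenAllocationLimit) replace the fixed 2 MB / 8 MB minimums with page-based ones, scale with the old-generation size at the last full GC, and cap the result halfway to the configured maximum. A sketch restating the promotion-limit arithmetic; the 1 MB page size and all the numbers in main() are assumptions for illustration only:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Mirrors OldGenPromotionLimit() from the patch with explicit parameters.
int64_t PromotionLimit(int64_t old_gen_size, int64_t new_space_capacity,
                       int64_t max_old_generation_size, int64_t factor) {
  // kMinimumPromotionLimit is 5 * Page::kPageSize; 1 MB pages assumed here.
  const int64_t kMinimumPromotionLimit = 5LL << 20;
  const int divisor = 3;  // 10 when --stress-compaction is enabled
  int64_t limit =
      std::max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
  limit += new_space_capacity;
  limit *= factor;  // old_gen_limit_factor_, raised by the heuristics
  int64_t halfway_to_the_max = (old_gen_size + max_old_generation_size) / 2;
  return std::min(limit, halfway_to_the_max);
}

int main() {
  // 64 MB promoted at the last full GC, 8 MB new space, 700 MB max old gen.
  int64_t limit = PromotionLimit(64LL << 20, 8LL << 20, 700LL << 20, 1);
  std::printf("next full GC when promoted size exceeds ~%lld MB\n",
              (long long)(limit >> 20));  // 64 + 64/3 + 8, i.e. ~93 MB
  return 0;
}
```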