Diffstat (limited to 'deps/v8/src/heap.cc')
-rw-r--r--  deps/v8/src/heap.cc  684
1 file changed, 275 insertions, 409 deletions
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 24e4039422..fa358c5392 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -67,29 +67,14 @@ namespace internal {
Heap::Heap()
: isolate_(NULL),
+ code_range_size_(kIs64BitArch ? 512 * MB : 0),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
-#if V8_TARGET_ARCH_X64
-#define LUMP_OF_MEMORY (2 * MB)
- code_range_size_(512*MB),
-#else
-#define LUMP_OF_MEMORY MB
- code_range_size_(0),
-#endif
-#if defined(ANDROID) || V8_TARGET_ARCH_MIPS
- reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- initial_semispace_size_(Page::kPageSize),
- max_old_generation_size_(192*MB),
- max_executable_size_(max_old_generation_size_),
-#else
- reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
+ max_semispace_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(Page::kPageSize),
- max_old_generation_size_(700ul * LUMP_OF_MEMORY),
- max_executable_size_(256l * LUMP_OF_MEMORY),
-#endif
-
+ max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+ max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
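Note: the removed #ifdef/LUMP_OF_MEMORY block is folded into expressions scaled by kPointerSize / 4, so one formula covers both 32-bit and 64-bit builds. A minimal sketch of the resulting defaults, assuming kPointerSize is 4 on 32-bit and 8 on 64-bit targets:

    // Illustrative only: how the new expressions evaluate per architecture.
    const int scale = kPointerSize / 4;                  // 1 on 32-bit, 2 on 64-bit
    const int max_semispace = 8 * scale * MB;            // 8 MB   / 16 MB
    const intptr_t max_old_gen = 700ul * scale * MB;     // 700 MB / 1400 MB
    const intptr_t max_executable = 256ul * scale * MB;  // 256 MB / 512 MB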
@@ -101,6 +86,7 @@ Heap::Heap()
contexts_disposed_(0),
global_ic_age_(0),
flush_monomorphic_ics_(false),
+ allocation_mementos_found_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@@ -129,8 +115,6 @@ Heap::Heap()
old_gen_exhausted_(false),
store_buffer_rebuilder_(store_buffer()),
hidden_string_(NULL),
- global_gc_prologue_callback_(NULL),
- global_gc_epilogue_callback_(NULL),
gc_safe_size_of_old_object_(NULL),
total_regexp_code_generated_(0),
tracer_(NULL),
@@ -157,9 +141,11 @@ Heap::Heap()
mark_sweeps_since_idle_round_started_(0),
gc_count_at_last_idle_gc_(0),
scavenges_since_last_idle_round_(kIdleScavengeThreshold),
+ full_codegen_bytes_generated_(0),
+ crankshaft_codegen_bytes_generated_(0),
gcs_since_last_deopt_(0),
#ifdef VERIFY_HEAP
- no_weak_embedded_maps_verification_scope_depth_(0),
+ no_weak_object_verification_scope_depth_(0),
#endif
promotion_queue_(this),
configured_(false),
@@ -172,6 +158,9 @@ Heap::Heap()
max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
+ // Ensure old_generation_size_ is a multiple of kPageSize.
+ ASSERT(MB >= Page::kPageSize);
+
intptr_t max_virtual = OS::MaxVirtualMemory();
if (max_virtual > 0) {
@@ -461,6 +450,10 @@ void Heap::GarbageCollectionPrologue() {
#endif // DEBUG
store_buffer()->GCPrologue();
+
+ if (FLAG_concurrent_osr) {
+ isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
+ }
}
@@ -521,10 +514,31 @@ void Heap::GarbageCollectionEpilogue() {
isolate_->counters()->number_of_symbols()->Set(
string_table()->NumberOfElements());
+ if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
+ isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
+ static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
+ (crankshaft_codegen_bytes_generated_
+ + full_codegen_bytes_generated_)));
+ }
+
if (CommittedMemory() > 0) {
isolate_->counters()->external_fragmentation_total()->AddSample(
static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_new_space()->
+ AddSample(static_cast<int>(
+ (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+ isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
+ static_cast<int>(
+ (old_pointer_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_old_data_space()->AddSample(
+ static_cast<int>(
+ (old_data_space()->CommittedMemory() * 100.0) /
+ CommittedMemory()));
+ isolate_->counters()->heap_fraction_code_space()->
+ AddSample(static_cast<int>(
+ (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_map_space()->AddSample(
static_cast<int>(
(map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
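Note: the new codegen_fraction_crankshaft sample records what share of generated code bytes came from Crankshaft rather than full-codegen. A stand-alone sketch of that computation (the function name here is illustrative, not part of V8):

    // Percentage of optimized (Crankshaft) code among all code generated since
    // the last checkpoint; mirrors the AddSample() expression above. Callers
    // should guard against both counters being zero, as the epilogue does.
    static int CrankshaftCodegenFraction(intptr_t crankshaft_bytes,
                                         intptr_t full_codegen_bytes) {
      return static_cast<int>((crankshaft_bytes * 100.0) /
                              (crankshaft_bytes + full_codegen_bytes));
    }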
@@ -535,6 +549,9 @@ void Heap::GarbageCollectionEpilogue() {
AddSample(static_cast<int>(
(property_cell_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
+ isolate_->counters()->heap_fraction_lo_space()->
+ AddSample(static_cast<int>(
+ (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_sample_total_committed()->AddSample(
static_cast<int>(CommittedMemory() / KB));
@@ -548,6 +565,8 @@ void Heap::GarbageCollectionEpilogue() {
heap_sample_property_cell_space_committed()->
AddSample(static_cast<int>(
property_cell_space()->CommittedMemory() / KB));
+ isolate_->counters()->heap_sample_code_space_committed()->AddSample(
+ static_cast<int>(code_space()->CommittedMemory() / KB));
}
#define UPDATE_COUNTERS_FOR_SPACE(space) \
@@ -610,6 +629,11 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
+ if (FLAG_concurrent_recompilation) {
+ // The optimizing compiler may be unnecessarily holding on to memory.
+ DisallowHeapAllocation no_recursive_gc;
+ isolate()->optimizing_compiler_thread()->Flush();
+ }
mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
@@ -1055,12 +1079,17 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
- global_gc_prologue_callback_();
- }
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_type & gc_prologue_callbacks_[i].gc_type) {
- gc_prologue_callbacks_[i].callback(gc_type, flags);
+ if (!gc_prologue_callbacks_[i].pass_isolate_) {
+ v8::GCPrologueCallback callback =
+ reinterpret_cast<v8::GCPrologueCallback>(
+ gc_prologue_callbacks_[i].callback);
+ callback(gc_type, flags);
+ } else {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+ gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
+ }
}
}
}
@@ -1069,12 +1098,18 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
- gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
+ if (!gc_epilogue_callbacks_[i].pass_isolate_) {
+ v8::GCPrologueCallback callback =
+ reinterpret_cast<v8::GCPrologueCallback>(
+ gc_epilogue_callbacks_[i].callback);
+ callback(gc_type, kNoGCCallbackFlags);
+ } else {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+ gc_epilogue_callbacks_[i].callback(
+ isolate, gc_type, kNoGCCallbackFlags);
+ }
}
}
- if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
- global_gc_epilogue_callback_();
- }
}
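Note: both callback loops now dispatch on pass_isolate_, so a single list can hold old-style callbacks taking (gc_type, flags) and new-style callbacks that also receive the isolate. A hedged sketch of the two embedder-side signatures implied by the casts above:

    // Hypothetical embedder callbacks; signatures follow the casts in the loops.
    static void LegacyPrologue(v8::GCType type, v8::GCCallbackFlags flags) {
      // Old-style: registered without an isolate, pass_isolate_ == false.
    }
    static void IsolateAwarePrologue(v8::Isolate* isolate, v8::GCType type,
                                     v8::GCCallbackFlags flags) {
      // New-style: registered through v8::Isolate, pass_isolate_ == true.
    }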
@@ -1326,6 +1361,8 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
RelocationLock relocation_lock(this);
+ allocation_mementos_found_ = 0;
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
@@ -1473,6 +1510,11 @@ void Heap::Scavenge() {
gc_state_ = NOT_IN_GC;
scavenges_since_last_idle_round_++;
+
+ if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0) {
+ PrintF("AllocationMementos found during scavenge = %d\n",
+ allocation_mementos_found_);
+ }
}
@@ -1947,6 +1989,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
+STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);
INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
@@ -2091,8 +2134,12 @@ class ScavengingVisitor : public StaticVisitorBase {
if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
// Update NewSpace stats if necessary.
RecordCopiedObject(heap, target);
- HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
Isolate* isolate = heap->isolate();
+ HeapProfiler* heap_profiler = isolate->heap_profiler();
+ if (heap_profiler->is_profiling()) {
+ heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+ size);
+ }
if (isolate->logger()->is_logging_code_events() ||
isolate->cpu_profiler()->is_profiling()) {
if (target->IsSharedFunctionInfo()) {
@@ -2129,12 +2176,10 @@ class ScavengingVisitor : public StaticVisitorBase {
MaybeObject* maybe_result;
if (object_contents == DATA_OBJECT) {
- // TODO(mstarzinger): Turn this check into a regular assert soon!
- CHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
} else {
- // TODO(mstarzinger): Turn this check into a regular assert soon!
- CHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
}
@@ -2165,8 +2210,7 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
}
- // TODO(mstarzinger): Turn this check into a regular assert soon!
- CHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
+ ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
@@ -2392,7 +2436,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result;
- MaybeObject* maybe_result = AllocateRawMap();
+ MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
// Map::cast cannot be used due to uninitialized map field.
@@ -2417,7 +2461,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
Object* result;
- MaybeObject* maybe_result = AllocateRawMap();
+ MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!maybe_result->To(&result)) return maybe_result;
Map* map = reinterpret_cast<Map*>(result);
@@ -2650,6 +2694,12 @@ bool Heap::CreateInitialMaps() {
set_fixed_double_array_map(Map::cast(obj));
{ MaybeObject* maybe_obj =
+ AllocateMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_constant_pool_array_map(Map::cast(obj));
+
+ { MaybeObject* maybe_obj =
AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2887,12 +2937,12 @@ bool Heap::CreateInitialMaps() {
MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
+ int size = HeapNumber::kSize;
STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
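Note: most allocation paths in this patch replace the open-coded space choice with SelectSpace(size, preferred_old_space, pretenure). The helper itself is not shown in this file; an inferred sketch, matching the logic being deleted throughout the diff (large objects go to LO_SPACE, otherwise NEW_SPACE unless pretenured):

    // Inferred shape of the helper; the real SelectSpace lives outside this diff.
    static AllocationSpace SelectSpaceSketch(int object_size,
                                             AllocationSpace preferred_old_space,
                                             PretenureFlag pretenure) {
      if (object_size > Page::kMaxNonCodeHeapObjectSize) return LO_SPACE;
      return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
    }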
@@ -2902,26 +2952,12 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
}
-MaybeObject* Heap::AllocateHeapNumber(double value) {
- // Use general version, if we're forced to always allocate.
- if (always_allocate()) return AllocateHeapNumber(value, TENURED);
-
- // This version of AllocateHeapNumber is optimized for
- // allocation in new space.
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
- Object* result;
- { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
- HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
- HeapNumber::cast(result)->set_value(value);
- return result;
-}
-
-
MaybeObject* Heap::AllocateCell(Object* value) {
+ int size = Cell::kSize;
+ STATIC_ASSERT(Cell::kSize <= Page::kNonCodeObjectAreaSize);
+
Object* result;
- { MaybeObject* maybe_result = AllocateRawCell();
+ { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
@@ -2930,9 +2966,13 @@ MaybeObject* Heap::AllocateCell(Object* value) {
}
-MaybeObject* Heap::AllocatePropertyCell(Object* value) {
+MaybeObject* Heap::AllocatePropertyCell() {
+ int size = PropertyCell::kSize;
+ STATIC_ASSERT(PropertyCell::kSize <= Page::kNonCodeObjectAreaSize);
+
Object* result;
- MaybeObject* maybe_result = AllocateRawPropertyCell();
+ MaybeObject* maybe_result =
+ AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
HeapObject::cast(result)->set_map_no_write_barrier(
@@ -2940,10 +2980,8 @@ MaybeObject* Heap::AllocatePropertyCell(Object* value) {
PropertyCell* cell = PropertyCell::cast(result);
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
- cell->set_value(value);
+ cell->set_value(the_hole_value());
cell->set_type(Type::None());
- maybe_result = cell->SetValueInferType(value);
- if (maybe_result->IsFailure()) return maybe_result;
return result;
}
@@ -2958,17 +2996,16 @@ MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateAllocationSite() {
- Object* result;
+ AllocationSite* site;
MaybeObject* maybe_result = Allocate(allocation_site_map(),
OLD_POINTER_SPACE);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- AllocationSite* site = AllocationSite::cast(result);
+ if (!maybe_result->To(&site)) return maybe_result;
site->Initialize();
// Link the site
site->set_weak_next(allocation_sites_list());
set_allocation_sites_list(site);
- return result;
+ return site;
}
@@ -4057,31 +4094,8 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > ByteArray::kMaxLength) {
return Failure::OutOfMemoryException(0x7);
}
- if (pretenure == NOT_TENURED) {
- return AllocateByteArray(length);
- }
int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_DATA_SPACE;
- Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
-
- reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
- byte_array_map());
- reinterpret_cast<ByteArray*>(result)->set_length(length);
- return result;
-}
-
-
-MaybeObject* Heap::AllocateByteArray(int length) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException(0x8);
- }
- int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
{ MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4112,11 +4126,10 @@ MaybeObject* Heap::AllocateExternalArray(int length,
ExternalArrayType array_type,
void* external_pointer,
PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ int size = ExternalArray::kAlignedSize;
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
- space,
- OLD_DATA_SPACE);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -4134,7 +4147,8 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_reference,
bool immovable,
- bool crankshafted) {
+ bool crankshafted,
+ int prologue_offset) {
// Allocate ByteArray before the Code object, so that we do not risk
// leaving uninitialized Code object (and breaking the heap).
ByteArray* reloc_info;
@@ -4184,10 +4198,18 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
- code->set_prologue_offset(kPrologueOffsetNotSet);
+ code->set_prologue_offset(prologue_offset);
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
}
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (code->kind() == Code::FUNCTION) {
+ code->set_has_debug_break_slots(
+ isolate_->debugger()->IsDebuggerActive());
+ }
+#endif
+
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -4310,6 +4332,7 @@ MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(allocation_site->map() == allocation_site_map());
alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
return result;
}
@@ -4414,10 +4437,6 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
arguments_object_size = kArgumentsObjectSize;
}
- // This calls Copy directly rather than using Heap::AllocateRaw so we
- // duplicate the check here.
- ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
-
// Check that the size of the boilerplate matches our
// expectations. The ArgumentsAccessStub::GenerateNewObject relies
// on the size being a known constant.
@@ -4553,9 +4572,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap(
}
// Allocate the JSObject.
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+ int size = map->instance_size();
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
Object* obj;
MaybeObject* maybe_obj = Allocate(map, space);
if (!maybe_obj->To(&obj)) return maybe_obj;
@@ -4588,8 +4606,8 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
}
// Allocate the JSObject.
- AllocationSpace space = NEW_SPACE;
- if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+ int size = map->instance_size();
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
Object* obj;
MaybeObject* maybe_obj =
AllocateWithAllocationSite(map, space, allocation_site);
@@ -4745,20 +4763,6 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
}
-MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
- ElementsKind elements_kind,
- int length,
- int capacity,
- Handle<AllocationSite> allocation_site,
- ArrayStorageAllocationMode mode) {
- MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
- allocation_site);
- JSArray* array;
- if (!maybe_array->To(&array)) return maybe_array;
- return AllocateJSArrayStorage(array, length, capacity, mode);
-}
-
-
MaybeObject* Heap::AllocateJSArrayStorage(
JSArray* array,
int length,
@@ -4861,74 +4865,7 @@ MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
}
-MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
- ASSERT(constructor->has_initial_map());
- Map* map = constructor->initial_map();
- ASSERT(map->is_dictionary_map());
-
- // Make sure no field properties are described in the initial map.
- // This guarantees us that normalizing the properties does not
- // require us to change property values to PropertyCells.
- ASSERT(map->NextFreePropertyIndex() == 0);
-
- // Make sure we don't have a ton of pre-allocated slots in the
- // global objects. They will be unused once we normalize the object.
- ASSERT(map->unused_property_fields() == 0);
- ASSERT(map->inobject_properties() == 0);
-
- // Initial size of the backing store to avoid resize of the storage during
- // bootstrapping. The size differs between the JS global object ad the
- // builtins object.
- int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
-
- // Allocate a dictionary object for backing storage.
- NameDictionary* dictionary;
- MaybeObject* maybe_dictionary =
- NameDictionary::Allocate(
- this,
- map->NumberOfOwnDescriptors() * 2 + initial_size);
- if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
-
- // The global object might be created from an object template with accessors.
- // Fill these accessors into the dictionary.
- DescriptorArray* descs = map->instance_descriptors();
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- PropertyDetails details = descs->GetDetails(i);
- ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
- PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
- Object* value = descs->GetCallbacksObject(i);
- MaybeObject* maybe_value = AllocatePropertyCell(value);
- if (!maybe_value->ToObject(&value)) return maybe_value;
-
- MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
- if (!maybe_added->To(&dictionary)) return maybe_added;
- }
-
- // Allocate the global object and initialize it with the backing store.
- JSObject* global;
- MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
- if (!maybe_global->To(&global)) return maybe_global;
-
- InitializeJSObjectFromMap(global, dictionary, map);
-
- // Create a new map for the global object.
- Map* new_map;
- MaybeObject* maybe_map = map->CopyDropDescriptors();
- if (!maybe_map->To(&new_map)) return maybe_map;
- new_map->set_dictionary_map(true);
-
- // Set up the global object as a normalized object.
- global->set_map(new_map);
- global->set_properties(dictionary);
-
- // Make sure result is a global object with properties in dictionary.
- ASSERT(global->IsGlobalObject());
- ASSERT(!global->HasFastProperties());
- return global;
-}
-
-
-MaybeObject* Heap::CopyJSObject(JSObject* source) {
+MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
// Never used to copy functions. If functions need to be copied we
// have to be careful to clear the literals array.
SLOW_ASSERT(!source->IsJSFunction());
@@ -4938,6 +4875,9 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
int object_size = map->instance_size();
Object* clone;
+ ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) &&
+ map->instance_type() == JS_ARRAY_TYPE));
+
WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
// If we're forced to always allocate, we use the general allocation
@@ -4958,7 +4898,10 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
} else {
wb_mode = SKIP_WRITE_BARRIER;
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
+ { int adjusted_object_size = site != NULL
+ ? object_size + AllocationMemento::kSize
+ : object_size;
+ MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
SLOW_ASSERT(InNewSpace(clone));
@@ -4967,115 +4910,21 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
CopyBlock(HeapObject::cast(clone)->address(),
source->address(),
object_size);
- }
-
- SLOW_ASSERT(
- JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
- FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
- FixedArray* properties = FixedArray::cast(source->properties());
- // Update elements if necessary.
- if (elements->length() > 0) {
- Object* elem;
- { MaybeObject* maybe_elem;
- if (elements->map() == fixed_cow_array_map()) {
- maybe_elem = FixedArray::cast(elements);
- } else if (source->HasFastDoubleElements()) {
- maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
- } else {
- maybe_elem = CopyFixedArray(FixedArray::cast(elements));
- }
- if (!maybe_elem->ToObject(&elem)) return maybe_elem;
- }
- JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
- }
- // Update properties if necessary.
- if (properties->length() > 0) {
- Object* prop;
- { MaybeObject* maybe_prop = CopyFixedArray(properties);
- if (!maybe_prop->ToObject(&prop)) return maybe_prop;
- }
- JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
- }
- // Return the new clone.
- return clone;
-}
-
-
-MaybeObject* Heap::CopyJSObjectWithAllocationSite(
- JSObject* source,
- AllocationSite* site) {
- // Never used to copy functions. If functions need to be copied we
- // have to be careful to clear the literals array.
- SLOW_ASSERT(!source->IsJSFunction());
-
- // Make the clone.
- Map* map = source->map();
- int object_size = map->instance_size();
- Object* clone;
-
- ASSERT(AllocationSite::CanTrack(map->instance_type()));
- ASSERT(map->instance_type() == JS_ARRAY_TYPE);
- WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
-
- // If we're forced to always allocate, we use the general allocation
- // functions which may leave us with an object in old space.
- int adjusted_object_size = object_size;
- if (always_allocate()) {
- // We'll only track origin if we are certain to allocate in new space
- const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
- if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
- adjusted_object_size += AllocationMemento::kSize;
- }
-
- { MaybeObject* maybe_clone =
- AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- Address clone_address = HeapObject::cast(clone)->address();
- CopyBlock(clone_address,
- source->address(),
- object_size);
- // Update write barrier for all fields that lie beyond the header.
- int write_barrier_offset = adjusted_object_size > object_size
- ? JSArray::kSize + AllocationMemento::kSize
- : JSObject::kHeaderSize;
- if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
- RecordWrites(clone_address,
- write_barrier_offset,
- (object_size - write_barrier_offset) / kPointerSize);
- }
- // Track allocation site information, if we failed to allocate it inline.
- if (InNewSpace(clone) &&
- adjusted_object_size == object_size) {
- MaybeObject* maybe_alloc_memento =
- AllocateStruct(ALLOCATION_MEMENTO_TYPE);
- AllocationMemento* alloc_memento;
- if (maybe_alloc_memento->To(&alloc_memento)) {
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+ if (site != NULL) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(clone) + object_size);
+ alloc_memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(site->map() == allocation_site_map());
+ alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+ HeapProfiler* profiler = isolate()->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
+ object_size);
+ profiler->NewObjectEvent(alloc_memento->address(),
+ AllocationMemento::kSize);
}
}
- } else {
- wb_mode = SKIP_WRITE_BARRIER;
- adjusted_object_size += AllocationMemento::kSize;
-
- { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
- if (!maybe_clone->ToObject(&clone)) return maybe_clone;
- }
- SLOW_ASSERT(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(HeapObject::cast(clone)->address(),
- source->address(),
- object_size);
- }
-
- if (adjusted_object_size > object_size) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(clone) + object_size);
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
}
SLOW_ASSERT(
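Note: CopyJSObject now takes an optional AllocationSite and, for new-space clones, reserves room for an AllocationMemento directly behind the copied object, replacing the separate CopyJSObjectWithAllocationSite path removed above. Sketch of the resulting layout, using the sizes from this hunk:

    // clone in new space, when site != NULL:
    //   [ object bytes: map->instance_size() ][ AllocationMemento::kSize ]
    //   ^ clone address                        ^ alloc_memento address
    // adjusted_object_size = object_size + AllocationMemento::kSize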
@@ -5366,12 +5215,11 @@ MaybeObject* Heap::AllocateInternalizedStringImpl(
map = internalized_string_map();
size = SeqTwoByteString::SizeFor(chars);
}
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
// Allocate string.
Object* result;
- { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
- ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
- : old_data_space_->AllocateRaw(size);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5410,16 +5258,10 @@ MaybeObject* Heap::AllocateRawOneByteString(int length,
}
int size = SeqOneByteString::SizeFor(length);
ASSERT(size <= SeqOneByteString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5440,16 +5282,10 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
}
int size = SeqTwoByteString::SizeFor(length);
ASSERT(size <= SeqTwoByteString::kMaxSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
- { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
@@ -5474,24 +5310,6 @@ MaybeObject* Heap::AllocateJSArray(
}
-MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
- ElementsKind elements_kind,
- Handle<AllocationSite> allocation_site) {
- Context* native_context = isolate()->context()->native_context();
- JSFunction* array_function = native_context->array_function();
- Map* map = array_function->initial_map();
- Object* maybe_map_array = native_context->js_array_maps();
- if (!maybe_map_array->IsUndefined()) {
- Object* maybe_transitioned_map =
- FixedArray::cast(maybe_map_array)->get(elements_kind);
- if (!maybe_transitioned_map->IsUndefined()) {
- map = Map::cast(maybe_transitioned_map);
- }
- }
- return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
-}
-
-
MaybeObject* Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
Object* result;
@@ -5512,25 +5330,10 @@ MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
}
-MaybeObject* Heap::AllocateRawFixedArray(int length) {
- if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException(0xd);
- }
- ASSERT(length > 0);
- // Use the general function if we're forced to always allocate.
- if (always_allocate()) return AllocateFixedArray(length, TENURED);
- // Allocate the raw data for a fixed array.
- int size = FixedArray::SizeFor(length);
- return size <= Page::kMaxNonCodeHeapObjectSize
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
-}
-
-
MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
int len = src->length();
Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
+ { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
if (InNewSpace(obj)) {
@@ -5570,21 +5373,24 @@ MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
}
-MaybeObject* Heap::AllocateFixedArray(int length) {
- ASSERT(length >= 0);
- if (length == 0) return empty_fixed_array();
- Object* result;
- { MaybeObject* maybe_result = AllocateRawFixedArray(length);
- if (!maybe_result->ToObject(&result)) return maybe_result;
+MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
+ Map* map) {
+ int int64_entries = src->count_of_int64_entries();
+ int ptr_entries = src->count_of_ptr_entries();
+ int int32_entries = src->count_of_int32_entries();
+ Object* obj;
+ { MaybeObject* maybe_obj =
+ AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- // Initialize header.
- FixedArray* array = reinterpret_cast<FixedArray*>(result);
- array->set_map_no_write_barrier(fixed_array_map());
- array->set_length(length);
- // Initialize body.
- ASSERT(!InNewSpace(undefined_value()));
- MemsetPointer(array->data_start(), undefined_value(), length);
- return result;
+ HeapObject* dst = HeapObject::cast(obj);
+ dst->set_map_no_write_barrier(map);
+ CopyBlock(
+ dst->address() + ConstantPoolArray::kLengthOffset,
+ src->address() + ConstantPoolArray::kLengthOffset,
+ ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
+ - ConstantPoolArray::kLengthOffset);
+ return obj;
}
@@ -5593,35 +5399,26 @@ MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
return Failure::OutOfMemoryException(0xe);
}
int size = FixedArray::SizeFor(length);
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_POINTER_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
- }
-
- return AllocateRaw(size, space, retry_space);
+ return AllocateRaw(size, space, OLD_POINTER_SPACE);
}
-MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
- Heap* heap,
- int length,
- PretenureFlag pretenure,
- Object* filler) {
+MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
+ PretenureFlag pretenure,
+ Object* filler) {
ASSERT(length >= 0);
- ASSERT(heap->empty_fixed_array()->IsFixedArray());
- if (length == 0) return heap->empty_fixed_array();
+ ASSERT(empty_fixed_array()->IsFixedArray());
+ if (length == 0) return empty_fixed_array();
- ASSERT(!heap->InNewSpace(filler));
+ ASSERT(!InNewSpace(filler));
Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
+ { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -5630,19 +5427,13 @@ MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- undefined_value());
+ return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
}
MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(this,
- length,
- pretenure,
- the_hole_value());
+ return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
}
@@ -5650,7 +5441,7 @@ MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
if (length == 0) return empty_fixed_array();
Object* obj;
- { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
+ { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
@@ -5720,24 +5511,52 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
return Failure::OutOfMemoryException(0xf);
}
int size = FixedDoubleArray::SizeFor(length);
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- AllocationSpace retry_space = OLD_DATA_SPACE;
-
#ifndef V8_HOST_ARCH_64_BIT
size += kPointerSize;
#endif
+ AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
- if (size > Page::kMaxNonCodeHeapObjectSize) {
- // Allocate in large object space, retry space will be ignored.
- space = LO_SPACE;
+ HeapObject* object;
+ { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
}
+ return EnsureDoubleAligned(this, object, size);
+}
+
+
+MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
+ int number_of_ptr_entries,
+ int number_of_int32_entries) {
+ ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
+ number_of_int32_entries > 0);
+ int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries);
+#ifndef V8_HOST_ARCH_64_BIT
+ size += kPointerSize;
+#endif
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
+
HeapObject* object;
- { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
+ { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
}
+ object = EnsureDoubleAligned(this, object, size);
+ HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
- return EnsureDoubleAligned(this, object, size);
+ ConstantPoolArray* constant_pool =
+ reinterpret_cast<ConstantPoolArray*>(object);
+ constant_pool->SetEntryCounts(number_of_int64_entries,
+ number_of_ptr_entries,
+ number_of_int32_entries);
+ MemsetPointer(
+ HeapObject::RawField(
+ constant_pool,
+ constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
+ undefined_value(),
+ number_of_ptr_entries);
+ return constant_pool;
}
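Note: AllocateConstantPoolArray sizes the object for three entry sections and only clears the tagged-pointer section with undefined_value(); the int64 and int32 sections hold raw data and need no GC-safe filler. Inferred layout, based on the SizeFor and first_ptr_index calls above:

    // [ map | entry counts | int64 entries | tagged Object* entries | int32 entries ]
    //                        raw data        memset to undefined      raw data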
@@ -5937,8 +5756,7 @@ STRUCT_LIST(MAKE_CASE)
return Failure::InternalError();
}
int size = map->instance_size();
- AllocationSpace space =
- (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
+ AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
Object* result;
{ MaybeObject* maybe_result = Allocate(map, space);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -6965,6 +6783,7 @@ bool Heap::CreateHeapObjects() {
native_contexts_list_ = undefined_value();
array_buffers_list_ = undefined_value();
allocation_sites_list_ = undefined_value();
+ weak_object_to_code_table_ = undefined_value();
return true;
}
@@ -7068,15 +6887,17 @@ void Heap::TearDown() {
}
-void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
+void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+ GCType gc_type,
+ bool pass_isolate) {
ASSERT(callback != NULL);
- GCPrologueCallbackPair pair(callback, gc_type);
+ GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
ASSERT(!gc_prologue_callbacks_.Contains(pair));
return gc_prologue_callbacks_.Add(pair);
}
-void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
ASSERT(callback != NULL);
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_prologue_callbacks_[i].callback == callback) {
@@ -7088,15 +6909,17 @@ void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
}
-void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
+void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+ GCType gc_type,
+ bool pass_isolate) {
ASSERT(callback != NULL);
- GCEpilogueCallbackPair pair(callback, gc_type);
+ GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
ASSERT(!gc_epilogue_callbacks_.Contains(pair));
return gc_epilogue_callbacks_.Add(pair);
}
-void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
ASSERT(callback != NULL);
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_epilogue_callbacks_[i].callback == callback) {
@@ -7108,6 +6931,37 @@ void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
}
+MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
+ DependentCode* dep) {
+ ASSERT(!InNewSpace(obj));
+ ASSERT(!InNewSpace(dep));
+ MaybeObject* maybe_obj =
+ WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
+ WeakHashTable* table;
+ if (!maybe_obj->To(&table)) return maybe_obj;
+ if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
+ WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
+ }
+ set_weak_object_to_code_table(table);
+ ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
+ return weak_object_to_code_table_;
+}
+
+
+DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
+ Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
+ if (dep->IsDependentCode()) return DependentCode::cast(dep);
+ return DependentCode::cast(empty_fixed_array());
+}
+
+
+void Heap::EnsureWeakObjectToCodeTable() {
+ if (!weak_object_to_code_table()->IsHashTable()) {
+ set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
+ }
+}
+
+
#ifdef DEBUG
class PrintHandleVisitor: public ObjectVisitor {
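Note: the weak_object_to_code_table_ introduced here maps arbitrary heap objects to DependentCode and falls back to an empty list when no entry exists. A hypothetical call sequence using the three new functions (the call site itself is illustrative):

    // Ensure the table exists, record a dependency, then look it up again.
    heap->EnsureWeakObjectToCodeTable();
    MaybeObject* maybe_table = heap->AddWeakObjectToCodeDependency(object, dep);
    if (maybe_table->IsFailure()) return maybe_table;   // table growth can fail
    DependentCode* codes = heap->LookupWeakObjectToCodeDependency(object);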
@@ -8090,6 +7944,18 @@ void Heap::CheckpointObjectStats() {
static_cast<int>(object_sizes_last_time_[index]));
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge; \
+ counters->count_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_CODE_AGE_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_CODE_AGE_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ CODE_AGE_LIST_WITH_NO_AGE(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));