Diffstat (limited to 'deps/v8/src'): 216 files changed, 27570 insertions, 15248 deletions
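For context, two of the embedder-visible API changes in the diff below are the extra source_length parameter on the v8::Extension constructor and the new v8::Debug::DisableAgent() entry point. The snippet that follows is a minimal sketch of how an embedder might exercise them, not code from this commit; the extension name, script source, and agent port are made up for illustration, and it assumes the v8.h/v8-debug.h headers shipped with this revision.

// Hypothetical embedder code; names and port number are illustrative only.
#include <v8.h>
#include <v8-debug.h>

// Plain JavaScript source for the extension. Passing the length explicitly
// uses the new source_length parameter; a negative value falls back to
// strlen(source), as implemented in api.cc below.
static const char kSource[] = "function hello() { return 'hello'; }";
static v8::Extension hello_extension("hello/v1", kSource, 0, NULL,
                                     sizeof(kSource) - 1);

int main() {
  v8::RegisterExtension(&hello_extension);

  // The debugger agent can now be shut down symmetrically with EnableAgent().
  v8::Debug::EnableAgent("embedder", 5858, false);
  v8::Debug::DisableAgent();
  return 0;
}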
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 52607f15c..f3ae8078b 100644..100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -84,6 +84,7 @@ SOURCES = { hydrogen.cc hydrogen-instructions.cc ic.cc + incremental-marking.cc inspector.cc interpreter-irregexp.cc isolate.cc @@ -133,6 +134,7 @@ SOURCES = { v8utils.cc variables.cc version.cc + store-buffer.cc zone.cc extensions/gc-extension.cc extensions/externalize-string-extension.cc diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 5c8a3142a..a03b7411c 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -185,7 +185,10 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) { int end_marker; heap_stats.end_marker = &end_marker; i::Isolate* isolate = i::Isolate::Current(); - isolate->heap()->RecordStats(&heap_stats, take_snapshot); + // BUG(1718): + // Don't use the take_snapshot since we don't support HeapIterator here + // without doing a special GC. + isolate->heap()->RecordStats(&heap_stats, false); i::V8::SetFatalError(); FatalErrorCallback callback = GetFatalErrorHandler(); { @@ -501,9 +504,12 @@ void RegisterExtension(Extension* that) { Extension::Extension(const char* name, const char* source, int dep_count, - const char** deps) + const char** deps, + int source_length) : name_(name), - source_(source), + source_length_(source_length >= 0 ? + source_length : (source ? strlen(source) : 0)), + source_(source, source_length_), dep_count_(dep_count), deps_(deps), auto_enable_(false) { } @@ -1407,7 +1413,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) { ScriptData* ScriptData::PreCompile(const char* input, int length) { i::Utf8ToUC16CharacterStream stream( reinterpret_cast<const unsigned char*>(input), length); - return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping); + return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping); } @@ -1416,10 +1422,10 @@ ScriptData* ScriptData::PreCompile(v8::Handle<String> source) { if (str->IsExternalTwoByteString()) { i::ExternalTwoByteStringUC16CharacterStream stream( i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length()); - return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping); + return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping); } else { i::GenericStringUC16CharacterStream stream(str, 0, str->length()); - return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping); + return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping); } } @@ -1781,7 +1787,7 @@ v8::Handle<v8::StackTrace> Message::GetStackTrace() const { static i::Handle<i::Object> CallV8HeapFunction(const char* name, i::Handle<i::Object> recv, int argc, - i::Object** argv[], + i::Handle<i::Object> argv[], bool* has_pending_exception) { i::Isolate* isolate = i::Isolate::Current(); i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name); @@ -1798,10 +1804,10 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name, static i::Handle<i::Object> CallV8HeapFunction(const char* name, i::Handle<i::Object> data, bool* has_pending_exception) { - i::Object** argv[1] = { data.location() }; + i::Handle<i::Object> argv[] = { data }; return CallV8HeapFunction(name, i::Isolate::Current()->js_builtins_object(), - 1, + ARRAY_SIZE(argv), argv, has_pending_exception); } @@ -2621,10 +2627,11 @@ bool Value::Equals(Handle<Value> that) const { if (obj->IsJSObject() && other->IsJSObject()) { return *obj == *other; } - i::Object** args[1] = { 
other.location() }; + i::Handle<i::Object> args[] = { other }; EXCEPTION_PREAMBLE(isolate); i::Handle<i::Object> result = - CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception); + CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args, + &has_pending_exception); EXCEPTION_BAILOUT_CHECK(isolate, false); return *result == i::Smi::FromInt(i::EQUAL); } @@ -3204,21 +3211,10 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key, ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> hidden_props(i::GetHiddenProperties( - self, - i::JSObject::ALLOW_CREATION)); - i::Handle<i::Object> key_obj = Utils::OpenHandle(*key); + i::Handle<i::String> key_obj = Utils::OpenHandle(*key); i::Handle<i::Object> value_obj = Utils::OpenHandle(*value); - EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> obj = i::SetProperty( - hidden_props, - key_obj, - value_obj, - static_cast<PropertyAttributes>(None), - i::kNonStrictMode); - has_pending_exception = obj.is_null(); - EXCEPTION_BAILOUT_CHECK(isolate, false); - return true; + i::Handle<i::Object> result = i::SetHiddenProperty(self, key_obj, value_obj); + return *result == *self; } @@ -3228,20 +3224,9 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) { return Local<v8::Value>()); ENTER_V8(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> hidden_props(i::GetHiddenProperties( - self, - i::JSObject::OMIT_CREATION)); - if (hidden_props->IsUndefined()) { - return v8::Local<v8::Value>(); - } i::Handle<i::String> key_obj = Utils::OpenHandle(*key); - EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj); - has_pending_exception = result.is_null(); - EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>()); - if (result->IsUndefined()) { - return v8::Local<v8::Value>(); - } + i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj)); + if (result->IsUndefined()) return v8::Local<v8::Value>(); return Utils::ToLocal(result); } @@ -3252,15 +3237,9 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) { ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> hidden_props(i::GetHiddenProperties( - self, - i::JSObject::OMIT_CREATION)); - if (hidden_props->IsUndefined()) { - return true; - } - i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props)); i::Handle<i::String> key_obj = Utils::OpenHandle(*key); - return i::DeleteProperty(js_obj, key_obj)->IsTrue(); + self->DeleteHiddenProperty(*key_obj); + return true; } @@ -3310,22 +3289,12 @@ void PrepareExternalArrayElements(i::Handle<i::JSObject> object, i::Handle<i::ExternalArray> array = isolate->factory()->NewExternalArray(length, array_type, data); - // If the object already has external elements, create a new, unique - // map if the element type is now changing, because assumptions about - // generated code based on the receiver's map will be invalid. 
- i::Handle<i::HeapObject> elements(object->elements()); - bool cant_reuse_map = - elements->map()->IsUndefined() || - !elements->map()->has_external_array_elements() || - elements->map() != isolate->heap()->MapForExternalArrayType(array_type); - if (cant_reuse_map) { - i::Handle<i::Map> external_array_map = - isolate->factory()->GetElementsTransitionMap( - i::Handle<i::Map>(object->map()), - GetElementsKindFromExternalArrayType(array_type), - object->HasFastProperties()); - object->set_map(*external_array_map); - } + i::Handle<i::Map> external_array_map = + isolate->factory()->GetElementsTransitionMap( + object, + GetElementsKindFromExternalArrayType(array_type)); + + object->set_map(*external_array_map); object->set_elements(*array); } @@ -3484,7 +3453,8 @@ bool v8::Object::IsCallable() { } -Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc, +Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, + int argc, v8::Handle<v8::Value> argv[]) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Object::CallAsFunction()", @@ -3495,7 +3465,7 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc, i::Handle<i::JSObject> obj = Utils::OpenHandle(this); i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); - i::Object*** args = reinterpret_cast<i::Object***>(argv); + i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv); i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>(); if (obj->IsJSFunction()) { fun = i::Handle<i::JSFunction>::cast(obj); @@ -3525,7 +3495,7 @@ Local<v8::Value> Object::CallAsConstructor(int argc, i::HandleScope scope(isolate); i::Handle<i::JSObject> obj = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); - i::Object*** args = reinterpret_cast<i::Object***>(argv); + i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv); if (obj->IsJSFunction()) { i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj); EXCEPTION_PREAMBLE(isolate); @@ -3567,7 +3537,7 @@ Local<v8::Object> Function::NewInstance(int argc, HandleScope scope; i::Handle<i::JSFunction> function = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); - i::Object*** args = reinterpret_cast<i::Object***>(argv); + i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv); EXCEPTION_PREAMBLE(isolate); i::Handle<i::Object> returned = i::Execution::New(function, argc, args, &has_pending_exception); @@ -3588,7 +3558,7 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc, i::Handle<i::JSFunction> fun = Utils::OpenHandle(this); i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); - i::Object*** args = reinterpret_cast<i::Object***>(argv); + i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv); EXCEPTION_PREAMBLE(isolate); i::Handle<i::Object> returned = i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception); @@ -3799,10 +3769,11 @@ bool v8::String::IsExternalAscii() const { void v8::String::VerifyExternalStringResource( v8::String::ExternalStringResource* value) const { i::Handle<i::String> str = Utils::OpenHandle(this); - v8::String::ExternalStringResource* expected; + const v8::String::ExternalStringResource* expected; if (i::StringShape(*str).IsExternalTwoByte()) { - 
void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource(); - expected = reinterpret_cast<ExternalStringResource*>(resource); + const void* resource = + i::Handle<i::ExternalTwoByteString>::cast(str)->resource(); + expected = reinterpret_cast<const ExternalStringResource*>(resource); } else { expected = NULL; } @@ -3810,7 +3781,7 @@ void v8::String::VerifyExternalStringResource( } -v8::String::ExternalAsciiStringResource* +const v8::String::ExternalAsciiStringResource* v8::String::GetExternalAsciiStringResource() const { i::Handle<i::String> str = Utils::OpenHandle(this); if (IsDeadCheck(str->GetIsolate(), @@ -3818,8 +3789,9 @@ v8::String::ExternalAsciiStringResource* return NULL; } if (i::StringShape(*str).IsExternalAscii()) { - void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource(); - return reinterpret_cast<ExternalAsciiStringResource*>(resource); + const void* resource = + i::Handle<i::ExternalAsciiString>::cast(str)->resource(); + return reinterpret_cast<const ExternalAsciiStringResource*>(resource); } else { return NULL; } @@ -4009,7 +3981,7 @@ bool v8::V8::IdleNotification() { void v8::V8::LowMemoryNotification() { i::Isolate* isolate = i::Isolate::Current(); if (!isolate->IsInitialized()) return; - isolate->heap()->CollectAllGarbage(true); + isolate->heap()->CollectAllAvailableGarbage(); } @@ -4528,6 +4500,7 @@ bool v8::String::MakeExternal( bool v8::String::CanMakeExternal() { + if (!internal::FLAG_clever_optimizations) return false; i::Handle<i::String> obj = Utils::OpenHandle(this); i::Isolate* isolate = obj->GetIsolate(); if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false; @@ -5480,6 +5453,12 @@ bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) { wait_for_connection); } + +void Debug::DisableAgent() { + return i::Isolate::Current()->debugger()->StopAgent(); +} + + void Debug::ProcessDebugMessages() { i::Execution::ProcessDebugMesssages(true); } @@ -5804,6 +5783,16 @@ const HeapGraphNode* HeapGraphNode::GetDominatorNode() const { } +v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const { + i::Isolate* isolate = i::Isolate::Current(); + IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue"); + i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject(); + return v8::Handle<Value>(!object.is_null() ? 
+ ToApi<Value>(object) : ToApi<Value>( + isolate->factory()->undefined_value())); +} + + static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) { return const_cast<i::HeapSnapshot*>( reinterpret_cast<const i::HeapSnapshot*>(snapshot)); diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 3e19a4538..93cecf52b 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -77,6 +77,11 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); Assembler::set_target_address_at(pc_, target); + if (host() != NULL && IsCodeTarget(rmode_)) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } @@ -101,6 +106,10 @@ Object** RelocInfo::target_object_address() { void RelocInfo::set_target_object(Object* target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target)); + if (host() != NULL && target->IsHeapObject()) { + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), &Memory::Object_at(pc_), HeapObject::cast(target)); + } } @@ -131,6 +140,12 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) { ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Memory::Address_at(pc_) = address; + if (host() != NULL) { + // TODO(1550) We are passing NULL as a slot because cell can never be on + // evacuation candidate. + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), NULL, cell); + } } @@ -147,6 +162,11 @@ void RelocInfo::set_call_address(Address target) { ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target; + if (host() != NULL) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } @@ -195,7 +215,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() { void RelocInfo::Visit(ObjectVisitor* visitor) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - visitor->VisitPointer(target_object_address()); + visitor->VisitEmbeddedPointer(this); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { @@ -221,7 +241,7 @@ template<typename StaticVisitor> void RelocInfo::Visit(Heap* heap) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - StaticVisitor::VisitPointer(heap, target_object_address()); + StaticVisitor::VisitEmbeddedPointer(heap, this); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(heap, this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 0ec36921a..329493a34 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -78,7 +78,9 @@ static uint64_t CpuFeaturesImpliedByCompiler() { void CpuFeatures::Probe() { - ASSERT(!initialized_); + unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() | + CpuFeaturesImpliedByCompiler()); + ASSERT(supported_ == 0 || supported_ == 
standard_features); #ifdef DEBUG initialized_ = true; #endif @@ -86,8 +88,7 @@ void CpuFeatures::Probe() { // Get the features implied by the OS and the compiler settings. This is the // minimal set of features which is also alowed for generated code in the // snapshot. - supported_ |= OS::CpuFeaturesImpliedByPlatform(); - supported_ |= CpuFeaturesImpliedByCompiler(); + supported_ |= standard_features; if (Serializer::enabled()) { // No probing for features if we might serialize (generate snapshot). @@ -2505,7 +2506,8 @@ void Assembler::dd(uint32_t data) { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { - RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants + // We do not try to reuse pool constants. + RelocInfo rinfo(pc_, rmode, data, NULL); if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { // Adjust code for new modes. ASSERT(RelocInfo::IsDebugBreakSlot(rmode) @@ -2537,7 +2539,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { } ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { - RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId()); + RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL); ClearRecordedAstId(); reloc_info_writer.Write(&reloc_info_with_ast_id); } else { diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 9a586936f..d19b64da5 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -1209,6 +1209,10 @@ class Assembler : public AssemblerBase { PositionsRecorder* positions_recorder() { return &positions_recorder_; } // Read/patch instructions + Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } + void instr_at_put(int pos, Instr instr) { + *reinterpret_cast<Instr*>(buffer_ + pos) = instr; + } static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); } static void instr_at_put(byte* pc, Instr instr) { *reinterpret_cast<Instr*>(pc) = instr; @@ -1263,12 +1267,6 @@ class Assembler : public AssemblerBase { int buffer_space() const { return reloc_info_writer.pos() - pc_; } - // Read/patch instructions - Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } - void instr_at_put(int pos, Instr instr) { - *reinterpret_cast<Instr*>(buffer_ + pos) = instr; - } - // Decode branch instruction at pos and return branch target pos int target_at(int pos); diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 60d2081c2..32b7896a5 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -582,10 +582,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { __ bind(&convert_argument); __ push(function); // Preserve the function. __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4); - __ EnterInternalFrame(); - __ push(r0); - __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); + __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); + } __ pop(function); __ mov(argument, r0); __ b(&argument_is_string); @@ -601,10 +602,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // create a string wrapper. 
__ bind(&gc_required); __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4); - __ EnterInternalFrame(); - __ push(argument); - __ CallRuntime(Runtime::kNewStringWrapper, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(argument); + __ CallRuntime(Runtime::kNewStringWrapper, 1); + } __ Ret(); } @@ -617,12 +619,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // -- sp[...]: constructor arguments // ----------------------------------- - Label non_function_call; + Label slow, non_function_call; // Check that the function is not a smi. __ JumpIfSmi(r1, &non_function_call); // Check that the function is a JSFunction. __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &non_function_call); + __ b(ne, &slow); // Jump to the function-specific construct stub. __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); @@ -631,10 +633,19 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // r0: number of arguments // r1: called object + // r2: object type + Label do_call; + __ bind(&slow); + __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE)); + __ b(ne, &non_function_call); + __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); + __ jmp(&do_call); + __ bind(&non_function_call); + __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); + __ bind(&do_call); // Set expected number of arguments to zero (not changing r0). __ mov(r2, Operand(0, RelocInfo::NONE)); - __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); __ SetCallKind(r5, CALL_AS_METHOD); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); @@ -650,321 +661,329 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, Isolate* isolate = masm->isolate(); // Enter a construct frame. - __ EnterConstructFrame(); - - // Preserve the two incoming parameters on the stack. - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); - __ push(r0); // Smi-tagged arguments count. - __ push(r1); // Constructor function. - - // Try to allocate the object without transitioning into C code. If any of the - // preconditions is not met, the code bails out to the runtime call. - Label rt_call, allocated; - if (FLAG_inline_new) { - Label undo_allocation; + { + FrameScope scope(masm, StackFrame::CONSTRUCT); + + // Preserve the two incoming parameters on the stack. + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ push(r0); // Smi-tagged arguments count. + __ push(r1); // Constructor function. + + // Try to allocate the object without transitioning into C code. If any of + // the preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; #ifdef ENABLE_DEBUGGER_SUPPORT - ExternalReference debug_step_in_fp = - ExternalReference::debug_step_in_fp_address(isolate); - __ mov(r2, Operand(debug_step_in_fp)); - __ ldr(r2, MemOperand(r2)); - __ tst(r2, r2); - __ b(ne, &rt_call); + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(isolate); + __ mov(r2, Operand(debug_step_in_fp)); + __ ldr(r2, MemOperand(r2)); + __ tst(r2, r2); + __ b(ne, &rt_call); #endif - // Load the initial map and verify that it is in fact a map. 
- // r1: constructor function - __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); - __ JumpIfSmi(r2, &rt_call); - __ CompareObjectType(r2, r3, r4, MAP_TYPE); - __ b(ne, &rt_call); + // Load the initial map and verify that it is in fact a map. + // r1: constructor function + __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + __ JumpIfSmi(r2, &rt_call); + __ CompareObjectType(r2, r3, r4, MAP_TYPE); + __ b(ne, &rt_call); + + // Check that the constructor is not constructing a JSFunction (see + // comments in Runtime_NewObject in runtime.cc). In which case the + // initial map's instance type would be JS_FUNCTION_TYPE. + // r1: constructor function + // r2: initial map + __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); + __ b(eq, &rt_call); - // Check that the constructor is not constructing a JSFunction (see comments - // in Runtime_NewObject in runtime.cc). In which case the initial map's - // instance type would be JS_FUNCTION_TYPE. - // r1: constructor function - // r2: initial map - __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); - __ b(eq, &rt_call); - - if (count_constructions) { - Label allocate; - // Decrease generous allocation count. - __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - MemOperand constructor_count = - FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset); - __ ldrb(r4, constructor_count); - __ sub(r4, r4, Operand(1), SetCC); - __ strb(r4, constructor_count); - __ b(ne, &allocate); - - __ Push(r1, r2); - - __ push(r1); // constructor - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); - - __ pop(r2); - __ pop(r1); - - __ bind(&allocate); - } - - // Now allocate the JSObject on the heap. - // r1: constructor function - // r2: initial map - __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); - __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS); + if (count_constructions) { + Label allocate; + // Decrease generous allocation count. + __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + MemOperand constructor_count = + FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset); + __ ldrb(r4, constructor_count); + __ sub(r4, r4, Operand(1), SetCC); + __ strb(r4, constructor_count); + __ b(ne, &allocate); + + __ Push(r1, r2); + + __ push(r1); // constructor + // The call will replace the stub, so the countdown is only done once. + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + + __ pop(r2); + __ pop(r1); + + __ bind(&allocate); + } - // Allocated the JSObject, now initialize the fields. Map is set to initial - // map and properties and elements are set to empty fixed array. - // r1: constructor function - // r2: initial map - // r3: object size - // r4: JSObject (not tagged) - __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); - __ mov(r5, r4); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); - __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); - __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); - - // Fill all the in-object properties with the appropriate filler. 
- // r1: constructor function - // r2: initial map - // r3: object size (in words) - // r4: JSObject (not tagged) - // r5: First in-object property of JSObject (not tagged) - __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. - ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); - { Label loop, entry; + // Now allocate the JSObject on the heap. + // r1: constructor function + // r2: initial map + __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); + __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS); + + // Allocated the JSObject, now initialize the fields. Map is set to + // initial map and properties and elements are set to empty fixed array. + // r1: constructor function + // r2: initial map + // r3: object size + // r4: JSObject (not tagged) + __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); + __ mov(r5, r4); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); + + // Fill all the in-object properties with the appropriate filler. + // r1: constructor function + // r2: initial map + // r3: object size (in words) + // r4: JSObject (not tagged) + // r5: First in-object property of JSObject (not tagged) + __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. + ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); if (count_constructions) { + __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); + __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, + kBitsPerByte); + __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2)); + // r0: offset of first field after pre-allocated fields + if (FLAG_debug_code) { + __ cmp(r0, r6); + __ Assert(le, "Unexpected number of pre-allocated property fields."); + } + __ InitializeFieldsWithFiller(r5, r0, r7); // To allow for truncation. __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex); - } else { - __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); } - __ b(&entry); - __ bind(&loop); - __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); - __ bind(&entry); - __ cmp(r5, r6); - __ b(lt, &loop); - } + __ InitializeFieldsWithFiller(r5, r6, r7); + + // Add the object tag to make the JSObject real, so that we can continue + // and jump into the continuation code at any time from now on. Any + // failures need to undo the allocation, so that the heap is in a + // consistent state and verifiable. + __ add(r4, r4, Operand(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. Continue with + // allocated object if not fall through to runtime call if it is. + // r1: constructor function + // r4: JSObject + // r5: start of next object (not tagged) + __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset)); + // The field instance sizes contains both pre-allocated property fields + // and in-object properties. + __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); + __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, + kBitsPerByte); + __ add(r3, r3, Operand(r6)); + __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte, + kBitsPerByte); + __ sub(r3, r3, Operand(r6), SetCC); + + // Done if no extra properties are to be allocated. 
+ __ b(eq, &allocated); + __ Assert(pl, "Property allocation count failed."); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // r1: constructor + // r3: number of elements in properties array + // r4: JSObject + // r5: start of next object + __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ AllocateInNewSpace( + r0, + r5, + r6, + r2, + &undo_allocation, + static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); + + // Initialize the FixedArray. + // r1: constructor + // r3: number of elements in properties array + // r4: JSObject + // r5: FixedArray (not tagged) + __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); + __ mov(r2, r5); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); + ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + __ mov(r0, Operand(r3, LSL, kSmiTagSize)); + __ str(r0, MemOperand(r2, kPointerSize, PostIndex)); + + // Initialize the fields to undefined. + // r1: constructor function + // r2: First element of FixedArray (not tagged) + // r3: number of elements in properties array + // r4: JSObject + // r5: FixedArray (not tagged) + __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. + ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + { Label loop, entry; + if (count_constructions) { + __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + } else if (FLAG_debug_code) { + __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); + __ cmp(r7, r8); + __ Assert(eq, "Undefined value not loaded."); + } + __ b(&entry); + __ bind(&loop); + __ str(r7, MemOperand(r2, kPointerSize, PostIndex)); + __ bind(&entry); + __ cmp(r2, r6); + __ b(lt, &loop); + } - // Add the object tag to make the JSObject real, so that we can continue and - // jump into the continuation code at any time from now on. Any failures - // need to undo the allocation, so that the heap is in a consistent state - // and verifiable. - __ add(r4, r4, Operand(kHeapObjectTag)); + // Store the initialized FixedArray into the properties field of + // the JSObject + // r1: constructor function + // r4: JSObject + // r5: FixedArray (not tagged) + __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag. + __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset)); + + // Continue with JSObject being successfully allocated + // r1: constructor function + // r4: JSObject + __ jmp(&allocated); + + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // r4: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(r4, r5); + } - // Check if a non-empty properties array is needed. Continue with allocated - // object if not fall through to runtime call if it is. + // Allocate the new receiver object using the runtime call. // r1: constructor function + __ bind(&rt_call); + __ push(r1); // argument for Runtime_NewObject + __ CallRuntime(Runtime::kNewObject, 1); + __ mov(r4, r0); + + // Receiver for constructor call allocated. // r4: JSObject - // r5: start of next object (not tagged) - __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset)); - // The field instance sizes contains both pre-allocated property fields and - // in-object properties. 
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); - __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8); - __ add(r3, r3, Operand(r6)); - __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8); - __ sub(r3, r3, Operand(r6), SetCC); - - // Done if no extra properties are to be allocated. - __ b(eq, &allocated); - __ Assert(pl, "Property allocation count failed."); - - // Scale the number of elements by pointer size and add the header for - // FixedArrays to the start of the next object calculation from above. - // r1: constructor - // r3: number of elements in properties array - // r4: JSObject - // r5: start of next object - __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); - __ AllocateInNewSpace( - r0, - r5, - r6, - r2, - &undo_allocation, - static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); - - // Initialize the FixedArray. - // r1: constructor - // r3: number of elements in properties array - // r4: JSObject - // r5: FixedArray (not tagged) - __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); - __ mov(r2, r5); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - __ str(r6, MemOperand(r2, kPointerSize, PostIndex)); - ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); - __ mov(r0, Operand(r3, LSL, kSmiTagSize)); - __ str(r0, MemOperand(r2, kPointerSize, PostIndex)); - - // Initialize the fields to undefined. + __ bind(&allocated); + __ push(r4); + + // Push the function and the allocated receiver from the stack. + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(r1, MemOperand(sp, kPointerSize)); + __ push(r1); // Constructor function. + __ push(r4); // Receiver. + + // Reload the number of arguments from the stack. // r1: constructor function - // r2: First element of FixedArray (not tagged) - // r3: number of elements in properties array - // r4: JSObject - // r5: FixedArray (not tagged) - __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. - ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); - { Label loop, entry; - if (count_constructions) { - __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); - } else if (FLAG_debug_code) { - __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); - __ cmp(r7, r8); - __ Assert(eq, "Undefined value not loaded."); - } - __ b(&entry); - __ bind(&loop); - __ str(r7, MemOperand(r2, kPointerSize, PostIndex)); - __ bind(&entry); - __ cmp(r2, r6); - __ b(lt, &loop); - } - - // Store the initialized FixedArray into the properties field of - // the JSObject + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + __ ldr(r3, MemOperand(sp, 4 * kPointerSize)); + + // Setup pointer to last argument. + __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); + + // Setup number of arguments for function call below + __ mov(r0, Operand(r3, LSR, kSmiTagSize)); + + // Copy arguments and receiver to the expression stack. + // r0: number of arguments + // r2: address of last argument (caller sp) // r1: constructor function - // r4: JSObject - // r5: FixedArray (not tagged) - __ add(r5, r5, Operand(kHeapObjectTag)); // Add the heap tag. 
- __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset)); + // r3: number of arguments (smi-tagged) + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + Label loop, entry; + __ b(&entry); + __ bind(&loop); + __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1)); + __ push(ip); + __ bind(&entry); + __ sub(r3, r3, Operand(2), SetCC); + __ b(ge, &loop); - // Continue with JSObject being successfully allocated + // Call the function. + // r0: number of arguments // r1: constructor function - // r4: JSObject - __ jmp(&allocated); - - // Undo the setting of the new top so that the heap is verifiable. For - // example, the map's unused properties potentially do not match the - // allocated objects unused properties. - // r4: JSObject (previous new top) - __ bind(&undo_allocation); - __ UndoAllocationInNewSpace(r4, r5); - } - - // Allocate the new receiver object using the runtime call. - // r1: constructor function - __ bind(&rt_call); - __ push(r1); // argument for Runtime_NewObject - __ CallRuntime(Runtime::kNewObject, 1); - __ mov(r4, r0); - - // Receiver for constructor call allocated. - // r4: JSObject - __ bind(&allocated); - __ push(r4); - - // Push the function and the allocated receiver from the stack. - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(r1, MemOperand(sp, kPointerSize)); - __ push(r1); // Constructor function. - __ push(r4); // Receiver. - - // Reload the number of arguments from the stack. - // r1: constructor function - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - __ ldr(r3, MemOperand(sp, 4 * kPointerSize)); - - // Setup pointer to last argument. - __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); - - // Setup number of arguments for function call below - __ mov(r0, Operand(r3, LSR, kSmiTagSize)); + if (is_api_function) { + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + Handle<Code> code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + ParameterCount expected(0); + __ InvokeCode(code, expected, expected, + RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); + } else { + ParameterCount actual(r0); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } - // Copy arguments and receiver to the expression stack. - // r0: number of arguments - // r2: address of last argument (caller sp) - // r1: constructor function - // r3: number of arguments (smi-tagged) - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - Label loop, entry; - __ b(&entry); - __ bind(&loop); - __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1)); - __ push(ip); - __ bind(&entry); - __ sub(r3, r3, Operand(2), SetCC); - __ b(ge, &loop); + // Pop the function from the stack. + // sp[0]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + __ pop(); - // Call the function. 
- // r0: number of arguments - // r1: constructor function - if (is_api_function) { - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - Handle<Code> code = - masm->isolate()->builtins()->HandleApiCallConstruct(); - ParameterCount expected(0); - __ InvokeCode(code, expected, expected, - RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); - } else { - ParameterCount actual(r0); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Restore context from the frame. + // r0: result + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, exit; + + // If the result is a smi, it is *not* an object in the ECMA sense. + // r0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ JumpIfSmi(r0, &use_receiver); + + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. + __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE); + __ b(ge, &exit); + + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ ldr(r0, MemOperand(sp)); + + // Remove receiver from the stack, remove caller arguments, and + // return. + __ bind(&exit); + // r0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); + + // Leave construct frame. } - // Pop the function from the stack. - // sp[0]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - __ pop(); - - // Restore context from the frame. - // r0: result - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - - // If the result is an object (in the ECMA sense), we should get rid - // of the receiver and use the result; see ECMA-262 section 13.2.2-7 - // on page 74. - Label use_receiver, exit; - - // If the result is a smi, it is *not* an object in the ECMA sense. - // r0: result - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ JumpIfSmi(r0, &use_receiver); - - // If the type of the result (stored in its map) is less than - // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. - __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &exit); - - // Throw away the result of the constructor invocation and use the - // on-stack receiver as the result. - __ bind(&use_receiver); - __ ldr(r0, MemOperand(sp)); - - // Remove receiver from the stack, remove caller arguments, and - // return. 
- __ bind(&exit); - // r0: result - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ ldr(r1, MemOperand(sp, 2 * kPointerSize)); - __ LeaveConstructFrame(); __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1)); __ add(sp, sp, Operand(kPointerSize)); __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2); @@ -997,63 +1016,64 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r4: argv // r5-r7, cp may be clobbered - // Clear the context before we push it when entering the JS frame. + // Clear the context before we push it when entering the internal frame. __ mov(cp, Operand(0, RelocInfo::NONE)); // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Set up the context from the function argument. - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + // Set up the context from the function argument. + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - // Set up the roots register. - ExternalReference roots_address = - ExternalReference::roots_address(masm->isolate()); - __ mov(r10, Operand(roots_address)); + // Set up the roots register. + ExternalReference roots_address = + ExternalReference::roots_address(masm->isolate()); + __ mov(r10, Operand(roots_address)); - // Push the function and the receiver onto the stack. - __ push(r1); - __ push(r2); + // Push the function and the receiver onto the stack. + __ push(r1); + __ push(r2); - // Copy arguments to the stack in a loop. - // r1: function - // r3: argc - // r4: argv, i.e. points to first arg - Label loop, entry; - __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2)); - // r2 points past last arg. - __ b(&entry); - __ bind(&loop); - __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter - __ ldr(r0, MemOperand(r0)); // dereference handle - __ push(r0); // push parameter - __ bind(&entry); - __ cmp(r4, r2); - __ b(ne, &loop); - - // Initialize all JavaScript callee-saved registers, since they will be seen - // by the garbage collector as part of handlers. - __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); - __ mov(r5, Operand(r4)); - __ mov(r6, Operand(r4)); - __ mov(r7, Operand(r4)); - if (kR9Available == 1) { - __ mov(r9, Operand(r4)); - } + // Copy arguments to the stack in a loop. + // r1: function + // r3: argc + // r4: argv, i.e. points to first arg + Label loop, entry; + __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2)); + // r2 points past last arg. + __ b(&entry); + __ bind(&loop); + __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter + __ ldr(r0, MemOperand(r0)); // dereference handle + __ push(r0); // push parameter + __ bind(&entry); + __ cmp(r4, r2); + __ b(ne, &loop); - // Invoke the code and pass argc as r0. - __ mov(r0, Operand(r3)); - if (is_construct) { - __ Call(masm->isolate()->builtins()->JSConstructCall()); - } else { - ParameterCount actual(r0); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } + // Initialize all JavaScript callee-saved registers, since they will be seen + // by the garbage collector as part of handlers. + __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); + __ mov(r5, Operand(r4)); + __ mov(r6, Operand(r4)); + __ mov(r7, Operand(r4)); + if (kR9Available == 1) { + __ mov(r9, Operand(r4)); + } - // Exit the JS frame and remove the parameters (except function), and return. - // Respect ABI stack constraint. 
- __ LeaveInternalFrame(); + // Invoke the code and pass argc as r0. + __ mov(r0, Operand(r3)); + if (is_construct) { + __ Call(masm->isolate()->builtins()->JSConstructCall()); + } else { + ParameterCount actual(r0); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } + // Exit the JS frame and remove the parameters (except function), and + // return. + // Respect ABI stack constraint. + } __ Jump(lr); // r0: result @@ -1072,26 +1092,27 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { void Builtins::Generate_LazyCompile(MacroAssembler* masm) { // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Preserve the function. - __ push(r1); - // Push call kind information. - __ push(r5); + // Preserve the function. + __ push(r1); + // Push call kind information. + __ push(r5); - // Push the function on the stack as the argument to the runtime function. - __ push(r1); - __ CallRuntime(Runtime::kLazyCompile, 1); - // Calculate the entry point. - __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Push the function on the stack as the argument to the runtime function. + __ push(r1); + __ CallRuntime(Runtime::kLazyCompile, 1); + // Calculate the entry point. + __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore call kind information. - __ pop(r5); - // Restore saved function. - __ pop(r1); + // Restore call kind information. + __ pop(r5); + // Restore saved function. + __ pop(r1); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Do a tail-call of the compiled function. __ Jump(r2); @@ -1100,26 +1121,27 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) { void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Preserve the function. - __ push(r1); - // Push call kind information. - __ push(r5); + // Preserve the function. + __ push(r1); + // Push call kind information. + __ push(r5); - // Push the function on the stack as the argument to the runtime function. - __ push(r1); - __ CallRuntime(Runtime::kLazyRecompile, 1); - // Calculate the entry point. - __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Push the function on the stack as the argument to the runtime function. + __ push(r1); + __ CallRuntime(Runtime::kLazyRecompile, 1); + // Calculate the entry point. + __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore call kind information. - __ pop(r5); - // Restore saved function. - __ pop(r1); + // Restore call kind information. + __ pop(r5); + // Restore saved function. + __ pop(r1); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Do a tail-call of the compiled function. __ Jump(r2); @@ -1128,12 +1150,13 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { - __ EnterInternalFrame(); - // Pass the function and deoptimization type to the runtime system. - __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type)))); - __ push(r0); - __ CallRuntime(Runtime::kNotifyDeoptimized, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Pass the function and deoptimization type to the runtime system. 
+ __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type)))); + __ push(r0); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + } // Get the full codegen state from the stack and untag it -> r6. __ ldr(r6, MemOperand(sp, 0 * kPointerSize)); @@ -1173,9 +1196,10 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { // the registers without worrying about which of them contain // pointers. This seems a bit fragile. __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); - __ EnterInternalFrame(); - __ CallRuntime(Runtime::kNotifyOSR, 0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kNotifyOSR, 0); + } __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); __ Ret(); } @@ -1191,10 +1215,11 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame and push it as an // argument to the on-stack replacement function. __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - __ EnterInternalFrame(); - __ push(r0); - __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + } // If the result was -1 it means that we couldn't optimize the // function. Just return and continue in the unoptimized version. @@ -1276,17 +1301,23 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ b(ge, &shift_arguments); __ bind(&convert_to_object); - __ EnterInternalFrame(); // In order to preserve argument count. - __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged. - __ push(r0); - __ push(r2); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ mov(r2, r0); + { + // Enter an internal frame in order to preserve argument count. + FrameScope scope(masm, StackFrame::INTERNAL); + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Smi-tagged. + __ push(r0); + + __ push(r2); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(r2, r0); + + __ pop(r0); + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + + // Exit the internal frame. + } - __ pop(r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); - __ LeaveInternalFrame(); // Restore the function to r1, and the flag to r4. __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); __ mov(r4, Operand(0, RelocInfo::NONE)); @@ -1406,156 +1437,157 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { const int kRecvOffset = 3 * kPointerSize; const int kFunctionOffset = 4 * kPointerSize; - __ EnterInternalFrame(); + { + FrameScope frame_scope(masm, StackFrame::INTERNAL); - __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function - __ push(r0); - __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array - __ push(r0); - __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - - // Check the stack for overflow. We are not trying to catch - // interruptions (e.g. debug break and preemption) here, so the "real stack - // limit" is checked. - Label okay; - __ LoadRoot(r2, Heap::kRealStackLimitRootIndex); - // Make r2 the space we have left. The stack might already be overflowed - // here which will cause r2 to become negative. - __ sub(r2, sp, r2); - // Check if the arguments will overflow the stack. - __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ b(gt, &okay); // Signed comparison. - - // Out of stack space. 
- __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ push(r1); - __ push(r0); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); - // End of stack check. + __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function + __ push(r0); + __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array + __ push(r0); + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadRoot(r2, Heap::kRealStackLimitRootIndex); + // Make r2 the space we have left. The stack might already be overflowed + // here which will cause r2 to become negative. + __ sub(r2, sp, r2); + // Check if the arguments will overflow the stack. + __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ b(gt, &okay); // Signed comparison. + + // Out of stack space. + __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ push(r1); + __ push(r0); + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + // End of stack check. - // Push current limit and index. - __ bind(&okay); - __ push(r0); // limit - __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index - __ push(r1); + // Push current limit and index. + __ bind(&okay); + __ push(r0); // limit + __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index + __ push(r1); - // Get the receiver. - __ ldr(r0, MemOperand(fp, kRecvOffset)); + // Get the receiver. + __ ldr(r0, MemOperand(fp, kRecvOffset)); - // Check that the function is a JS function (otherwise it must be a proxy). - Label push_receiver; - __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &push_receiver); + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver; + __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ b(ne, &push_receiver); - // Change context eagerly to get the right global object if necessary. - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - // Load the shared function info while the function is still in r1. - __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + // Change context eagerly to get the right global object if necessary. + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + // Load the shared function info while the function is still in r1. + __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - // Compute the receiver. - // Do not transform the receiver for strict mode functions. - Label call_to_object, use_global_receiver; - __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); - __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + - kSmiTagSize))); - __ b(ne, &push_receiver); - - // Do not transform the receiver for strict mode functions. - __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); - __ b(ne, &push_receiver); - - // Compute the receiver in non-strict mode. - __ JumpIfSmi(r0, &call_to_object); - __ LoadRoot(r1, Heap::kNullValueRootIndex); - __ cmp(r0, r1); - __ b(eq, &use_global_receiver); - __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); - __ cmp(r0, r1); - __ b(eq, &use_global_receiver); - - // Check if the receiver is already a JavaScript object. 
- // r0: receiver - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, &push_receiver); - - // Convert the receiver to a regular object. - // r0: receiver - __ bind(&call_to_object); - __ push(r0); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ b(&push_receiver); - - // Use the current global receiver object as the receiver. - __ bind(&use_global_receiver); - const int kGlobalOffset = - Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; - __ ldr(r0, FieldMemOperand(cp, kGlobalOffset)); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); - __ ldr(r0, FieldMemOperand(r0, kGlobalOffset)); - __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); - - // Push the receiver. - // r0: receiver - __ bind(&push_receiver); - __ push(r0); + // Compute the receiver. + // Do not transform the receiver for strict mode functions. + Label call_to_object, use_global_receiver; + __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset)); + __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ b(ne, &push_receiver); - // Copy all arguments from the array to the stack. - Label entry, loop; - __ ldr(r0, MemOperand(fp, kIndexOffset)); - __ b(&entry); + // Do not transform the receiver for strict mode functions. + __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); + __ b(ne, &push_receiver); - // Load the current argument from the arguments array and push it to the - // stack. - // r0: current argument index - __ bind(&loop); - __ ldr(r1, MemOperand(fp, kArgsOffset)); - __ push(r1); - __ push(r0); + // Compute the receiver in non-strict mode. + __ JumpIfSmi(r0, &call_to_object); + __ LoadRoot(r1, Heap::kNullValueRootIndex); + __ cmp(r0, r1); + __ b(eq, &use_global_receiver); + __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + __ cmp(r0, r1); + __ b(eq, &use_global_receiver); - // Call the runtime to access the property in the arguments array. - __ CallRuntime(Runtime::kGetProperty, 2); - __ push(r0); + // Check if the receiver is already a JavaScript object. + // r0: receiver + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); + __ b(ge, &push_receiver); - // Use inline caching to access the arguments. - __ ldr(r0, MemOperand(fp, kIndexOffset)); - __ add(r0, r0, Operand(1 << kSmiTagSize)); - __ str(r0, MemOperand(fp, kIndexOffset)); + // Convert the receiver to a regular object. + // r0: receiver + __ bind(&call_to_object); + __ push(r0); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ b(&push_receiver); - // Test if the copy loop has finished copying all the elements from the - // arguments object. - __ bind(&entry); - __ ldr(r1, MemOperand(fp, kLimitOffset)); - __ cmp(r0, r1); - __ b(ne, &loop); - - // Invoke the function. - Label call_proxy; - ParameterCount actual(r0); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); - __ ldr(r1, MemOperand(fp, kFunctionOffset)); - __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); - __ b(ne, &call_proxy); - __ InvokeFunction(r1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Use the current global receiver object as the receiver. 
+ __ bind(&use_global_receiver); + const int kGlobalOffset = + Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; + __ ldr(r0, FieldMemOperand(cp, kGlobalOffset)); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset)); + __ ldr(r0, FieldMemOperand(r0, kGlobalOffset)); + __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); + + // Push the receiver. + // r0: receiver + __ bind(&push_receiver); + __ push(r0); - // Tear down the internal frame and remove function, receiver and args. - __ LeaveInternalFrame(); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Jump(lr); + // Copy all arguments from the array to the stack. + Label entry, loop; + __ ldr(r0, MemOperand(fp, kIndexOffset)); + __ b(&entry); - // Invoke the function proxy. - __ bind(&call_proxy); - __ push(r1); // add function proxy as last argument - __ add(r0, r0, Operand(1)); - __ mov(r2, Operand(0, RelocInfo::NONE)); - __ SetCallKind(r5, CALL_AS_METHOD); - __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); - __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + // Load the current argument from the arguments array and push it to the + // stack. + // r0: current argument index + __ bind(&loop); + __ ldr(r1, MemOperand(fp, kArgsOffset)); + __ push(r1); + __ push(r0); + + // Call the runtime to access the property in the arguments array. + __ CallRuntime(Runtime::kGetProperty, 2); + __ push(r0); + + // Use inline caching to access the arguments. + __ ldr(r0, MemOperand(fp, kIndexOffset)); + __ add(r0, r0, Operand(1 << kSmiTagSize)); + __ str(r0, MemOperand(fp, kIndexOffset)); - __ LeaveInternalFrame(); + // Test if the copy loop has finished copying all the elements from the + // arguments object. + __ bind(&entry); + __ ldr(r1, MemOperand(fp, kLimitOffset)); + __ cmp(r0, r1); + __ b(ne, &loop); + + // Invoke the function. + Label call_proxy; + ParameterCount actual(r0); + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + __ ldr(r1, MemOperand(fp, kFunctionOffset)); + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ b(ne, &call_proxy); + __ InvokeFunction(r1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + + frame_scope.GenerateLeaveFrame(); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Jump(lr); + + // Invoke the function proxy. + __ bind(&call_proxy); + __ push(r1); // add function proxy as last argument + __ add(r0, r0, Operand(1)); + __ mov(r2, Operand(0, RelocInfo::NONE)); + __ SetCallKind(r5, CALL_AS_METHOD); + __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); + __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + + // Tear down the internal frame and remove function, receiver and args. + } __ add(sp, sp, Operand(3 * kPointerSize)); __ Jump(lr); } diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index e65f6d9b6..44923a184 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -189,6 +189,72 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } +void FastNewBlockContextStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [sp]: function. + // [sp + kPointerSize]: serialized scope info + + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + __ AllocateInNewSpace(FixedArray::SizeFor(length), + r0, r1, r2, &gc, TAG_OBJECT); + + // Load the function from the stack. 
+ __ ldr(r3, MemOperand(sp, 0)); + + // Load the serialized scope info from the stack. + __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); + + // Setup the object header. + __ LoadRoot(r2, Heap::kBlockContextMapRootIndex); + __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ mov(r2, Operand(Smi::FromInt(length))); + __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); + + // If this block context is nested in the global context we get a smi + // sentinel instead of a function. The block context should get the + // canonical empty function of the global context as its closure which + // we still have to look up. + Label after_sentinel; + __ JumpIfNotSmi(r3, &after_sentinel); + if (FLAG_debug_code) { + const char* message = "Expected 0 as a Smi sentinel"; + __ cmp(r3, Operand::Zero()); + __ Assert(eq, message); + } + __ ldr(r3, GlobalObjectOperand()); + __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset)); + __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX)); + __ bind(&after_sentinel); + + // Setup the fixed slots. + __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX)); + __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX)); + __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX)); + + // Copy the global object from the previous context. + __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX)); + __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX)); + + // Initialize the rest of the slots to the hole value. + __ LoadRoot(r1, Heap::kTheHoleValueRootIndex); + for (int i = 0; i < slots_; i++) { + __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS)); + } + + // Remove the on-stack argument and return. + __ mov(cp, r0); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); +} + + void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // Stack layout on entry: // @@ -838,9 +904,11 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } - // Call C routine that may not cause GC or other trouble. - __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), - 0, 2); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); + } // Store answer in the overwritable heap number. Double returned in // registers r0 and r1 or in d0. if (masm->use_eabi_hardfloat()) { @@ -857,6 +925,29 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( } +bool WriteInt32ToHeapNumberStub::IsPregenerated() { + // These variants are compiled ahead of time. See next method. + if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { + return true; + } + if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { + return true; + } + // Other register combinations are generated as and when they are needed, + // so it is unsafe to call them from stubs (we can't generate a stub while + // we are generating a stub). + return false; +} + + +void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() { + WriteInt32ToHeapNumberStub stub1(r1, r0, r2); + WriteInt32ToHeapNumberStub stub2(r2, r0, r3); + stub1.GetCode()->set_is_pregenerated(true); + stub2.GetCode()->set_is_pregenerated(true); +} + + // See comment for class. 
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { Label max_negative_int; @@ -1197,6 +1288,8 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } + + AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 0, 2); __ pop(pc); // Return. @@ -1214,7 +1307,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, // If either operand is a JS object or an oddball value, then they are // not equal since their pointers are different. // There is no test for undetectability in strict equality. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); Label first_non_object; // Get the type of the first operand into r2 and compare it with // FIRST_SPEC_OBJECT_TYPE. @@ -1606,6 +1699,8 @@ void CompareStub::Generate(MacroAssembler* masm) { // The stub expects its argument in the tos_ register and returns its result in // it, too: zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. // This stub uses VFP3 instructions. CpuFeatures::Scope scope(VFP3); @@ -1713,6 +1808,41 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { } +void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { + // We don't allow a GC during a store buffer overflow so there is no need to + // store the registers in any particular way, but we do have to store and + // restore them. + __ stm(db_w, sp, kCallerSaved | lr.bit()); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(VFP3); + __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); + for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + __ vstr(reg, MemOperand(sp, i * kDoubleSize)); + } + } + const int argument_count = 1; + const int fp_argument_count = 0; + const Register scratch = r1; + + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); + __ mov(r0, Operand(ExternalReference::isolate_address())); + __ CallCFunction( + ExternalReference::store_buffer_overflow_function(masm->isolate()), + argument_count); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(VFP3); + for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + __ vldr(reg, MemOperand(sp, i * kDoubleSize)); + } + __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); + } + __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). +} + + void UnaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name = NULL; // Make g++ happy. 
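
The IsPregenerated()/GenerateFixedRegStubsAheadOfTime() pair added above restricts WriteInt32ToHeapNumberStub (and, further down in this patch, RecordWriteStub and StoreBufferOverflowStub) to a small table of register combinations that are compiled ahead of time, so such stubs can be called while another stub is being generated. The following is a minimal standalone sketch of that membership check; the struct, register codes and table contents are illustrative stand-ins, not V8's real types.

#include <cassert>

// Toy register triple; the table mirrors the two combinations pregenerated
// above (r1/r0/r2 and r2/r0/r3) but is otherwise hypothetical.
struct ToyRegs { int the_int, the_heap_number, scratch; };

constexpr ToyRegs kAheadOfTime[] = {
  {1, 0, 2},  // mirrors WriteInt32ToHeapNumberStub(r1, r0, r2)
  {2, 0, 3},  // mirrors WriteInt32ToHeapNumberStub(r2, r0, r3)
};

bool IsPregenerated(const ToyRegs& regs) {
  for (const ToyRegs& entry : kAheadOfTime) {
    if (entry.the_int == regs.the_int &&
        entry.the_heap_number == regs.the_heap_number &&
        entry.scratch == regs.scratch) {
      return true;
    }
  }
  // Any other combination would have to be generated on demand, which is
  // unsafe from inside stub generation.
  return false;
}

int main() {
  assert(IsPregenerated({1, 0, 2}));
  assert(!IsPregenerated({3, 0, 4}));
  return 0;
}

The same table-lookup shape reappears later in this file for the write barrier stubs (kAheadOfTime in code-stubs-arm.cc).
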
@@ -1866,12 +1996,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(r0); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r1, Operand(r0)); - __ pop(r0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(r1, Operand(r0)); + __ pop(r0); + } __ bind(&heapnumber_allocated); __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); @@ -1912,13 +2043,14 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(r0); // Push the heap number, not the untagged int32. - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r2, r0); // Move the new heap number into r2. - // Get the heap number into r0, now that the new heap number is in r2. - __ pop(r0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); // Push the heap number, not the untagged int32. + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(r2, r0); // Move the new heap number into r2. + // Get the heap number into r0, now that the new heap number is in r2. + __ pop(r0); + } // Convert the heap number in r0 to an untagged integer in r1. // This can't go slow-case because it's the same number we already @@ -2028,6 +2160,10 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. It is safe here because + // generation code does not use any raw pointers. + AllowStubCallsScope allow_stub_calls(masm, true); + switch (operands_type_) { case BinaryOpIC::UNINITIALIZED: GenerateTypeTransition(masm); @@ -3133,10 +3269,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); - __ EnterInternalFrame(); - __ push(r0); - __ CallRuntime(RuntimeFunction(), 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r0); + __ CallRuntime(RuntimeFunction(), 1); + } __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); __ Ret(); @@ -3149,14 +3286,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // We return the value in d2 without adding it to the cache, but // we cause a scavenging GC so that future allocations will succeed. - __ EnterInternalFrame(); - - // Allocate an aligned object larger than a HeapNumber. - ASSERT(4 * kPointerSize >= HeapNumber::kSize); - __ mov(scratch0, Operand(4 * kPointerSize)); - __ push(scratch0); - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Allocate an aligned object larger than a HeapNumber. 
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize); + __ mov(scratch0, Operand(4 * kPointerSize)); + __ push(scratch0); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + } __ Ret(); } } @@ -3173,6 +3311,7 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, } else { __ vmov(r0, r1, d2); } + AllowExternalCallThatCantCauseGC scope(masm); switch (type_) { case TranscendentalCache::SIN: __ CallCFunction(ExternalReference::math_sin_double_function(isolate), @@ -3268,11 +3407,14 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ push(lr); __ PrepareCallCFunction(1, 1, scratch); __ SetCallCDoubleArguments(double_base, exponent); - __ CallCFunction( - ExternalReference::power_double_int_function(masm->isolate()), - 1, 1); - __ pop(lr); - __ GetCFunctionDoubleResult(double_result); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::power_double_int_function(masm->isolate()), + 1, 1); + __ pop(lr); + __ GetCFunctionDoubleResult(double_result); + } __ vstr(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); __ mov(r0, heapnumber); @@ -3298,11 +3440,14 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ push(lr); __ PrepareCallCFunction(0, 2, scratch); __ SetCallCDoubleArguments(double_base, double_exponent); - __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), - 0, 2); - __ pop(lr); - __ GetCFunctionDoubleResult(double_result); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::power_double_double_function(masm->isolate()), + 0, 2); + __ pop(lr); + __ GetCFunctionDoubleResult(double_result); + } __ vstr(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); __ mov(r0, heapnumber); @@ -3319,6 +3464,37 @@ bool CEntryStub::NeedsImmovableCode() { } +bool CEntryStub::IsPregenerated() { + return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && + result_size_ == 1; +} + + +void CodeStub::GenerateStubsAheadOfTime() { + CEntryStub::GenerateAheadOfTime(); + WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); + RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); +} + + +void CodeStub::GenerateFPStubs() { + CEntryStub save_doubles(1, kSaveFPRegs); + Handle<Code> code = save_doubles.GetCode(); + code->set_is_pregenerated(true); + StoreBufferOverflowStub stub(kSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + code->GetIsolate()->set_fp_stubs_generated(true); +} + + +void CEntryStub::GenerateAheadOfTime() { + CEntryStub stub(1, kDontSaveFPRegs); + Handle<Code> code = stub.GetCode(); + code->set_is_pregenerated(true); +} + + void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { __ Throw(r0); } @@ -3430,8 +3606,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ b(eq, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. - __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); - __ ldr(r3, MemOperand(ip)); + __ mov(r3, Operand(isolate->factory()->the_hole_value())); __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ ldr(r0, MemOperand(ip)); @@ -3469,6 +3644,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ sub(r6, r6, Operand(kPointerSize)); // Enter the exit frame that transitions from JavaScript to C++. 
+ FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame(save_doubles_); // Setup argc and the builtin function in callee-saved registers. @@ -3613,8 +3789,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // saved values before returning a failure to C. // Clear any pending exceptions. - __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate))); - __ ldr(r5, MemOperand(ip)); + __ mov(r5, Operand(isolate->factory()->the_hole_value())); __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ str(r5, MemOperand(ip)); @@ -3851,10 +4026,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } else { - __ EnterInternalFrame(); - __ Push(r0, r1); - __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(r0, r1); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + } __ cmp(r0, Operand::Zero()); __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); @@ -4250,10 +4426,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { #ifdef V8_INTERPRETED_REGEXP __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); #else // V8_INTERPRETED_REGEXP - if (!FLAG_regexp_entry_native) { - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); - return; - } // Stack frame on entry. // sp[0]: last_match_info (expected JSArray) @@ -4480,8 +4652,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // For arguments 4 and 3 get string length, calculate start of string data and // calculate the shift of the index (0 for ASCII and 1 for two byte). - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); - __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); __ eor(r3, r3, Operand(1)); // Load the length from the original subject string from the previous stack // frame. Therefore we have to use fp, which points exactly to two pointer @@ -4532,8 +4703,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate))); - __ ldr(r1, MemOperand(r1, 0)); + __ mov(r1, Operand(isolate->factory()->the_hole_value())); __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ ldr(r0, MemOperand(r2, 0)); @@ -4575,16 +4745,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ str(r2, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastCaptureCountOffset)); // Store last subject and last input. - __ mov(r3, last_match_info_elements); // Moved up to reduce latency. 
__ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastSubjectOffset)); - __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); + __ mov(r2, subject); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastSubjectOffset, + r2, + r7, + kLRHasNotBeenSaved, + kDontSaveFPRegs); __ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastInputOffset)); - __ mov(r3, last_match_info_elements); - __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastInputOffset, + subject, + r7, + kLRHasNotBeenSaved, + kDontSaveFPRegs); // Get the static offsets vector filled by the native regexp code. ExternalReference address_of_static_offsets_vector = @@ -4712,6 +4891,22 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { } +void CallFunctionStub::FinishCode(Code* code) { + code->set_has_function_cache(false); +} + + +void CallFunctionStub::Clear(Heap* heap, Address address) { + UNREACHABLE(); +} + + +Object* CallFunctionStub::GetCachedValue(Address address) { + UNREACHABLE(); + return NULL; +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { Label slow, non_function; @@ -4889,23 +5084,26 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { __ cmp(result_, Operand(ip)); __ b(ne, &call_runtime_); // Get the first of the two strings and load its instance type. - __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); + __ ldr(result_, FieldMemOperand(object_, ConsString::kFirstOffset)); __ jmp(&assure_seq_string); // SlicedString, unpack and add offset. __ bind(&sliced_string); __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset)); __ add(scratch_, scratch_, result_); - __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset)); + __ ldr(result_, FieldMemOperand(object_, SlicedString::kParentOffset)); // Assure that we are dealing with a sequential string. Go to runtime if not. __ bind(&assure_seq_string); - __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); + __ ldr(result_, FieldMemOperand(result_, HeapObject::kMapOffset)); __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); // Check that parent is not an external string. Go to runtime otherwise. STATIC_ASSERT(kSeqStringTag == 0); __ tst(result_, Operand(kStringRepresentationMask)); __ b(ne, &call_runtime_); + // Actually fetch the parent string if it is confirmed to be sequential. + STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset); + __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset)); // Check for 1-byte or 2-byte string. __ bind(&flat_string); @@ -6425,12 +6623,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { // Call the runtime system in a fresh internal frame. ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); - __ EnterInternalFrame(); - __ Push(r1, r0); - __ mov(ip, Operand(Smi::FromInt(op_))); - __ push(ip); - __ CallExternalReference(miss, 3); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(r1, r0); + __ mov(ip, Operand(Smi::FromInt(op_))); + __ push(ip); + __ CallExternalReference(miss, 3); + } // Compute the entry point of the rewritten stub. __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Restore registers. 
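
Several hunks above (UnaryOpStub, TranscendentalCacheStub, InstanceofStub, ICCompareStub::GenerateMiss) replace matched EnterInternalFrame()/LeaveInternalFrame() calls with a block-scoped FrameScope. The sketch below shows only the RAII shape of that conversion with toy types; it is not the real MacroAssembler/FrameScope API, just the pattern under the assumption that frame entry and exit must always be paired.

#include <cassert>

// Toy assembler that only tracks whether a frame is currently open.
struct ToyAssembler {
  bool has_frame = false;
  void EnterInternalFrame() { assert(!has_frame); has_frame = true; }
  void LeaveInternalFrame() { assert(has_frame);  has_frame = false; }
};

// Scope object in the spirit of the FrameScope conversions: the frame is
// entered in the constructor and is guaranteed to be left when the scope
// closes, so no code path can skip the teardown.
class ToyFrameScope {
 public:
  explicit ToyFrameScope(ToyAssembler* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  ~ToyFrameScope() { masm_->LeaveInternalFrame(); }
 private:
  ToyAssembler* masm_;
};

int main() {
  ToyAssembler masm;
  {
    ToyFrameScope scope(&masm);
    // ... emit a runtime call that may trigger a GC ...
  }  // frame torn down here, mirroring the closing brace in the stubs above
  assert(!masm.has_frame);
  return 0;
}
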
@@ -6613,6 +6812,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. // Registers: // result: StringDictionary to probe // r1: key @@ -6702,6 +6903,267 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { } +struct AheadOfTimeWriteBarrierStubList { + Register object, value, address; + RememberedSetAction action; +}; + + +struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { + // Used in RegExpExecStub. + { r6, r4, r7, EMIT_REMEMBERED_SET }, + { r6, r2, r7, EMIT_REMEMBERED_SET }, + // Used in CompileArrayPushCall. + // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. + // Also used in KeyedStoreIC::GenerateGeneric. + { r3, r4, r5, EMIT_REMEMBERED_SET }, + // Used in CompileStoreGlobal. + { r4, r1, r2, OMIT_REMEMBERED_SET }, + // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. + { r1, r2, r3, EMIT_REMEMBERED_SET }, + { r3, r2, r1, EMIT_REMEMBERED_SET }, + // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. + { r2, r1, r3, EMIT_REMEMBERED_SET }, + { r3, r1, r2, EMIT_REMEMBERED_SET }, + // KeyedStoreStubCompiler::GenerateStoreFastElement. + { r4, r2, r3, EMIT_REMEMBERED_SET }, + // Null termination. + { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET} +}; + + +bool RecordWriteStub::IsPregenerated() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + if (object_.is(entry->object) && + value_.is(entry->value) && + address_.is(entry->address) && + remembered_set_action_ == entry->action && + save_fp_regs_mode_ == kDontSaveFPRegs) { + return true; + } + } + return false; +} + + +bool StoreBufferOverflowStub::IsPregenerated() { + return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated(); +} + + +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { + StoreBufferOverflowStub stub1(kDontSaveFPRegs); + stub1.GetCode()->set_is_pregenerated(true); +} + + +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + RecordWriteStub stub(entry->object, + entry->value, + entry->address, + entry->action, + kDontSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + } +} + + +// Takes the input in 3 registers: address_ value_ and object_. A pointer to +// the value has just been written into the object, now this stub makes sure +// we keep the GC informed. The word in the object where the value has been +// written is in the address register. +void RecordWriteStub::Generate(MacroAssembler* masm) { + Label skip_to_incremental_noncompacting; + Label skip_to_incremental_compacting; + + // The first two instructions are generated with labels so as to get the + // offset fixed up correctly by the bind(Label*) call. We patch it back and + // forth between a compare instructions (a nop in this position) and the + // real branch when we start and stop incremental heap marking. + // See RecordWriteStub::Patch for details. 
+ __ b(&skip_to_incremental_noncompacting); + __ b(&skip_to_incremental_compacting); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } + __ Ret(); + + __ bind(&skip_to_incremental_noncompacting); + GenerateIncremental(masm, INCREMENTAL); + + __ bind(&skip_to_incremental_compacting); + GenerateIncremental(masm, INCREMENTAL_COMPACTION); + + // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. + // Will be checked in IncrementalMarking::ActivateGeneratedStub. + ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); + ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); + PatchBranchIntoNop(masm, 0); + PatchBranchIntoNop(masm, Assembler::kInstrSize); +} + + +void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { + regs_.Save(masm); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + Label dont_need_remembered_set; + + __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); + __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. + regs_.scratch0(), + &dont_need_remembered_set); + + __ CheckPageFlag(regs_.object(), + regs_.scratch0(), + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + ne, + &dont_need_remembered_set); + + // First notify the incremental marker if necessary, then update the + // remembered set. + CheckNeedsToInformIncrementalMarker( + masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + + __ bind(&dont_need_remembered_set); + } + + CheckNeedsToInformIncrementalMarker( + masm, kReturnOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ Ret(); +} + + +void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { + regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); + int argument_count = 3; + __ PrepareCallCFunction(argument_count, regs_.scratch0()); + Register address = + r0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); + ASSERT(!address.is(regs_.object())); + ASSERT(!address.is(r0)); + __ Move(address, regs_.address()); + __ Move(r0, regs_.object()); + if (mode == INCREMENTAL_COMPACTION) { + __ Move(r1, address); + } else { + ASSERT(mode == INCREMENTAL); + __ ldr(r1, MemOperand(address, 0)); + } + __ mov(r2, Operand(ExternalReference::isolate_address())); + + AllowExternalCallThatCantCauseGC scope(masm); + if (mode == INCREMENTAL_COMPACTION) { + __ CallCFunction( + ExternalReference::incremental_evacuation_record_write_function( + masm->isolate()), + argument_count); + } else { + ASSERT(mode == INCREMENTAL); + __ CallCFunction( + ExternalReference::incremental_marking_record_write_function( + masm->isolate()), + argument_count); + } + regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); +} + + +void RecordWriteStub::CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode) { + Label on_black; + Label need_incremental; + Label need_incremental_pop_scratch; + + // Let's look at the color of the object: If it is not black we don't have + // to inform the incremental marker. 
+ __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&on_black); + + // Get the value from the slot. + __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); + + if (mode == INCREMENTAL_COMPACTION) { + Label ensure_not_white; + + __ CheckPageFlag(regs_.scratch0(), // Contains value. + regs_.scratch1(), // Scratch. + MemoryChunk::kEvacuationCandidateMask, + eq, + &ensure_not_white); + + __ CheckPageFlag(regs_.object(), + regs_.scratch1(), // Scratch. + MemoryChunk::kSkipEvacuationSlotsRecordingMask, + eq, + &need_incremental); + + __ bind(&ensure_not_white); + } + + // We need extra registers for this, so we push the object and the address + // register temporarily. + __ Push(regs_.object(), regs_.address()); + __ EnsureNotWhite(regs_.scratch0(), // The value. + regs_.scratch1(), // Scratch. + regs_.object(), // Scratch. + regs_.address(), // Scratch. + &need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + __ bind(&need_incremental); + + // Fall through when we need to inform the incremental marker. +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 557f7e6d4..3ba75bab1 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -58,6 +58,25 @@ class TranscendentalCacheStub: public CodeStub { }; +class StoreBufferOverflowStub: public CodeStub { + public: + explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) + : save_doubles_(save_fp) { } + + void Generate(MacroAssembler* masm); + + virtual bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + SaveFPRegsMode save_doubles_; + + Major MajorKey() { return StoreBufferOverflow; } + int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } +}; + + class UnaryOpStub: public CodeStub { public: UnaryOpStub(Token::Value op, @@ -323,6 +342,9 @@ class WriteInt32ToHeapNumberStub : public CodeStub { the_heap_number_(the_heap_number), scratch_(scratch) { } + bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + private: Register the_int_; Register the_heap_number_; @@ -371,6 +393,225 @@ class NumberToStringStub: public CodeStub { }; +class RecordWriteStub: public CodeStub { + public: + RecordWriteStub(Register object, + Register value, + Register address, + RememberedSetAction remembered_set_action, + SaveFPRegsMode fp_mode) + : object_(object), + value_(value), + address_(address), + remembered_set_action_(remembered_set_action), + save_fp_regs_mode_(fp_mode), + regs_(object, // An input reg. + address, // An input reg. + value) { // One scratch reg. 
+ } + + enum Mode { + STORE_BUFFER_ONLY, + INCREMENTAL, + INCREMENTAL_COMPACTION + }; + + virtual bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { + masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20)); + ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos))); + } + + static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { + masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27); + ASSERT(Assembler::IsBranch(masm->instr_at(pos))); + } + + static Mode GetMode(Code* stub) { + Instr first_instruction = Assembler::instr_at(stub->instruction_start()); + Instr second_instruction = Assembler::instr_at(stub->instruction_start() + + Assembler::kInstrSize); + + if (Assembler::IsBranch(first_instruction)) { + return INCREMENTAL; + } + + ASSERT(Assembler::IsTstImmediate(first_instruction)); + + if (Assembler::IsBranch(second_instruction)) { + return INCREMENTAL_COMPACTION; + } + + ASSERT(Assembler::IsTstImmediate(second_instruction)); + + return STORE_BUFFER_ONLY; + } + + static void Patch(Code* stub, Mode mode) { + MacroAssembler masm(NULL, + stub->instruction_start(), + stub->instruction_size()); + switch (mode) { + case STORE_BUFFER_ONLY: + ASSERT(GetMode(stub) == INCREMENTAL || + GetMode(stub) == INCREMENTAL_COMPACTION); + PatchBranchIntoNop(&masm, 0); + PatchBranchIntoNop(&masm, Assembler::kInstrSize); + break; + case INCREMENTAL: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + PatchNopIntoBranch(&masm, 0); + break; + case INCREMENTAL_COMPACTION: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + PatchNopIntoBranch(&masm, Assembler::kInstrSize); + break; + } + ASSERT(GetMode(stub) == mode); + CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize); + } + + private: + // This is a helper class for freeing up 3 scratch registers. The input is + // two registers that must be preserved and one scratch register provided by + // the caller. + class RegisterAllocation { + public: + RegisterAllocation(Register object, + Register address, + Register scratch0) + : object_(object), + address_(address), + scratch0_(scratch0) { + ASSERT(!AreAliased(scratch0, object, address, no_reg)); + scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_); + } + + void Save(MacroAssembler* masm) { + ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); + // We don't have to save scratch0_ because it was given to us as + // a scratch register. + masm->push(scratch1_); + } + + void Restore(MacroAssembler* masm) { + masm->pop(scratch1_); + } + + // If we have to call into C then we need to save and restore all caller- + // saved registers that were not already preserved. The scratch registers + // will be restored by other means so we don't bother pushing them here. + void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { + masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); + if (mode == kSaveFPRegs) { + CpuFeatures::Scope scope(VFP3); + masm->sub(sp, + sp, + Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1))); + // Save all VFP registers except d0. 
+ for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); + } + } + } + + inline void RestoreCallerSaveRegisters(MacroAssembler*masm, + SaveFPRegsMode mode) { + if (mode == kSaveFPRegs) { + CpuFeatures::Scope scope(VFP3); + // Restore all VFP registers except d0. + for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) { + DwVfpRegister reg = DwVfpRegister::from_code(i); + masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); + } + masm->add(sp, + sp, + Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1))); + } + masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); + } + + inline Register object() { return object_; } + inline Register address() { return address_; } + inline Register scratch0() { return scratch0_; } + inline Register scratch1() { return scratch1_; } + + private: + Register object_; + Register address_; + Register scratch0_; + Register scratch1_; + + Register GetRegThatIsNotOneOf(Register r1, + Register r2, + Register r3) { + for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + Register candidate = Register::FromAllocationIndex(i); + if (candidate.is(r1)) continue; + if (candidate.is(r2)) continue; + if (candidate.is(r3)) continue; + return candidate; + } + UNREACHABLE(); + return no_reg; + } + friend class RecordWriteStub; + }; + + enum OnNoNeedToInformIncrementalMarker { + kReturnOnNoNeedToInformIncrementalMarker, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker + }; + + void Generate(MacroAssembler* masm); + void GenerateIncremental(MacroAssembler* masm, Mode mode); + void CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode); + void InformIncrementalMarker(MacroAssembler* masm, Mode mode); + + Major MajorKey() { return RecordWrite; } + + int MinorKey() { + return ObjectBits::encode(object_.code()) | + ValueBits::encode(value_.code()) | + AddressBits::encode(address_.code()) | + RememberedSetActionBits::encode(remembered_set_action_) | + SaveFPRegsModeBits::encode(save_fp_regs_mode_); + } + + bool MustBeInStubCache() { + // All stubs must be registered in the stub cache + // otherwise IncrementalMarker would not be able to find + // and patch it. + return true; + } + + void Activate(Code* code) { + code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); + } + + class ObjectBits: public BitField<int, 0, 4> {}; + class ValueBits: public BitField<int, 4, 4> {}; + class AddressBits: public BitField<int, 8, 4> {}; + class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {}; + class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {}; + + Register object_; + Register value_; + Register address_; + RememberedSetAction remembered_set_action_; + SaveFPRegsMode save_fp_regs_mode_; + Label slow_; + RegisterAllocation regs_; +}; + + // Enter C code from generated RegExp code in a way that allows // the C code to fix the return address in case of a GC. // Currently only needed on ARM. 
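
RecordWriteStub::MinorKey() above packs three 4-bit register codes plus the remembered-set and FP-save flags into one integer using V8's BitField templates. Below is a small illustrative reimplementation of that encoding; the template and aliases are stand-ins, and only the layout of the first three fields follows the header above.

#include <cassert>
#include <cstdint>

// Each field occupies bits [shift, shift + size) of a 32-bit key.
template <typename T, int shift, int size>
struct ToyBitField {
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

// Layout mirroring the minor key above: three register codes, 4 bits each.
using ObjectBits  = ToyBitField<int, 0, 4>;
using ValueBits   = ToyBitField<int, 4, 4>;
using AddressBits = ToyBitField<int, 8, 4>;

int main() {
  // E.g. the {r6, r4, r7} entry from the ahead-of-time write barrier list.
  uint32_t key = ObjectBits::encode(6) |
                 ValueBits::encode(4) |
                 AddressBits::encode(7);
  assert(ObjectBits::decode(key) == 6);
  assert(ValueBits::decode(key) == 4);
  assert(AddressBits::decode(key) == 7);
  return 0;
}

Packing the operands into the minor key is what lets the stub cache find an already compiled RecordWriteStub for a given register combination, which the MustBeInStubCache() override above relies on.
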
@@ -575,6 +816,8 @@ class StringDictionaryLookupStub: public CodeStub { Register r0, Register r1); + virtual bool SometimesSetsUpAFrame() { return false; } + private: static const int kInlinedProbes = 4; static const int kTotalProbes = 20; @@ -587,7 +830,7 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryNegativeLookup; } + Major MajorKey() { return StringDictionaryLookup; } int MinorKey() { return LookupModeBits::encode(mode_); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index bf748a9b6..3993ed02b 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -38,12 +38,16 @@ namespace internal { // Platform-specific RuntimeCallHelper functions. void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { - masm->EnterInternalFrame(); + masm->EnterFrame(StackFrame::INTERNAL); + ASSERT(!masm->has_frame()); + masm->set_has_frame(true); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { - masm->LeaveInternalFrame(); + masm->LeaveFrame(StackFrame::INTERNAL); + ASSERT(masm->has_frame()); + masm->set_has_frame(false); } diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index d27982aba..1c0d508d2 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -69,16 +69,6 @@ class CodeGenerator: public AstVisitor { int pos, bool right_here = false); - // Constants related to patching of inlined load/store. - static int GetInlinedKeyedLoadInstructionsAfterPatch() { - return FLAG_debug_code ? 32 : 13; - } - static const int kInlinedKeyedStoreInstructionsAfterPatch = 8; - static int GetInlinedNamedStoreInstructionsAfterPatch() { - ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1); - return Isolate::Current()->inlined_write_barrier_size() + 4; - } - private: DISALLOW_COPY_AND_ASSIGN(CodeGenerator); }; diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index 07a22722c..b866f9cc8 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -132,55 +132,57 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() { static void Generate_DebugBreakCallHelper(MacroAssembler* masm, RegList object_regs, RegList non_object_regs) { - __ EnterInternalFrame(); - - // Store the registers containing live values on the expression stack to - // make sure that these are correctly updated during GC. Non object values - // are stored as a smi causing it to be untouched by GC. - ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); - if ((object_regs | non_object_regs) != 0) { - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((non_object_regs & (1 << r)) != 0) { - if (FLAG_debug_code) { - __ tst(reg, Operand(0xc0000000)); - __ Assert(eq, "Unable to encode value as smi"); + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as a smi causing it to be untouched by GC. 
+ ASSERT((object_regs & ~kJSCallerSaved) == 0); + ASSERT((non_object_regs & ~kJSCallerSaved) == 0); + ASSERT((object_regs & non_object_regs) == 0); + if ((object_regs | non_object_regs) != 0) { + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + if (FLAG_debug_code) { + __ tst(reg, Operand(0xc0000000)); + __ Assert(eq, "Unable to encode value as smi"); + } + __ mov(reg, Operand(reg, LSL, kSmiTagSize)); } - __ mov(reg, Operand(reg, LSL, kSmiTagSize)); } + __ stm(db_w, sp, object_regs | non_object_regs); } - __ stm(db_w, sp, object_regs | non_object_regs); - } #ifdef DEBUG - __ RecordComment("// Calling from debug break to runtime - come in - over"); + __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif - __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments - __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); - - CEntryStub ceb(1); - __ CallStub(&ceb); - - // Restore the register values from the expression stack. - if ((object_regs | non_object_regs) != 0) { - __ ldm(ia_w, sp, object_regs | non_object_regs); - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((non_object_regs & (1 << r)) != 0) { - __ mov(reg, Operand(reg, LSR, kSmiTagSize)); - } - if (FLAG_debug_code && - (((object_regs |non_object_regs) & (1 << r)) == 0)) { - __ mov(reg, Operand(kDebugZapValue)); + __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments + __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); + + CEntryStub ceb(1); + __ CallStub(&ceb); + + // Restore the register values from the expression stack. + if ((object_regs | non_object_regs) != 0) { + __ ldm(ia_w, sp, object_regs | non_object_regs); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + __ mov(reg, Operand(reg, LSR, kSmiTagSize)); + } + if (FLAG_debug_code && + (((object_regs |non_object_regs) & (1 << r)) == 0)) { + __ mov(reg, Operand(kDebugZapValue)); + } } } - } - __ LeaveInternalFrame(); + // Leave the internal frame. + } // Now that the break point has been handled, resume normal execution by // jumping to the target address intended by the caller and that was diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 00357f76d..bb03d740d 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -112,12 +112,19 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } #endif + Isolate* isolate = code->GetIsolate(); + // Add the deoptimizing code to the list. DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); - DeoptimizerData* data = code->GetIsolate()->deoptimizer_data(); + DeoptimizerData* data = isolate->deoptimizer_data(); node->set_next(data->deoptimizing_code_list_); data->deoptimizing_code_list_ = node; + // We might be in the middle of incremental marking with compaction. + // Tell collector to treat this code object in a special way and + // ignore all slots that might have been recorded on it. + isolate->heap()->mark_compact_collector()->InvalidateCode(code); + // Set the code for the function to non-optimized version. 
function->ReplaceCode(function->shared()->code()); @@ -134,7 +141,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } -void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, +void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, + Address pc_after, Code* check_code, Code* replacement_code) { const int kInstrSize = Assembler::kInstrSize; @@ -169,6 +177,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, reinterpret_cast<uint32_t>(check_code->entry())); Memory::uint32_at(stack_check_address_pointer) = reinterpret_cast<uint32_t>(replacement_code->entry()); + + RelocInfo rinfo(pc_after - 2 * kInstrSize, + RelocInfo::CODE_TARGET, + 0, + unoptimized_code); + unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode( + unoptimized_code, &rinfo, replacement_code); } @@ -193,6 +208,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, reinterpret_cast<uint32_t>(replacement_code->entry())); Memory::uint32_at(stack_check_address_pointer) = reinterpret_cast<uint32_t>(check_code->entry()); + + check_code->GetHeap()->incremental_marking()-> + RecordCodeTargetPatch(pc_after - 2 * kInstrSize, check_code); } @@ -632,7 +650,10 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(r5, Operand(ExternalReference::isolate_address())); __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate. // Call Deoptimizer::New(). - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + } // Preserve "deoptimizer" object in register r0 and get the input // frame descriptor pointer to r1 (deoptimizer->input_); @@ -686,8 +707,11 @@ void Deoptimizer::EntryGenerator::Generate() { // r0: deoptimizer object; r1: scratch. __ PrepareCallCFunction(1, r1); // Call Deoptimizer::ComputeOutputFrames(). - __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 1); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction( + ExternalReference::compute_output_frames_function(isolate), 1); + } __ pop(r0); // Restore deoptimizer object (class Deoptimizer). // Replace the current (input) frame with the output frames. diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index 26bbd82d0..c66ceee93 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -70,6 +70,16 @@ static const RegList kCalleeSaved = 1 << 10 | // r10 v7 1 << 11; // r11 v8 (fp in JavaScript code) +// When calling into C++ (only for C++ calls that can't cause a GC). +// The call code will take care of lr, fp, etc. +static const RegList kCallerSaved = + 1 << 0 | // r0 + 1 << 1 | // r1 + 1 << 2 | // r2 + 1 << 3 | // r3 + 1 << 9; // r9 + + static const int kNumCalleeSaved = 7 + kR9Available; // Double registers d8 to d15 are callee-saved. diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 50ed8b1da..353ce5b10 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -39,6 +39,7 @@ #include "stub-cache.h" #include "arm/code-stubs-arm.h" +#include "arm/macro-assembler-arm.h" namespace v8 { namespace internal { @@ -155,6 +156,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { __ bind(&ok); } + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). 
+ FrameScope frame_scope(masm_, StackFrame::MANUAL); + int locals_count = info->scope()->num_stack_slots(); __ Push(lr, fp, cp, r1); @@ -200,13 +206,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // Load parameter from stack. __ ldr(r0, MemOperand(fp, parameter_offset)); // Store it in the context. - __ mov(r1, Operand(Context::SlotOffset(var->index()))); - __ str(r0, MemOperand(cp, r1)); - // Update the write barrier. This clobbers all involved - // registers, so we have to use two more registers to avoid - // clobbering cp. - __ mov(r2, Operand(cp)); - __ RecordWrite(r2, Operand(r1), r3, r0); + MemOperand target = ContextOperand(cp, var->index()); + __ str(r0, target); + + // Update the write barrier. + __ RecordWriteContextSlot( + cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs); } } } @@ -264,7 +269,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // constant. if (scope()->is_function_scope() && scope()->function() != NULL) { int ignored = 0; - EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored); + EmitDeclaration(scope()->function(), CONST, NULL, &ignored); } VisitDeclarations(scope()->declarations()); } @@ -665,12 +670,15 @@ void FullCodeGenerator::SetVar(Variable* var, ASSERT(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ str(src, location); + // Emit the write barrier code if the location is in the heap. if (var->IsContextSlot()) { - __ RecordWrite(scratch0, - Operand(Context::SlotOffset(var->index())), - scratch1, - src); + __ RecordWriteContextSlot(scratch0, + location.offset(), + src, + scratch1, + kLRHasBeenSaved, + kDontSaveFPRegs); } } @@ -703,7 +711,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state, void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, - Variable::Mode mode, + VariableMode mode, FunctionLiteral* function, int* global_count) { // If it was not possible to allocate the variable at compile time, we @@ -721,7 +729,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); VisitForAccumulatorValue(function); __ str(result_register(), StackOperand(variable)); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { Comment cmnt(masm_, "[ Declaration"); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ str(ip, StackOperand(variable)); @@ -746,10 +754,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ str(result_register(), ContextOperand(cp, variable->index())); int offset = Context::SlotOffset(variable->index()); // We know that we have written a function, which is not a smi. - __ mov(r1, Operand(cp)); - __ RecordWrite(r1, Operand(offset), r2, result_register()); + __ RecordWriteContextSlot(cp, + offset, + result_register(), + r2, + kLRHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); PrepareForBailoutForId(proxy->id(), NO_REGISTERS); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { Comment cmnt(masm_, "[ Declaration"); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ str(ip, ContextOperand(cp, variable->index())); @@ -762,10 +776,8 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); __ mov(r2, Operand(variable->name())); // Declaration nodes are always introduced in one of three modes. 
- ASSERT(mode == Variable::VAR || - mode == Variable::CONST || - mode == Variable::LET); - PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE; + ASSERT(mode == VAR || mode == CONST || mode == LET); + PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE; __ mov(r1, Operand(Smi::FromInt(attr))); // Push initial value, if any. // Note: For variables we must not push an initial value (such as @@ -775,7 +787,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ Push(cp, r2, r1); // Push initial value for function declaration. VisitForStackValue(function); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); __ Push(cp, r2, r1, r0); } else { @@ -1205,15 +1217,23 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. - if (var->mode() == Variable::DYNAMIC_GLOBAL) { + if (var->mode() == DYNAMIC_GLOBAL) { EmitLoadGlobalCheckExtensions(var, typeof_state, slow); __ jmp(done); - } else if (var->mode() == Variable::DYNAMIC_LOCAL) { + } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == Variable::CONST) { + if (local->mode() == CONST || + local->mode() == LET) { __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); + if (local->mode() == CONST) { + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); + } else { // LET + __ b(ne, done); + __ mov(r0, Operand(var->name())); + __ push(r0); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + } } __ jmp(done); } @@ -1246,13 +1266,13 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { Comment cmnt(masm_, var->IsContextSlot() ? "Context variable" : "Stack variable"); - if (var->mode() != Variable::LET && var->mode() != Variable::CONST) { + if (var->mode() != LET && var->mode() != CONST) { context()->Plug(var); } else { // Let and const need a read barrier. GetVar(r0, var); __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); - if (var->mode() == Variable::LET) { + if (var->mode() == LET) { Label done; __ b(ne, &done); __ mov(r0, Operand(var->name())); @@ -1490,14 +1510,23 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { VisitForAccumulatorValue(subexpr); // Store the subexpression value in the array's elements. - __ ldr(r1, MemOperand(sp)); // Copy of array literal. - __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ ldr(r6, MemOperand(sp)); // Copy of array literal. + __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset)); int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ str(result_register(), FieldMemOperand(r1, offset)); + Label no_map_change; + __ JumpIfSmi(result_register(), &no_map_change); // Update the write barrier for the array store with r0 as the scratch // register. - __ RecordWrite(r1, Operand(offset), r2, result_register()); + __ RecordWriteField( + r1, offset, result_register(), r2, kLRHasBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ CheckFastSmiOnlyElements(r3, r2, &no_map_change); + __ push(r6); // Copy of array literal. 
+ __ CallRuntime(Runtime::kNonSmiElementStored, 1); + __ bind(&no_map_change); PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); } @@ -1844,7 +1873,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); } - } else if (var->mode() == Variable::LET && op != Token::INIT_LET) { + } else if (var->mode() == LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. if (var->IsLookupSlot()) { __ push(r0); // Value. @@ -1869,11 +1898,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, // RecordWrite may destroy all its register arguments. __ mov(r3, result_register()); int offset = Context::SlotOffset(var->index()); - __ RecordWrite(r1, Operand(offset), r2, r3); + __ RecordWriteContextSlot( + r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs); } } - } else if (var->mode() != Variable::CONST) { + } else if (var->mode() != CONST) { // Assignment to var or initializing assignment to let. if (var->IsStackAllocated() || var->IsContextSlot()) { MemOperand location = VarOperand(var, r1); @@ -1887,7 +1917,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ str(r0, location); if (var->IsContextSlot()) { __ mov(r3, r0); - __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3); + int offset = Context::SlotOffset(var->index()); + __ RecordWriteContextSlot( + r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs); } } else { ASSERT(var->IsLookupSlot()); @@ -2107,10 +2139,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, __ push(r1); // Push the strict mode flag. In harmony mode every eval call // is a strict mode eval call. - StrictModeFlag strict_mode = strict_mode_flag(); - if (FLAG_harmony_block_scoping) { - strict_mode = kStrictMode; - } + StrictModeFlag strict_mode = + FLAG_harmony_scoping ? kStrictMode : strict_mode_flag(); __ mov(r1, Operand(Smi::FromInt(strict_mode))); __ push(r1); @@ -2156,7 +2186,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { // context lookup in the runtime system. Label done; Variable* var = proxy->var(); - if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) { + if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) { Label slow; EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow); // Push the function and resolve eval. @@ -2662,20 +2692,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) { // Check that the object is a JS object but take special care of JS // functions to make sure they have 'Function' as their class. + // Assume that there are only two callable types, and one of them is at + // either end of the type range for JS object types. Saves extra comparisons. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE); // Map is now in r0. __ b(lt, &null); - - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE)); - __ b(ge, &function); - - // Check if the constructor in the map is a function. 
+ STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + __ b(eq, &function); + + __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE)); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + __ b(eq, &function); + // Assume that there is no larger type. + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); + + // Check if the constructor in the map is a JS function. __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset)); __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); __ b(ne, &non_function_constructor); @@ -2853,7 +2887,9 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) { __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset)); // Update the write barrier. Save the value as it will be // overwritten by the write barrier code and is needed afterward. - __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3); + __ mov(r2, r0); + __ RecordWriteField( + r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs); __ bind(&done); context()->Plug(r0); @@ -3141,16 +3177,31 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { __ str(scratch1, MemOperand(index2, 0)); __ str(scratch2, MemOperand(index1, 0)); - Label new_space; - __ InNewSpace(elements, scratch1, eq, &new_space); + Label no_remembered_set; + __ CheckPageFlag(elements, + scratch1, + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + ne, + &no_remembered_set); // Possible optimization: do a check that both values are Smis // (or them and test against Smi mask.) - __ mov(scratch1, elements); - __ RecordWriteHelper(elements, index1, scratch2); - __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements. + // We are swapping two objects in an array and the incremental marker never + // pauses in the middle of scanning a single object. Therefore the + // incremental marker is not disturbed, so we don't need to call the + // RecordWrite stub that notifies the incremental marker. + __ RememberedSetHelper(elements, + index1, + scratch2, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); + __ RememberedSetHelper(elements, + index2, + scratch2, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); - __ bind(&new_space); + __ bind(&no_remembered_set); // We are done. Drop elements from the stack, and return undefined. 
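The EmitSwapElements hunk above can get away with remembered-set bookkeeping only: both values already live in the same elements array, so, as the comment notes, the incremental marker's invariants are untouched and the full RecordWrite stub is unnecessary; the CheckPageFlag test even skips the bookkeeping when the page is already flagged SCAN_ON_SCAVENGE. A rough sketch of what such a store buffer does, with invented names and a fixed-size array standing in for V8's store_buffer_top / StoreBufferOverflowStub machinery:

    #include <cstddef>

    // Records addresses of slots that may now hold old-to-new pointers; the
    // scavenger later walks these slots instead of scanning whole pages.
    class ToyStoreBuffer {
     public:
      using Slot = void**;
      using FlushFn = void (*)(Slot* begin, size_t count);

      explicit ToyStoreBuffer(FlushFn flush) : top_(0), flush_(flush) {}

      // The fast path RememberedSetHelper emits: append the slot address and
      // only call out (the "overflow stub") when the buffer is full.
      void Record(Slot slot) {
        buffer_[top_++] = slot;
        if (top_ == kCapacity) {
          flush_(buffer_, top_);
          top_ = 0;
        }
      }

     private:
      static const size_t kCapacity = 1024;
      Slot buffer_[kCapacity];
      size_t top_;
      FlushFn flush_;
    };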
__ Drop(3); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); @@ -3898,10 +3949,14 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, - Handle<String> check, - Label* if_true, - Label* if_false, - Label* fall_through) { + Handle<String> check) { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + { AccumulatorValueContext context(this); VisitForTypeofValue(expr); } @@ -3942,9 +3997,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else if (check->Equals(isolate()->heap()->function_symbol())) { __ JumpIfSmi(r0, if_false); - __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE); - Split(ge, if_true, if_false, fall_through); - + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE); + __ b(eq, if_true); + __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE)); + Split(eq, if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->object_symbol())) { __ JumpIfSmi(r0, if_false); if (!FLAG_harmony_typeof) { @@ -3963,18 +4020,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else { if (if_false != fall_through) __ jmp(if_false); } -} - - -void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr, - Label* if_true, - Label* if_false, - Label* fall_through) { - VisitForAccumulatorValue(expr); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - - __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); - Split(eq, if_true, if_false, fall_through); + context()->Plug(if_true, if_false); } @@ -3982,9 +4028,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); SetSourcePosition(expr->position()); + // First we try a fast inlined version of the compare when one of + // the operands is a literal. + if (TryLiteralCompare(expr)) return; + // Always perform the comparison for its control flow. Pack the result // into the expression's context after the comparison is performed. - Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -3992,13 +4041,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - // First we try a fast inlined version of the compare when one of - // the operands is a literal. 
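The new typeof "function" check above leans on the STATIC_ASSERT that there are exactly two callable instance types (JS_FUNCTION_TYPE and JS_FUNCTION_PROXY_TYPE), so two equality tests replace the old range comparison against FIRST_CALLABLE_SPEC_OBJECT_TYPE. A tiny illustration with an invented instance-type enum that reproduces only the ordering the asserts in EmitClassOf spell out:

    #include <cstdint>

    // Invented layout: the two callable types sit at the very ends of the
    // spec-object range; the values are not V8's.
    enum InstanceType : uint8_t {
      HEAP_NUMBER_TYPE,
      STRING_TYPE,
      JS_FUNCTION_PROXY_TYPE,  // FIRST_SPEC_OBJECT_TYPE, callable
      JS_OBJECT_TYPE,          // non-callable range starts here
      JS_ARRAY_TYPE,
      JS_FUNCTION_TYPE,        // LAST_SPEC_OBJECT_TYPE == LAST_TYPE, callable
      LAST_TYPE = JS_FUNCTION_TYPE
    };

    // With exactly two callable types, "typeof x == 'function'" needs just
    // two compares, which is what the generated code above emits.
    inline bool TypeofIsFunction(InstanceType t) {
      return t == JS_FUNCTION_TYPE || t == JS_FUNCTION_PROXY_TYPE;
    }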
- if (TryLiteralCompare(expr, if_true, if_false, fall_through)) { - context()->Plug(if_true, if_false); - return; - } - Token::Value op = expr->op(); VisitForStackValue(expr->left()); switch (op) { @@ -4085,8 +4127,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } -void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { - Comment cmnt(masm_, "[ CompareToNull"); +void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, + Expression* sub_expr, + NilValue nil) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4094,15 +4137,21 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - VisitForAccumulatorValue(expr->expression()); + VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - __ LoadRoot(r1, Heap::kNullValueRootIndex); + Heap::RootListIndex nil_value = nil == kNullValue ? + Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; + __ LoadRoot(r1, nil_value); __ cmp(r0, r1); - if (expr->is_strict()) { + if (expr->op() == Token::EQ_STRICT) { Split(eq, if_true, if_false, fall_through); } else { + Heap::RootListIndex other_nil_value = nil == kNullValue ? + Heap::kUndefinedValueRootIndex : + Heap::kNullValueRootIndex; __ b(eq, if_true); - __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r1, other_nil_value); __ cmp(r0, r1); __ b(eq, if_true); __ JumpIfSmi(r0, if_false); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 2e49cae92..6e0badca1 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -208,7 +208,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Update the write barrier. Make sure not to clobber the value. __ mov(scratch1, value); - __ RecordWrite(elements, scratch2, scratch1); + __ RecordWrite( + elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs); } @@ -504,21 +505,22 @@ static void GenerateCallMiss(MacroAssembler* masm, // Get the receiver of the function from the stack. __ ldr(r3, MemOperand(sp, argc * kPointerSize)); - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push the receiver and the name of the function. - __ Push(r3, r2); + // Push the receiver and the name of the function. + __ Push(r3, r2); - // Call the entry. - __ mov(r0, Operand(2)); - __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate))); + // Call the entry. + __ mov(r0, Operand(2)); + __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate))); - CEntryStub stub(1); - __ CallStub(&stub); + CEntryStub stub(1); + __ CallStub(&stub); - // Move result to r1 and leave the internal frame. - __ mov(r1, Operand(r0)); - __ LeaveInternalFrame(); + // Move result to r1 and leave the internal frame. + __ mov(r1, Operand(r0)); + } // Check if the receiver is a global object of some sort. // This can happen only for regular CallIC but not KeyedCallIC. @@ -650,12 +652,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // This branch is taken when calling KeyedCallIC_Miss is neither required // nor beneficial. 
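The VisitCompareToNull → EmitLiteralCompareNil rewrite above generalizes the old null-only compare so the same code handles literal comparisons against both null and undefined, for strict (Token::EQ_STRICT) and sloppy equality. The branch structure corresponds to the following semantics; ToyValueTag is an invented stand-in, and the undetectable case matches the smi/undetectable handling described in the DoIsNilAndBranch comment further down in this patch:

    #include <cassert>

    // Invented value model for the sketch.
    enum class ToyValueTag { kSmi, kNull, kUndefined, kUndetectable, kObject };

    // x === null (or === undefined): only the exact nil value matches.
    inline bool StrictEqualsNil(ToyValueTag v, ToyValueTag nil) {
      return v == nil;
    }

    // x == null (or == undefined): null, undefined and undetectable objects
    // all compare equal; smis and ordinary objects do not.
    inline bool LooseEqualsNil(ToyValueTag v) {
      return v == ToyValueTag::kNull || v == ToyValueTag::kUndefined ||
             v == ToyValueTag::kUndetectable;
    }

    int main() {
      assert(StrictEqualsNil(ToyValueTag::kNull, ToyValueTag::kNull));
      assert(!StrictEqualsNil(ToyValueTag::kUndefined, ToyValueTag::kNull));
      assert(LooseEqualsNil(ToyValueTag::kUndefined));
      assert(!LooseEqualsNil(ToyValueTag::kSmi));
      return 0;
    }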
__ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3); - __ EnterInternalFrame(); - __ push(r2); // save the key - __ Push(r1, r2); // pass the receiver and the key - __ CallRuntime(Runtime::kKeyedGetProperty, 2); - __ pop(r2); // restore the key - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(r2); // save the key + __ Push(r1, r2); // pass the receiver and the key + __ CallRuntime(Runtime::kKeyedGetProperty, 2); + __ pop(r2); // restore the key + } __ mov(r1, r0); __ jmp(&do_call); @@ -908,7 +911,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, ¬in, &slow); __ str(r0, mapped_location); __ add(r6, r3, r5); - __ RecordWrite(r3, r6, r9); + __ mov(r9, r0); + __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in r3. @@ -916,7 +920,8 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow); __ str(r0, unmapped_location); __ add(r6, r3, r4); - __ RecordWrite(r3, r6, r9); + __ mov(r9, r0); + __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); __ Ret(); __ bind(&slow); GenerateMiss(masm, false); @@ -1267,13 +1272,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // -- r2 : receiver // -- lr : return address // ----------------------------------- - Label slow, fast, array, extra; + Label slow, array, extra, check_if_double_array; + Label fast_object_with_map_check, fast_object_without_map_check; + Label fast_double_with_map_check, fast_double_without_map_check; // Register usage. Register value = r0; Register key = r1; Register receiver = r2; Register elements = r3; // Elements array of the receiver. + Register elements_map = r6; + Register receiver_map = r7; // r4 and r5 are used as general scratch registers. // Check that the key is a smi. @@ -1281,35 +1290,26 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // Check that the object isn't a smi. __ JumpIfSmi(receiver, &slow); // Get the map of the object. - __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check that the receiver does not require access checks. We need // to do this because this generic stub does not perform map checks. - __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset)); + __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); __ b(ne, &slow); // Check if the object is a JS array or not. - __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); __ cmp(r4, Operand(JS_ARRAY_TYPE)); __ b(eq, &array); // Check that the object is some kind of JSObject. - __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE)); + __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); __ b(lt, &slow); - __ cmp(r4, Operand(JS_PROXY_TYPE)); - __ b(eq, &slow); - __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE)); - __ b(eq, &slow); // Object case: Check key against length in the elements array. __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); - // Check that the object is in fast mode and writable. - __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(r4, ip); - __ b(ne, &slow); // Check array bounds. 
Both the key and the length of FixedArray are smis. __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ cmp(key, Operand(ip)); - __ b(lo, &fast); + __ b(lo, &fast_object_with_map_check); // Slow case, handle jump to runtime. __ bind(&slow); @@ -1330,21 +1330,31 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ cmp(key, Operand(ip)); __ b(hs, &slow); + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ cmp(elements_map, + Operand(masm->isolate()->factory()->fixed_array_map())); + __ b(ne, &check_if_double_array); // Calculate key + 1 as smi. STATIC_ASSERT(kSmiTag == 0); __ add(r4, key, Operand(Smi::FromInt(1))); __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ b(&fast); + __ b(&fast_object_without_map_check); + + __ bind(&check_if_double_array); + __ cmp(elements_map, + Operand(masm->isolate()->factory()->fixed_double_array_map())); + __ b(ne, &slow); + // Add 1 to key, and go to common element store code for doubles. + STATIC_ASSERT(kSmiTag == 0); + __ add(r4, key, Operand(Smi::FromInt(1))); + __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ jmp(&fast_double_without_map_check); // Array case: Get the length and the elements array from the JS // array. Check that the array is in fast mode (and writable); if it // is the length is always a smi. __ bind(&array); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(r4, ip); - __ b(ne, &slow); // Check the key against the length in the array. __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1352,18 +1362,57 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ b(hs, &extra); // Fall through to fast case. - __ bind(&fast); - // Fast case, store the value to the elements backing store. - __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ str(value, MemOperand(r5)); - // Skip write barrier if the written value is a smi. - __ tst(value, Operand(kSmiTagMask)); - __ Ret(eq); + __ bind(&fast_object_with_map_check); + Register scratch_value = r4; + Register address = r5; + __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ cmp(elements_map, + Operand(masm->isolate()->factory()->fixed_array_map())); + __ b(ne, &fast_double_with_map_check); + __ bind(&fast_object_without_map_check); + // Smi stores don't require further checks. + Label non_smi_value; + __ JumpIfNotSmi(value, &non_smi_value); + // It's irrelevant whether array is smi-only or not when writing a smi. + __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value, MemOperand(address)); + __ Ret(); + + __ bind(&non_smi_value); + // Escape to slow case when writing non-smi into smi-only array. + __ CheckFastObjectElements(receiver_map, scratch_value, &slow); + // Fast elements array, store the value to the elements backing store. + __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value, MemOperand(address)); // Update write barrier for the elements array address. 
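The rewritten KeyedStoreIC::GenerateGeneric above (continuing below with the write barrier and the double path) distinguishes three fast element layouts: a smi written into either tagged layout needs no write barrier, a non-smi written into a smi-only array escapes to the slow path, and a FixedDoubleArray stores the number unboxed via StoreNumberToDoubleElements. A compact model of that dispatch, with invented names and flags standing in for the barrier and the runtime bail-out:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    enum class ToyKind { kFastSmiOnly, kFastObject, kFastDouble };

    using Tagged = intptr_t;
    inline bool IsSmi(Tagged v) { return (v & 1) == 0; }
    inline double SmiToDouble(Tagged v) { return static_cast<double>(v >> 1); }

    struct ToyElements {
      ToyKind kind;
      std::vector<Tagged> tagged;    // backing store for the tagged kinds
      std::vector<double> unboxed;   // backing store for kFastDouble
      bool needs_slow_path = false;  // stands in for jumping to &slow
      bool barrier_emitted = false;  // stands in for RecordWrite
    };

    void ToyKeyedStore(ToyElements* a, size_t key, Tagged value) {
      switch (a->kind) {
        case ToyKind::kFastSmiOnly:
          if (!IsSmi(value)) { a->needs_slow_path = true; return; }
          assert(key < a->tagged.size());
          a->tagged[key] = value;  // smi store: no barrier, no map change
          return;
        case ToyKind::kFastObject:
          assert(key < a->tagged.size());
          a->tagged[key] = value;
          if (!IsSmi(value)) a->barrier_emitted = true;
          return;
        case ToyKind::kFastDouble:
          assert(key < a->unboxed.size());
          if (IsSmi(value)) {
            a->unboxed[key] = SmiToDouble(value);  // unbox the smi
          } else {
            // The real code copies the HeapNumber payload here (and
            // canonicalizes NaN); this sketch just bails out.
            a->needs_slow_path = true;
          }
          return;
      }
    }

The smi fast path returning without a barrier is what makes the grow-by-one and in-bounds cases above so short: only pointer stores into tagged arrays ever reach RecordWrite.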
- __ sub(r4, r5, Operand(elements)); - __ RecordWrite(elements, Operand(r4), r5, r6); + __ mov(scratch_value, value); // Preserve the value which is returned. + __ RecordWrite(elements, + address, + scratch_value, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ Ret(); + __ bind(&fast_double_with_map_check); + // Check for fast double array case. If this fails, call through to the + // runtime. + __ cmp(elements_map, + Operand(masm->isolate()->factory()->fixed_double_array_map())); + __ b(ne, &slow); + __ bind(&fast_double_without_map_check); + __ StoreNumberToDoubleElements(value, + key, + receiver, + elements, + r4, + r5, + r6, + r7, + &slow); __ Ret(); } diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 30ccd05be..84959397b 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -212,10 +212,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) { } -void LIsNullAndBranch::PrintDataTo(StringStream* stream) { +void LIsNilAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); InputAt(0)->PrintTo(stream); - stream->Add(is_strict() ? " === null" : " == null"); + stream->Add(kind() == kStrictEquality ? " === " : " == "); + stream->Add(nil() == kNullValue ? "null" : "undefined"); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); } @@ -711,7 +712,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble( LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { HEnvironment* hydrogen_env = current_block_->last_environment(); - instr->set_environment(CreateEnvironment(hydrogen_env)); + int argument_index_accumulator = 0; + instr->set_environment(CreateEnvironment(hydrogen_env, + &argument_index_accumulator)); return instr; } @@ -994,10 +997,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { } -LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { +LEnvironment* LChunkBuilder::CreateEnvironment( + HEnvironment* hydrogen_env, + int* argument_index_accumulator) { if (hydrogen_env == NULL) return NULL; - LEnvironment* outer = CreateEnvironment(hydrogen_env->outer()); + LEnvironment* outer = + CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator); int ast_id = hydrogen_env->ast_id(); ASSERT(ast_id != AstNode::kNoNumber); int value_count = hydrogen_env->length(); @@ -1007,7 +1013,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { argument_count_, value_count, outer); - int argument_index = 0; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1016,7 +1021,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { if (value->IsArgumentsObject()) { op = NULL; } else if (value->IsPushArgument()) { - op = new LArgument(argument_index++); + op = new LArgument((*argument_index_accumulator)++); } else { op = UseAny(value); } @@ -1444,9 +1449,9 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch( } -LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) { +LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) { ASSERT(instr->value()->representation().IsTagged()); - return new LIsNullAndBranch(UseRegisterAtStart(instr->value())); + return new LIsNilAndBranch(UseRegisterAtStart(instr->value())); } @@ -1734,7 +1739,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { LLoadGlobalCell* result = new 
LLoadGlobalCell; - return instr->check_hole_value() + return instr->RequiresHoleCheck() ? AssignEnvironment(DefineAsRegister(result)) : DefineAsRegister(result); } @@ -1748,14 +1753,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { - if (instr->check_hole_value()) { - LOperand* temp = TempRegister(); - LOperand* value = UseRegister(instr->value()); - return AssignEnvironment(new LStoreGlobalCell(value, temp)); - } else { - LOperand* value = UseRegisterAtStart(instr->value()); - return new LStoreGlobalCell(value, NULL); - } + LOperand* temp = TempRegister(); + LOperand* value = UseTempRegister(instr->value()); + LInstruction* result = new LStoreGlobalCell(value, temp); + if (instr->RequiresHoleCheck()) result = AssignEnvironment(result); + return result; } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 8c18760fd..73c7e459c 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -107,7 +107,7 @@ class LCodeGen; V(Integer32ToDouble) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ - V(IsNullAndBranch) \ + V(IsNilAndBranch) \ V(IsObjectAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ @@ -627,16 +627,17 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> { }; -class LIsNullAndBranch: public LControlInstruction<1, 0> { +class LIsNilAndBranch: public LControlInstruction<1, 0> { public: - explicit LIsNullAndBranch(LOperand* value) { + explicit LIsNilAndBranch(LOperand* value) { inputs_[0] = value; } - DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch) + DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch) - bool is_strict() const { return hydrogen()->is_strict(); } + EqualityKind kind() const { return hydrogen()->kind(); } + NilValue nil() const { return hydrogen()->nil(); } virtual void PrintDataTo(StringStream* stream); }; @@ -2159,7 +2160,8 @@ class LChunkBuilder BASE_EMBEDDED { LInstruction* instr, int ast_id); void ClearInstructionPendingDeoptimizationEnvironment(); - LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env); + LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, + int* argument_index_accumulator); void VisitInstruction(HInstruction* current); diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index f5d744914..70ef88481 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -82,6 +82,14 @@ bool LCodeGen::GenerateCode() { status_ = GENERATING; CpuFeatures::Scope scope1(VFP3); CpuFeatures::Scope scope2(ARMv7); + + CodeStub::GenerateFPStubs(); + + // Open a frame scope to indicate that there is a frame on the stack. The + // NONE indicates that the scope shouldn't actually generate code to set up + // the frame (that is done in GeneratePrologue). + FrameScope frame_scope(masm_, StackFrame::NONE); + return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && @@ -206,13 +214,11 @@ bool LCodeGen::GeneratePrologue() { // Load parameter from stack. __ ldr(r0, MemOperand(fp, parameter_offset)); // Store it in the context. - __ mov(r1, Operand(Context::SlotOffset(var->index()))); - __ str(r0, MemOperand(cp, r1)); - // Update the write barrier. This clobbers all involved - // registers, so we have to use two more registers to avoid - // clobbering cp. 
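The DoLoadGlobalCell / DoStoreGlobalCell changes above replace check_hole_value() with RequiresHoleCheck() and attach a deoptimization environment only when the check is needed: a global property cell that holds the hole value may have been deleted from the property dictionary (as the comment further down in the store path notes), so optimized code must not read or overwrite it blindly. A small sketch of the invariant, using an invented sentinel rather than V8's real hole object:

    #include <cstdint>
    #include <stdexcept>

    struct ToyGlobalCell {
      static constexpr intptr_t kTheHole = -1;  // invented sentinel
      intptr_t payload = kTheHole;
    };

    // Optimized code assumes the global still exists; if the cell holds the
    // hole it must leave optimized code so the generic path can redo the
    // lookup and fix up the property details.
    inline intptr_t LoadGlobal(const ToyGlobalCell& cell,
                               bool requires_hole_check) {
      if (requires_hole_check && cell.payload == ToyGlobalCell::kTheHole) {
        throw std::runtime_error("deopt: global cell holds the hole");
      }
      return cell.payload;
    }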
- __ mov(r2, Operand(cp)); - __ RecordWrite(r2, Operand(r1), r3, r0); + MemOperand target = ContextOperand(cp, var->index()); + __ str(r0, target); + // Update the write barrier. This clobbers r3 and r0. + __ RecordWriteContextSlot( + cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); } } Comment(";;; End allocate local context"); @@ -262,6 +268,9 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); + Comment(";;; Deferred code @%d: %s.", + code->instruction_index(), + code->instr()->Mnemonic()); code->Generate(); __ jmp(code->exit()); } @@ -739,7 +748,7 @@ void LCodeGen::RecordSafepoint( int deoptimization_index) { ASSERT(expected_safepoint_kind_ == kind); - const ZoneList<LOperand*>* operands = pointers->operands(); + const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); Safepoint safepoint = safepoints_.DefineSafepoint(masm(), kind, arguments, deoptimization_index); for (int i = 0; i < operands->length(); i++) { @@ -1032,6 +1041,7 @@ void LCodeGen::DoDivI(LDivI* instr) { virtual void Generate() { codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV); } + virtual LInstruction* instr() { return instr_; } private: LDivI* instr_; }; @@ -1743,25 +1753,35 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { } -void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { +void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { Register scratch = scratch0(); Register reg = ToRegister(instr->InputAt(0)); + int false_block = chunk_->LookupDestination(instr->false_block_id()); - // TODO(fsc): If the expression is known to be a smi, then it's - // definitely not null. Jump to the false block. + // If the expression is known to be untagged or a smi, then it's definitely + // not null, and it can't be a an undetectable object. + if (instr->hydrogen()->representation().IsSpecialization() || + instr->hydrogen()->type().IsSmi()) { + EmitGoto(false_block); + return; + } int true_block = chunk_->LookupDestination(instr->true_block_id()); - int false_block = chunk_->LookupDestination(instr->false_block_id()); - - __ LoadRoot(ip, Heap::kNullValueRootIndex); + Heap::RootListIndex nil_value = instr->nil() == kNullValue ? + Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; + __ LoadRoot(ip, nil_value); __ cmp(reg, ip); - if (instr->is_strict()) { + if (instr->kind() == kStrictEquality) { EmitBranch(true_block, false_block, eq); } else { + Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ? + Heap::kUndefinedValueRootIndex : + Heap::kNullValueRootIndex; Label* true_label = chunk_->GetAssemblyLabel(true_block); Label* false_label = chunk_->GetAssemblyLabel(false_block); __ b(eq, true_label); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ LoadRoot(ip, other_nil_value); __ cmp(reg, ip); __ b(eq, true_label); __ JumpIfSmi(reg, false_label); @@ -1918,28 +1938,36 @@ void LCodeGen::EmitClassOfTest(Label* is_true, ASSERT(!input.is(temp)); ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. __ JumpIfSmi(input, is_false); - __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); - __ b(lt, is_false); - // Map is now in temp. - // Functions have class 'Function'. 
- __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE); if (class_name->IsEqualTo(CStrVector("Function"))) { - __ b(ge, is_true); + // Assuming the following assertions, we can use the same compares to test + // for both being a function type and being in the object type range. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); + __ b(lt, is_false); + __ b(eq, is_true); + __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE)); + __ b(eq, is_true); } else { - __ b(ge, is_false); + // Faster code path to avoid two compares: subtract lower bound from the + // actual type and do a signed compare with the width of the type range. + __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); + __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset)); + __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ b(gt, is_false); } + // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. // Check if the constructor in the map is a function. __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset)); - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - // Objects with a non-function constructor have class 'Object'. __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); if (class_name->IsEqualTo(CStrVector("Object"))) { @@ -2016,9 +2044,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { virtual void Generate() { codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); } - + virtual LInstruction* instr() { return instr_; } Label* map_check() { return &map_check_; } - private: LInstanceOfKnownGlobal* instr_; Label map_check_; @@ -2180,7 +2207,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { Register result = ToRegister(instr->result()); __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); - if (instr->hydrogen()->check_hole_value()) { + if (instr->hydrogen()->RequiresHoleCheck()) { __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ cmp(result, ip); DeoptimizeIf(eq, instr->environment()); @@ -2203,6 +2230,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { Register value = ToRegister(instr->InputAt(0)); Register scratch = scratch0(); + Register scratch2 = ToRegister(instr->TempAt(0)); // Load the cell. __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell()))); @@ -2211,8 +2239,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { // been deleted from the property dictionary. In that case, we need // to update the property details in the property dictionary to mark // it as no longer deleted. 
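The "faster code path to avoid two compares" in EmitClassOfTest above is the standard range-check-by-subtraction trick: shift the range down to start at zero, then a single comparison against the range width covers both bounds. The generated code uses the signed flavour together with its surrounding checks; the self-contained unsigned form of the same trick, with invented bounds standing in for FIRST_NONCALLABLE_SPEC_OBJECT_TYPE and LAST_NONCALLABLE_SPEC_OBJECT_TYPE, looks like this:

    #include <cstdint>

    constexpr uint8_t kFirstInRange = 0xA0;  // invented bound
    constexpr uint8_t kLastInRange  = 0xA7;  // invented bound

    // Equivalent to (t >= kFirstInRange && t <= kLastInRange), but with one
    // comparison: values below the lower bound wrap around to large unsigned
    // numbers after the subtraction and fail the <= test.
    inline bool InTypeRange(uint8_t t) {
      return static_cast<uint8_t>(t - kFirstInRange) <=
             static_cast<uint8_t>(kLastInRange - kFirstInRange);
    }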
- if (instr->hydrogen()->check_hole_value()) { - Register scratch2 = ToRegister(instr->TempAt(0)); + if (instr->hydrogen()->RequiresHoleCheck()) { __ ldr(scratch2, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); @@ -2222,6 +2249,15 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { // Store the value. __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); + + // Cells are always in the remembered set. + __ RecordWriteField(scratch, + JSGlobalPropertyCell::kValueOffset, + value, + scratch2, + kLRHasBeenSaved, + kSaveFPRegs, + OMIT_REMEMBERED_SET); } @@ -2247,10 +2283,15 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { Register context = ToRegister(instr->context()); Register value = ToRegister(instr->value()); - __ str(value, ContextOperand(context, instr->slot_index())); + MemOperand target = ContextOperand(context, instr->slot_index()); + __ str(value, target); if (instr->needs_write_barrier()) { - int offset = Context::SlotOffset(instr->slot_index()); - __ RecordWrite(context, Operand(offset), value, scratch0()); + __ RecordWriteContextSlot(context, + target.offset(), + value, + scratch0(), + kLRHasBeenSaved, + kSaveFPRegs); } } @@ -2500,13 +2541,9 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); } - if (instr->hydrogen()->RequiresHoleCheck()) { - // TODO(danno): If no hole check is required, there is no need to allocate - // elements into a temporary register, instead scratch can be used. - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); - } + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ cmp(scratch, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); __ vldr(result, elements, 0); } @@ -2577,6 +2614,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -2906,6 +2944,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { virtual void Generate() { codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); } + virtual LInstruction* instr() { return instr_; } private: LUnaryMathOperation* instr_; }; @@ -3202,7 +3241,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); - CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT); + CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ Drop(1); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -3262,7 +3301,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { __ str(value, FieldMemOperand(object, offset)); if (instr->needs_write_barrier()) { // Update the write barrier for the object for in-object properties. - __ RecordWrite(object, Operand(offset), value, scratch); + __ RecordWriteField( + object, offset, value, scratch, kLRHasBeenSaved, kSaveFPRegs); } } else { __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); @@ -3270,7 +3310,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { // Update the write barrier for the properties array. 
// object is used as a scratch register. - __ RecordWrite(scratch, Operand(offset), value, object); + __ RecordWriteField( + scratch, offset, value, object, kLRHasBeenSaved, kSaveFPRegs); } } } @@ -3301,6 +3342,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; Register scratch = scratch0(); + // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS + // conversion, so it deopts in that case. + if (instr->hydrogen()->ValueNeedsSmiCheck()) { + __ tst(value, Operand(kSmiTagMask)); + DeoptimizeIf(ne, instr->environment()); + } + // Do the store. if (instr->key()->IsConstantOperand()) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); @@ -3315,8 +3363,8 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { if (instr->hydrogen()->NeedsWriteBarrier()) { // Compute address of modified element and store it into key register. - __ add(key, scratch, Operand(FixedArray::kHeaderSize)); - __ RecordWrite(elements, key, value); + __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ RecordWrite(elements, key, value, kLRHasBeenSaved, kSaveFPRegs); } } @@ -3417,6 +3465,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3452,6 +3501,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStringCharCodeAt* instr_; }; @@ -3575,6 +3625,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStringCharFromCode* instr_; }; @@ -3646,6 +3697,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } + virtual LInstruction* instr() { return instr_; } private: LNumberTagI* instr_; }; @@ -3711,6 +3763,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } + virtual LInstruction* instr() { return instr_; } private: LNumberTagD* instr_; }; @@ -3819,16 +3872,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, } -class DeferredTaggedToI: public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } - private: - LTaggedToI* instr_; -}; - - void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { Register input_reg = ToRegister(instr->InputAt(0)); Register scratch1 = scratch0(); @@ -3911,6 +3954,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { void LCodeGen::DoTaggedToI(LTaggedToI* instr) { + class DeferredTaggedToI: public LDeferredCode { + public: 
+ DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } + virtual LInstruction* instr() { return instr_; } + private: + LTaggedToI* instr_; + }; + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); ASSERT(input->Equals(instr->result())); @@ -4343,10 +4396,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = ne; } else if (type_name->Equals(heap()->function_symbol())) { + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); - __ CompareObjectType(input, input, scratch, - FIRST_CALLABLE_SPEC_OBJECT_TYPE); - final_branch_condition = ge; + __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE); + __ b(eq, true_label); + __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE)); + final_branch_condition = eq; } else if (type_name->Equals(heap()->object_symbol())) { __ JumpIfSmi(input, false_label); @@ -4468,6 +4523,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStackCheck* instr_; }; diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index ead848903..711e4595e 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -376,16 +376,20 @@ class LCodeGen BASE_EMBEDDED { class LDeferredCode: public ZoneObject { public: explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), external_exit_(NULL) { + : codegen_(codegen), + external_exit_(NULL), + instruction_index_(codegen->current_instruction_) { codegen->AddDeferredCode(this); } virtual ~LDeferredCode() { } virtual void Generate() = 0; + virtual LInstruction* instr() = 0; void SetExit(Label *exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } + int instruction_index() const { return instruction_index_; } protected: LCodeGen* codegen() const { return codegen_; } @@ -396,6 +400,7 @@ class LDeferredCode: public ZoneObject { Label entry_; Label exit_; Label* external_exit_; + int instruction_index_; }; } } // namespace v8::internal diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index f37f31021..918f9ebe0 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -42,7 +42,8 @@ namespace internal { MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) : Assembler(arg_isolate, buffer, size), generating_stub_(false), - allow_stub_calls_(true) { + allow_stub_calls_(true), + has_frame_(false) { if (isolate() != NULL) { code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), isolate()); @@ -406,32 +407,6 @@ void MacroAssembler::StoreRoot(Register source, } -void MacroAssembler::RecordWriteHelper(Register object, - Register address, - Register scratch) { - if (emit_debug_code()) { - // Check that the object is not in new space. - Label not_in_new_space; - InNewSpace(object, scratch, ne, ¬_in_new_space); - Abort("new-space object passed to RecordWriteHelper"); - bind(¬_in_new_space); - } - - // Calculate page address. - Bfc(object, 0, kPageSizeBits); - - // Calculate region number. 
- Ubfx(address, address, Page::kRegionSizeLog2, - kPageSizeBits - Page::kRegionSizeLog2); - - // Mark region dirty. - ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset)); - mov(ip, Operand(1)); - orr(scratch, scratch, Operand(ip, LSL, address)); - str(scratch, MemOperand(object, Page::kDirtyFlagOffset)); -} - - void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cond, @@ -443,38 +418,52 @@ void MacroAssembler::InNewSpace(Register object, } -// Will clobber 4 registers: object, offset, scratch, ip. The -// register 'object' contains a heap object pointer. The heap object -// tag is shifted away. -void MacroAssembler::RecordWrite(Register object, - Operand offset, - Register scratch0, - Register scratch1) { - // The compiled code assumes that record write doesn't change the - // context register, so we check that none of the clobbered - // registers are cp. - ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp)); - +void MacroAssembler::RecordWriteField( + Register object, + int offset, + Register value, + Register dst, + LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + // First, check if a write barrier is even needed. The tests below + // catch stores of Smis. Label done; - // First, test that the object is not in the new space. We cannot set - // region marks for new space pages. - InNewSpace(object, scratch0, eq, &done); + // Skip barrier if writing a smi. + if (smi_check == INLINE_SMI_CHECK) { + JumpIfSmi(value, &done); + } - // Add offset into the object. - add(scratch0, object, offset); + // Although the object register is tagged, the offset is relative to the start + // of the object, so so offset must be a multiple of kPointerSize. + ASSERT(IsAligned(offset, kPointerSize)); - // Record the actual write. - RecordWriteHelper(object, scratch0, scratch1); + add(dst, object, Operand(offset - kHeapObjectTag)); + if (emit_debug_code()) { + Label ok; + tst(dst, Operand((1 << kPointerSizeLog2) - 1)); + b(eq, &ok); + stop("Unaligned cell in write barrier"); + bind(&ok); + } + + RecordWrite(object, + dst, + value, + lr_status, + save_fp, + remembered_set_action, + OMIT_SMI_CHECK); bind(&done); - // Clobber all input registers when running with the debug-code flag + // Clobber clobbered input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - mov(object, Operand(BitCast<int32_t>(kZapValue))); - mov(scratch0, Operand(BitCast<int32_t>(kZapValue))); - mov(scratch1, Operand(BitCast<int32_t>(kZapValue))); + mov(value, Operand(BitCast<int32_t>(kZapValue + 4))); + mov(dst, Operand(BitCast<int32_t>(kZapValue + 8))); } } @@ -484,29 +473,94 @@ void MacroAssembler::RecordWrite(Register object, // tag is shifted away. void MacroAssembler::RecordWrite(Register object, Register address, - Register scratch) { + Register value, + LinkRegisterStatus lr_status, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { // The compiled code assumes that record write doesn't change the // context register, so we check that none of the clobbered // registers are cp. - ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp)); + ASSERT(!address.is(cp) && !value.is(cp)); Label done; - // First, test that the object is not in the new space. We cannot set - // region marks for new space pages. 
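The new RecordWriteField above (and the RecordWrite it delegates to, continued below) restructure the write barrier around page flags instead of the old card-marking helper: the barrier is skipped entirely when the value is a smi, when the value's page is not an interesting target, or when the object's page is not an interesting source; only then is the out-of-line stub invoked. A schematic, self-contained version with invented types and flag names:

    #include <cstdint>

    struct ToyPage {
      bool pointers_to_here_are_interesting;    // e.g. the page is in new space
      bool pointers_from_here_are_interesting;  // e.g. incremental marking is on
    };

    struct ToyHeapObject {
      ToyPage* page;
    };

    inline bool IsSmi(intptr_t v) { return (v & 1) == 0; }

    static int g_slow_barrier_calls = 0;
    inline void RecordWriteSlowPath(ToyHeapObject*, intptr_t*, intptr_t) {
      ++g_slow_barrier_calls;  // stands in for calling RecordWriteStub
    }

    // Store a tagged value into a field and filter out the cases where no
    // barrier work is needed, mirroring the inline checks emitted above.
    // value_obj is only consulted when the value is a heap object.
    inline void StoreFieldWithBarrier(ToyHeapObject* host, intptr_t* slot,
                                      intptr_t value, ToyHeapObject* value_obj) {
      *slot = value;
      if (IsSmi(value)) return;  // INLINE_SMI_CHECK
      if (!value_obj->page->pointers_to_here_are_interesting) return;
      if (!host->page->pointers_from_here_are_interesting) return;
      RecordWriteSlowPath(host, slot, value);  // remembered set + marker
    }

Keeping both filters inline is the design point of this patch: the common stores (smis, and pointers no collector phase cares about) never pay for the stub call.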
- InNewSpace(object, scratch, eq, &done); + if (smi_check == INLINE_SMI_CHECK) { + ASSERT_EQ(0, kSmiTag); + tst(value, Operand(kSmiTagMask)); + b(eq, &done); + } + + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + eq, + &done); + CheckPageFlag(object, + value, // Used as scratch. + MemoryChunk::kPointersFromHereAreInterestingMask, + eq, + &done); // Record the actual write. - RecordWriteHelper(object, address, scratch); + if (lr_status == kLRHasNotBeenSaved) { + push(lr); + } + RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + CallStub(&stub); + if (lr_status == kLRHasNotBeenSaved) { + pop(lr); + } bind(&done); - // Clobber all input registers when running with the debug-code flag + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - mov(object, Operand(BitCast<int32_t>(kZapValue))); - mov(address, Operand(BitCast<int32_t>(kZapValue))); - mov(scratch, Operand(BitCast<int32_t>(kZapValue))); + mov(address, Operand(BitCast<int32_t>(kZapValue + 12))); + mov(value, Operand(BitCast<int32_t>(kZapValue + 16))); + } +} + + +void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. + Register address, + Register scratch, + SaveFPRegsMode fp_mode, + RememberedSetFinalAction and_then) { + Label done; + if (FLAG_debug_code) { + Label ok; + JumpIfNotInNewSpace(object, scratch, &ok); + stop("Remembered set pointer is in new space"); + bind(&ok); + } + // Load store buffer top. + ExternalReference store_buffer = + ExternalReference::store_buffer_top(isolate()); + mov(ip, Operand(store_buffer)); + ldr(scratch, MemOperand(ip)); + // Store pointer to buffer and increment buffer top. + str(address, MemOperand(scratch, kPointerSize, PostIndex)); + // Write back new top of buffer. + str(scratch, MemOperand(ip)); + // Call stub on end of buffer. + // Check for end of buffer. + tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); + if (and_then == kFallThroughAtEnd) { + b(eq, &done); + } else { + ASSERT(and_then == kReturnAtEnd); + Ret(eq); + } + push(lr); + StoreBufferOverflowStub store_buffer_overflow = + StoreBufferOverflowStub(fp_mode); + CallStub(&store_buffer_overflow); + pop(lr); + bind(&done); + if (and_then == kReturnAtEnd) { + Ret(); } } @@ -961,6 +1015,9 @@ void MacroAssembler::InvokeCode(Register code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, @@ -988,6 +1045,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code, RelocInfo::Mode rmode, InvokeFlag flag, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; InvokePrologue(expected, actual, code, no_reg, &done, flag, @@ -1011,6 +1071,9 @@ void MacroAssembler::InvokeFunction(Register fun, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + // Contract with called JS functions requires that function is passed in r1. ASSERT(fun.is(r1)); @@ -1035,6 +1098,9 @@ void MacroAssembler::InvokeFunction(JSFunction* function, const ParameterCount& actual, InvokeFlag flag, CallKind call_kind) { + // You can't call a function without a valid frame. 
+ ASSERT(flag == JUMP_FUNCTION || has_frame()); + ASSERT(function->is_compiled()); // Get the function and setup the context. @@ -1090,10 +1156,10 @@ void MacroAssembler::IsObjectJSStringType(Register object, #ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { - ASSERT(allow_stub_calls()); mov(r0, Operand(0, RelocInfo::NONE)); mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); CEntryStub ces(1); + ASSERT(AllowThisStubCall(&ces)); Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } #endif @@ -1793,13 +1859,127 @@ void MacroAssembler::CompareRoot(Register obj, void MacroAssembler::CheckFastElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_ELEMENTS == 0); + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); b(hi, fail); } +void MacroAssembler::CheckFastObjectElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); + ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); + cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + b(ls, fail); + cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); + b(hi, fail); +} + + +void MacroAssembler::CheckFastSmiOnlyElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); + cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + b(hi, fail); +} + + +void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, + Register key_reg, + Register receiver_reg, + Register elements_reg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* fail) { + Label smi_value, maybe_nan, have_double_value, is_nan, done; + Register mantissa_reg = scratch2; + Register exponent_reg = scratch3; + + // Handle smi values specially. + JumpIfSmi(value_reg, &smi_value); + + // Ensure that the object is a heap number + CheckMap(value_reg, + scratch1, + isolate()->factory()->heap_number_map(), + fail, + DONT_DO_SMI_CHECK); + + // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 + // in the exponent. + mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); + ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); + cmp(exponent_reg, scratch1); + b(ge, &maybe_nan); + + ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + + bind(&have_double_value); + add(scratch1, elements_reg, + Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); + str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); + uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); + str(exponent_reg, FieldMemOperand(scratch1, offset)); + jmp(&done); + + bind(&maybe_nan); + // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise + // it's an Infinity, and the non-NaN code path applies. + b(gt, &is_nan); + ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + cmp(mantissa_reg, Operand(0)); + b(eq, &have_double_value); + bind(&is_nan); + // Load canonical NaN for storing into the double array. 
+ uint64_t nan_int64 = BitCast<uint64_t>( + FixedDoubleArray::canonical_not_the_hole_nan_as_double()); + mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); + mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); + jmp(&have_double_value); + + bind(&smi_value); + add(scratch1, elements_reg, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + add(scratch1, scratch1, + Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); + // scratch1 is now effective address of the double element + + FloatingPointHelper::Destination destination; + if (CpuFeatures::IsSupported(VFP3)) { + destination = FloatingPointHelper::kVFPRegisters; + } else { + destination = FloatingPointHelper::kCoreRegisters; + } + + Register untagged_value = receiver_reg; + SmiUntag(untagged_value, value_reg); + FloatingPointHelper::ConvertIntToDouble(this, + untagged_value, + destination, + d0, + mantissa_reg, + exponent_reg, + scratch4, + s2); + if (destination == FloatingPointHelper::kVFPRegisters) { + CpuFeatures::Scope scope(VFP3); + vstr(d0, scratch1, 0); + } else { + str(mantissa_reg, MemOperand(scratch1, 0)); + str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); + } + bind(&done); +} + + void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map, @@ -1895,13 +2075,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond); } MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -1913,13 +2093,12 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) { void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); } MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 
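StoreNumberToDoubleElements above converts smis to raw doubles and rewrites every NaN to FixedDoubleArray::canonical_not_the_hole_nan_as_double() before storing: an unboxed double array marks holes with a reserved NaN bit pattern (the kHoleNanUpper32 check on the load side earlier in this patch), so a NaN produced by user code must never alias that pattern. A sketch of the invariant with invented bit patterns, not V8's real encodings:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    constexpr uint64_t kHoleNanBits      = 0x7FF7FFFFFFFFFFFFull;  // invented
    constexpr uint64_t kCanonicalNanBits = 0x7FF8000000000000ull;  // a quiet NaN

    // Convert a double to the bits stored in the unboxed array, making sure a
    // user-visible NaN can never be confused with the hole marker.
    inline uint64_t ToStorageBits(double value) {
      if (std::isnan(value)) return kCanonicalNanBits;
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return bits;
    }

    inline bool IsHole(uint64_t storage_bits) {
      return storage_bits == kHoleNanBits;  // what the load-side check tests
    }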
Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -2022,6 +2201,12 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( } +bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { + if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; + return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); +} + + void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { add(sp, sp, Operand(num_arguments * kPointerSize)); @@ -2417,8 +2602,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); mov(r0, Operand(function->nargs)); mov(r1, Operand(ExternalReference(function, isolate()))); - CEntryStub stub(1); - stub.SaveDoubles(); + CEntryStub stub(1, kSaveFPRegs); CallStub(&stub); } @@ -2491,6 +2675,9 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference( void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { + // You can't call a builtin without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + GetBuiltinEntry(r2, id); if (flag == CALL_FUNCTION) { call_wrapper.BeforeCall(CallSize(r2)); @@ -2622,14 +2809,20 @@ void MacroAssembler::Abort(const char* msg) { RecordComment(msg); } #endif - // Disable stub call restrictions to always allow calls to abort. - AllowStubCallsScope allow_scope(this, true); mov(r0, Operand(p0)); push(r0); mov(r0, Operand(Smi::FromInt(p1 - p0))); push(r0); - CallRuntime(Runtime::kAbort, 2); + // Disable stub call restrictions to always allow calls to abort. + if (!has_frame_) { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. + FrameScope scope(this, StackFrame::NONE); + CallRuntime(Runtime::kAbort, 2); + } else { + CallRuntime(Runtime::kAbort, 2); + } // will not return here if (is_const_pool_blocked()) { // If the calling code cares about the exact number of @@ -2930,6 +3123,19 @@ void MacroAssembler::CopyBytes(Register src, } +void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler) { + Label loop, entry; + b(&entry); + bind(&loop); + str(filler, MemOperand(start_offset, kPointerSize, PostIndex)); + bind(&entry); + cmp(start_offset, end_offset); + b(lt, &loop); +} + + void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. Register source, // Input. 
Register scratch) { @@ -3089,23 +3295,15 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { - CallCFunctionHelper(no_reg, - function, - ip, - num_reg_arguments, - num_double_arguments); + mov(ip, Operand(function)); + CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); } void MacroAssembler::CallCFunction(Register function, - Register scratch, - int num_reg_arguments, - int num_double_arguments) { - CallCFunctionHelper(function, - ExternalReference::the_hole_value_location(isolate()), - scratch, - num_reg_arguments, - num_double_arguments); + int num_reg_arguments, + int num_double_arguments) { + CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } @@ -3116,17 +3314,15 @@ void MacroAssembler::CallCFunction(ExternalReference function, void MacroAssembler::CallCFunction(Register function, - Register scratch, int num_arguments) { - CallCFunction(function, scratch, num_arguments, 0); + CallCFunction(function, num_arguments, 0); } void MacroAssembler::CallCFunctionHelper(Register function, - ExternalReference function_reference, - Register scratch, int num_reg_arguments, int num_double_arguments) { + ASSERT(has_frame()); // Make sure that the stack is aligned before calling a C function unless // running in the simulator. The simulator has its own alignment check which // provides more information. @@ -3150,10 +3346,6 @@ void MacroAssembler::CallCFunctionHelper(Register function, // Just call directly. The function called cannot cause a GC, or // allow preemption, so the return address in the link register // stays correct. - if (function.is(no_reg)) { - mov(scratch, Operand(function_reference)); - function = scratch; - } Call(function); int stack_passed_arguments = CalculateStackPassedWords( num_reg_arguments, num_double_arguments); @@ -3185,6 +3377,185 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, } +void MacroAssembler::CheckPageFlag( + Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met) { + and_(scratch, object, Operand(~Page::kPageAlignmentMask)); + ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); + tst(scratch, Operand(mask)); + b(cc, condition_met); +} + + +void MacroAssembler::JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black) { + HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); +} + + +void MacroAssembler::HasColor(Register object, + Register bitmap_scratch, + Register mask_scratch, + Label* has_color, + int first_bit, + int second_bit) { + ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); + + GetMarkBits(object, bitmap_scratch, mask_scratch); + + Label other_color, word_boundary; + ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + tst(ip, Operand(mask_scratch)); + b(first_bit == 1 ? eq : ne, &other_color); + // Shift left 1 by adding. + add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC); + b(eq, &word_boundary); + tst(ip, Operand(mask_scratch)); + b(second_bit == 1 ? ne : eq, has_color); + jmp(&other_color); + + bind(&word_boundary); + ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); + tst(ip, Operand(1)); + b(second_bit == 1 ? ne : eq, has_color); + bind(&other_color); +} + + +// Detect some, but not all, common pointer-free objects. 
This is used by the +// incremental write barrier which doesn't care about oddballs (they are always +// marked black immediately so this code is not hit). +void MacroAssembler::JumpIfDataObject(Register value, + Register scratch, + Label* not_data_object) { + Label is_data_object; + ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); + CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); + b(eq, &is_data_object); + ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + // If it's a string and it's not a cons string then it's an object containing + // no GC pointers. + ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); + b(ne, not_data_object); + bind(&is_data_object); +} + + +void MacroAssembler::GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg) { + ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); + and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); + Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); + const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; + Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits); + add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2)); + mov(ip, Operand(1)); + mov(mask_reg, Operand(ip, LSL, mask_reg)); +} + + +void MacroAssembler::EnsureNotWhite( + Register value, + Register bitmap_scratch, + Register mask_scratch, + Register load_scratch, + Label* value_is_white_and_not_data) { + ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); + GetMarkBits(value, bitmap_scratch, mask_scratch); + + // If the value is black or grey we don't need to do anything. + ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); + ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + + Label done; + + // Since both black and grey have a 1 in the first position and white does + // not have a 1 there we only need to check one bit. + ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + tst(mask_scratch, load_scratch); + b(ne, &done); + + if (FLAG_debug_code) { + // Check for impossible bit pattern. + Label ok; + // LSL may overflow, making the check conservative. + tst(load_scratch, Operand(mask_scratch, LSL, 1)); + b(eq, &ok); + stop("Impossible marking bit pattern"); + bind(&ok); + } + + // Value is white. We check whether it is data that doesn't need scanning. + // Currently only checks for HeapNumber and non-cons strings. + Register map = load_scratch; // Holds map while checking type. + Register length = load_scratch; // Holds length of object after testing type. + Label is_data_object; + + // Check for heap-number + ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); + CompareRoot(map, Heap::kHeapNumberMapRootIndex); + mov(length, Operand(HeapNumber::kSize), LeaveCC, eq); + b(eq, &is_data_object); + + // Check for strings. + ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + // If it's a string and it's not a cons string then it's an object containing + // no GC pointers. 
+ Register instance_type = load_scratch; + ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); + tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); + b(ne, value_is_white_and_not_data); + // It's a non-indirect (non-cons and non-slice) string. + // If it's external, the length is just ExternalString::kSize. + // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). + // External strings are the only ones with the kExternalStringTag bit + // set. + ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); + ASSERT_EQ(0, kConsStringTag & kExternalStringTag); + tst(instance_type, Operand(kExternalStringTag)); + mov(length, Operand(ExternalString::kSize), LeaveCC, ne); + b(ne, &is_data_object); + + // Sequential string, either ASCII or UC16. + // For ASCII (char-size of 1) we shift the smi tag away to get the length. + // For UC16 (char-size of 2) we just leave the smi tag in place, thereby + // getting the length multiplied by 2. + ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); + ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + ldr(ip, FieldMemOperand(value, String::kLengthOffset)); + tst(instance_type, Operand(kStringEncodingMask)); + mov(ip, Operand(ip, LSR, 1), LeaveCC, ne); + add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); + and_(length, length, Operand(~kObjectAlignmentMask)); + + bind(&is_data_object); + // Value is a data object, and it is white. Mark it black. Since we know + // that the object is white we can make it black by flipping one bit. + ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + orr(ip, ip, Operand(mask_scratch)); + str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + + and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); + ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); + add(ip, ip, Operand(length)); + str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); + + bind(&done); +} + + void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { Usat(output_reg, 8, Operand(input_reg)); } @@ -3234,6 +3605,17 @@ void MacroAssembler::LoadInstanceDescriptors(Register map, } +bool AreAliased(Register r1, Register r2, Register r3, Register r4) { + if (r1.is(r2)) return true; + if (r1.is(r3)) return true; + if (r1.is(r4)) return true; + if (r2.is(r3)) return true; + if (r2.is(r4)) return true; + if (r3.is(r4)) return true; + return false; +} + + CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), instructions_(instructions), diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 6084fde2d..8ee468a91 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -29,6 +29,7 @@ #define V8_ARM_MACRO_ASSEMBLER_ARM_H_ #include "assembler.h" +#include "frames.h" #include "v8globals.h" namespace v8 { @@ -79,6 +80,14 @@ enum ObjectToDoubleFlags { }; +enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; +enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; +enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; + + +bool AreAliased(Register r1, Register r2, Register r3, Register r4); + + // MacroAssembler implements a collection of frequently used macros. 
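The marking helpers above (GetMarkBits, HasColor, JumpIfDataObject, EnsureNotWhite) all rely on each page carrying a bitmap with one mark bit per pointer-sized word, where an object's colour is the pair formed by its own bit and the next one (hence the kBlackBitPattern "10" and kGreyBitPattern "11" assertions). Below is a minimal sketch of the same address arithmetic in plain C++, using illustrative constants (4-byte pointers, 32-bit bitmap cells, 1 MB pages) rather than the real header values:

#include <cstdint>
#include <cstdio>

// Illustrative constants, not the real header values: 4-byte pointers,
// 32-bit bitmap cells, 1 MB pages with the bitmap at a fixed offset.
const uintptr_t kPageSize        = 1u << 20;
const uintptr_t kPointerSizeLog2 = 2;
const uintptr_t kBitsPerCellLog2 = 5;   // 32 mark bits per bitmap cell.

struct MarkBitLocation {
  uintptr_t cell_index;  // Which 32-bit cell of the page's bitmap.
  uint32_t  mask;        // Single bit selecting this word's mark bit.
};

// Same arithmetic as GetMarkBits above: one mark bit per pointer-aligned
// word, grouped into cells; an object's colour is its bit plus the next one.
MarkBitLocation ComputeMarkBits(uintptr_t addr) {
  uintptr_t page_offset = addr & (kPageSize - 1);
  uintptr_t bit_index = page_offset >> kPointerSizeLog2;
  MarkBitLocation loc;
  loc.cell_index = bit_index >> kBitsPerCellLog2;
  loc.mask = 1u << (bit_index & ((1u << kBitsPerCellLog2) - 1));
  return loc;
}

int main() {
  MarkBitLocation a = ComputeMarkBits(0x2a8040);  // two objects 8 bytes apart
  MarkBitLocation b = ComputeMarkBits(0x2a8048);
  std::printf("cell %lu mask %08x / cell %lu mask %08x\n",
              static_cast<unsigned long>(a.cell_index), a.mask,
              static_cast<unsigned long>(b.cell_index), b.mask);
  return 0;
}

Two objects eight bytes apart land in the same cell with masks two bit positions apart, which is why HasColor can reach the second colour bit simply by adding mask_scratch to itself.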
class MacroAssembler: public Assembler { public: @@ -157,40 +166,126 @@ class MacroAssembler: public Assembler { Heap::RootListIndex index, Condition cond = al); + // --------------------------------------------------------------------------- + // GC Support + + void IncrementalMarkingRecordWriteHelper(Register object, + Register value, + Register address); + + enum RememberedSetFinalAction { + kReturnAtEnd, + kFallThroughAtEnd + }; + + // Record in the remembered set the fact that we have a pointer to new space + // at the address pointed to by the addr register. Only works if addr is not + // in new space. + void RememberedSetHelper(Register object, // Used for debug code. + Register addr, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetFinalAction and_then); + + void CheckPageFlag(Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met); + + // Check if object is in new space. Jumps if the object is not in new space. + // The register scratch can be object itself, but scratch will be clobbered. + void JumpIfNotInNewSpace(Register object, + Register scratch, + Label* branch) { + InNewSpace(object, scratch, ne, branch); + } - // Check if object is in new space. - // scratch can be object itself, but it will be clobbered. - void InNewSpace(Register object, - Register scratch, - Condition cond, // eq for new space, ne otherwise - Label* branch); - + // Check if object is in new space. Jumps if the object is in new space. + // The register scratch can be object itself, but it will be clobbered. + void JumpIfInNewSpace(Register object, + Register scratch, + Label* branch) { + InNewSpace(object, scratch, eq, branch); + } - // For the page containing |object| mark the region covering [address] - // dirty. The object address must be in the first 8K of an allocated page. - void RecordWriteHelper(Register object, - Register address, - Register scratch); + // Check if an object has a given incremental marking color. + void HasColor(Register object, + Register scratch0, + Register scratch1, + Label* has_color, + int first_bit, + int second_bit); - // For the page containing |object| mark the region covering - // [object+offset] dirty. The object address must be in the first 8K - // of an allocated page. The 'scratch' registers are used in the - // implementation and all 3 registers are clobbered by the - // operation, as well as the ip register. RecordWrite updates the - // write barrier even when storing smis. - void RecordWrite(Register object, - Operand offset, + void JumpIfBlack(Register object, Register scratch0, - Register scratch1); + Register scratch1, + Label* on_black); + + // Checks the color of an object. If the object is already grey or black + // then we just fall through, since it is already live. If it is white and + // we can determine that it doesn't need to be scanned, then we just mark it + // black and fall through. For the rest we jump to the label so the + // incremental marker can fix its assumptions. + void EnsureNotWhite(Register object, + Register scratch1, + Register scratch2, + Register scratch3, + Label* object_is_white_and_not_data); - // For the page containing |object| mark the region covering - // [address] dirty. The object address must be in the first 8K of an - // allocated page. All 3 registers are clobbered by the operation, - // as well as the ip register. RecordWrite updates the write barrier - // even when storing smis. 
- void RecordWrite(Register object, - Register address, - Register scratch); + // Detects conservatively whether an object is data-only, ie it does need to + // be scanned by the garbage collector. + void JumpIfDataObject(Register value, + Register scratch, + Label* not_data_object); + + // Notify the garbage collector that we wrote a pointer into an object. + // |object| is the object being stored into, |value| is the object being + // stored. value and scratch registers are clobbered by the operation. + // The offset is the offset from the start of the object, not the offset from + // the tagged HeapObject pointer. For use with FieldOperand(reg, off). + void RecordWriteField( + Register object, + int offset, + Register value, + Register scratch, + LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); + + // As above, but the offset has the tag presubtracted. For use with + // MemOperand(reg, off). + inline void RecordWriteContextSlot( + Register context, + int offset, + Register value, + Register scratch, + LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK) { + RecordWriteField(context, + offset + kHeapObjectTag, + value, + scratch, + lr_status, + save_fp, + remembered_set_action, + smi_check); + } + + // For a given |object| notify the garbage collector that the slot |address| + // has been written. |value| is the object being stored. The value and + // address registers are clobbered by the operation. + void RecordWrite( + Register object, + Register address, + Register value, + LinkRegisterStatus lr_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); // Push a handle. void Push(Handle<Object> handle); @@ -318,16 +413,6 @@ class MacroAssembler: public Assembler { const double imm, const Condition cond = al); - - // --------------------------------------------------------------------------- - // Activation frames - - void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } - void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } - - void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } - void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } - // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. void EnterExitFrame(bool save_doubles, int stack_space = 0); @@ -569,6 +654,13 @@ class MacroAssembler: public Assembler { Register length, Register scratch); + // Initialize fields with filler values. Fields starting at |start_offset| + // not including end_offset are overwritten with the value in |filler|. At + // the end the loop, |start_offset| takes the value of |end_offset|. + void InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler); + // --------------------------------------------------------------------------- // Support functions. @@ -608,6 +700,31 @@ class MacroAssembler: public Assembler { Register scratch, Label* fail); + // Check if a map for a JSObject indicates that the object can have both smi + // and HeapObject elements. Jump to the specified label if it does not. + void CheckFastObjectElements(Register map, + Register scratch, + Label* fail); + + // Check if a map for a JSObject indicates that the object has fast smi only + // elements. 
Jump to the specified label if it does not. + void CheckFastSmiOnlyElements(Register map, + Register scratch, + Label* fail); + + // Check to see if maybe_number can be stored as a double in + // FastDoubleElements. If it can, store it at the index specified by key in + // the FastDoubleElements array elements, otherwise jump to fail. + void StoreNumberToDoubleElements(Register value_reg, + Register key_reg, + Register receiver_reg, + Register elements_reg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* fail); + // Check if the map of an object is equal to a specified map (either // given directly or as an index into the root list) and branch to // label if not. Skip the smi check if not required (object is known @@ -830,11 +947,11 @@ class MacroAssembler: public Assembler { // return address (unless this is somehow accounted for by the called // function). void CallCFunction(ExternalReference function, int num_arguments); - void CallCFunction(Register function, Register scratch, int num_arguments); + void CallCFunction(Register function, int num_arguments); void CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments); - void CallCFunction(Register function, Register scratch, + void CallCFunction(Register function, int num_reg_arguments, int num_double_arguments); @@ -902,6 +1019,9 @@ class MacroAssembler: public Assembler { bool generating_stub() { return generating_stub_; } void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } bool allow_stub_calls() { return allow_stub_calls_; } + void set_has_frame(bool value) { has_frame_ = value; } + bool has_frame() { return has_frame_; } + inline bool AllowThisStubCall(CodeStub* stub); // EABI variant for double arguments in use. bool use_eabi_hardfloat() { @@ -1048,10 +1168,12 @@ class MacroAssembler: public Assembler { void LoadInstanceDescriptors(Register map, Register descriptors); + // Activation support. + void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); + private: void CallCFunctionHelper(Register function, - ExternalReference function_reference, - Register scratch, int num_reg_arguments, int num_double_arguments); @@ -1067,16 +1189,25 @@ class MacroAssembler: public Assembler { const CallWrapper& call_wrapper, CallKind call_kind); - // Activation support. - void EnterFrame(StackFrame::Type type); - void LeaveFrame(StackFrame::Type type); - void InitializeNewString(Register string, Register length, Heap::RootListIndex map_index, Register scratch1, Register scratch2); + // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. + void InNewSpace(Register object, + Register scratch, + Condition cond, // eq for new space, ne otherwise. + Label* branch); + + // Helper for finding the mark bits for an address. Afterwards, the + // bitmap register points at the word with the mark bits and the mask + // the position of the first bit. Leaves addr_reg unchanged. + inline void GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg); + // Compute memory operands for safepoint stack slots. static int SafepointRegisterStackIndex(int reg_code); MemOperand SafepointRegisterSlot(Register reg); @@ -1084,6 +1215,7 @@ class MacroAssembler: public Assembler { bool generating_stub_; bool allow_stub_calls_; + bool has_frame_; // This handle will be patched with the code object on installation. 
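The has_frame_ flag and AllowThisStubCall declared in this hunk are maintained by the new FrameScope objects used throughout the rest of the patch; they let checks such as ASSERT(flag == JUMP_FUNCTION || has_frame()) in InvokeBuiltin reject code paths that would call out without a stack frame. A rough sketch of the RAII shape involved, using stand-in ToyAssembler/ToyFrameScope types rather than V8's real classes:

#include <cassert>

// Stand-in types; V8's MacroAssembler and FrameScope are richer than this.
class ToyAssembler {
 public:
  bool has_frame() const { return has_frame_; }
  void set_has_frame(bool value) { has_frame_ = value; }
 private:
  bool has_frame_ = false;
};

enum ToyFrameType { TOY_INTERNAL, TOY_MANUAL, TOY_NONE };

class ToyFrameScope {
 public:
  ToyFrameScope(ToyAssembler* masm, ToyFrameType type)
      : masm_(masm), old_has_frame_(masm->has_frame()) {
    masm_->set_has_frame(true);
    // A real INTERNAL scope would also emit the frame construction code
    // here; MANUAL and NONE only flip the bookkeeping bit.
    (void)type;
  }
  ~ToyFrameScope() { masm_->set_has_frame(old_has_frame_); }
 private:
  ToyAssembler* masm_;
  bool old_has_frame_;
};

int main() {
  ToyAssembler masm;
  assert(!masm.has_frame());
  {
    ToyFrameScope scope(&masm, TOY_MANUAL);
    assert(masm.has_frame());   // Stub/runtime calls needing a frame are OK.
  }
  assert(!masm.has_frame());    // Previous state restored on scope exit.
  return 0;
}

MANUAL and NONE scopes only flip the bookkeeping bit, which matches how Abort() and the RegExp entry code in this patch claim a frame without emitting one.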
Handle<Object> code_object_; diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index cd76edbf1..c87646793 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -371,9 +371,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( // Isolate. __ mov(r3, Operand(ExternalReference::isolate_address())); - ExternalReference function = - ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); - __ CallCFunction(function, argument_count); + { + AllowExternalCallThatCantCauseGC scope(masm_); + ExternalReference function = + ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + __ CallCFunction(function, argument_count); + } // Check if function returned non-zero for success or zero for failure. __ cmp(r0, Operand(0, RelocInfo::NONE)); @@ -611,6 +614,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { // Entry code: __ bind(&entry_label_); + + // Tell the system that we have a stack frame. Because the type is MANUAL, no code + // is generated. + FrameScope scope(masm_, StackFrame::MANUAL); + + // Actually emit code to start a new stack frame. // Push arguments // Save callee-save registers. // Start new stack frame. diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 6af535553..570420262 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -1618,6 +1618,8 @@ void Simulator::HandleRList(Instruction* instr, bool load) { ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address); intptr_t* address = reinterpret_cast<intptr_t*>(start_address); + // Catch null pointers a little earlier. + ASSERT(start_address > 8191 || start_address < 0); int reg = 0; while (rlist != 0) { if ((rlist & 1) != 0) { diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index f8565924b..4558afe68 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -431,7 +431,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the now unused name_reg as a scratch register. - __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch); + __ mov(name_reg, r0); + __ RecordWriteField(receiver_reg, + offset, + name_reg, + scratch, + kLRHasNotBeenSaved, + kDontSaveFPRegs); } else { // Write to the properties array. int offset = index * kPointerSize + FixedArray::kHeaderSize; @@ -444,7 +450,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Ok to clobber receiver_reg and name_reg, since we return. - __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg); + __ mov(name_reg, r0); + __ RecordWriteField(scratch, + offset, + name_reg, + receiver_reg, + kLRHasNotBeenSaved, + kDontSaveFPRegs); } // Return the value (register r0).
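The RecordWriteField calls that replace the old Operand(offset)-based RecordWrite above take the stored value plus kLRHasNotBeenSaved and kDontSaveFPRegs because the barrier now serves two collectors: it records old-to-new pointers for the scavenger and greys values for the incremental marker, and its slow path may call into C++ (hence the care about the link register and the FP registers). The sketch below is a loose conceptual model of that classification with invented Page and TaggedValue types, not V8's actual page-flag checks:

#include <cstdio>
#include <cstdint>

// Invented stand-ins for per-page state; not V8's MemoryChunk flags.
struct Page {
  bool in_new_space;
  bool marking_is_on;   // Incremental marking still cares about this page.
};

struct TaggedValue {
  uintptr_t word;
  Page* page;           // Page of the pointee; ignored when word is a smi.
  bool IsSmi() const { return (word & 1) == 0; }  // assume low tag bit 0 = smi
};

enum BarrierWork {
  kNoWork = 0,
  kRecordInStoreBuffer = 1,   // old-to-new pointer: scavenger must find slot
  kGreyValueForMarker = 2     // keep "black never points to white" invariant
};

// What a RecordWrite-style barrier has to do for a store of `value` into an
// object living on `host_page`.
int ClassifyWrite(const Page* host_page, const TaggedValue& value) {
  if (value.IsSmi()) return kNoWork;   // the INLINE_SMI_CHECK fast path
  int work = kNoWork;
  if (value.page->in_new_space && !host_page->in_new_space) {
    work |= kRecordInStoreBuffer;      // the EMIT_REMEMBERED_SET case
  }
  if (value.page->marking_is_on) {
    work |= kGreyValueForMarker;       // the incremental marking hook
  }
  return work;
}

int main() {
  Page old_space_page = { false, true };
  Page new_space_page = { true, false };
  TaggedValue young_object = { 0x1001, &new_space_page };
  TaggedValue smi_value = { 2 << 1, NULL };
  std::printf("pointer store: %d, smi store: %d\n",
              ClassifyWrite(&old_space_page, young_object),
              ClassifyWrite(&old_space_page, smi_value));
  return 0;
}

The smi early-out corresponds to the INLINE_SMI_CHECK default; callers that know the value is a heap object can pass OMIT_SMI_CHECK, and callers that know it is a smi can skip the barrier altogether.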
@@ -553,9 +565,10 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) { } -static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, - const CallOptimization& optimization, - int argc) { +static MaybeObject* GenerateFastApiDirectCall( + MacroAssembler* masm, + const CallOptimization& optimization, + int argc) { // ----------- S t a t e ------------- // -- sp[0] : holder (set by CheckPrototypes) // -- sp[4] : callee js function @@ -591,6 +604,8 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, ApiFunction fun(api_function_address); const int kApiStackSpace = 4; + + FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); // r0 = v8::Arguments& @@ -616,9 +631,11 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, ExternalReference ref = ExternalReference(&fun, ExternalReference::DIRECT_API_CALL, masm->isolate()); + AllowExternalCallThatCantCauseGC scope(masm); return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); } + class CallInterceptorCompiler BASE_EMBEDDED { public: CallInterceptorCompiler(StubCompiler* stub_compiler, @@ -794,7 +811,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { miss_label); // Call a runtime function to load the interceptor property. - __ EnterInternalFrame(); + FrameScope scope(masm, StackFrame::INTERNAL); // Save the name_ register across the call. __ push(name_); @@ -811,7 +828,8 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Restore the name_ register. __ pop(name_); - __ LeaveInternalFrame(); + + // Leave the internal frame. } void LoadWithInterceptor(MacroAssembler* masm, @@ -820,18 +838,19 @@ class CallInterceptorCompiler BASE_EMBEDDED { JSObject* holder_obj, Register scratch, Label* interceptor_succeeded) { - __ EnterInternalFrame(); - __ Push(holder, name_); - - CompileCallLoadPropertyWithInterceptor(masm, - receiver, - holder, - name_, - holder_obj); - - __ pop(name_); // Restore the name. - __ pop(receiver); // Restore the holder. - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(holder, name_); + + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); + + __ pop(name_); // Restore the name. + __ pop(receiver); // Restore the holder. + } // If interceptor returns no-result sentinel, call the constant function. __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex); @@ -1228,7 +1247,10 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, ApiFunction fun(getter_address); const int kApiStackSpace = 1; + + FrameScope frame_scope(masm(), StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); + // Create AccessorInfo instance on the stack above the exit frame with // scratch2 (internal::Object **args_) as the data. __ str(scratch2, MemOperand(sp, 1 * kPointerSize)); @@ -1288,41 +1310,43 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Save necessary data before invoking an interceptor. // Requires a frame to make GC aware of pushed pointers. - __ EnterInternalFrame(); + { + FrameScope frame_scope(masm(), StackFrame::INTERNAL); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - // CALLBACKS case needs a receiver to be passed into C++ callback. - __ Push(receiver, holder_reg, name_reg); - } else { - __ Push(holder_reg, name_reg); - } + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + // CALLBACKS case needs a receiver to be passed into C++ callback. 
+ __ Push(receiver, holder_reg, name_reg); + } else { + __ Push(holder_reg, name_reg); + } - // Invoke an interceptor. Note: map checks from receiver to - // interceptor's holder has been compiled before (see a caller - // of this method.) - CompileCallLoadPropertyWithInterceptor(masm(), - receiver, - holder_reg, - name_reg, - interceptor_holder); - - // Check if interceptor provided a value for property. If it's - // the case, return immediately. - Label interceptor_failed; - __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); - __ cmp(r0, scratch1); - __ b(eq, &interceptor_failed); - __ LeaveInternalFrame(); - __ Ret(); + // Invoke an interceptor. Note: map checks from receiver to + // interceptor's holder has been compiled before (see a caller + // of this method.) + CompileCallLoadPropertyWithInterceptor(masm(), + receiver, + holder_reg, + name_reg, + interceptor_holder); + + // Check if interceptor provided a value for property. If it's + // the case, return immediately. + Label interceptor_failed; + __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); + __ cmp(r0, scratch1); + __ b(eq, &interceptor_failed); + frame_scope.GenerateLeaveFrame(); + __ Ret(); - __ bind(&interceptor_failed); - __ pop(name_reg); - __ pop(holder_reg); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - __ pop(receiver); - } + __ bind(&interceptor_failed); + __ pop(name_reg); + __ pop(holder_reg); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + __ pop(receiver); + } - __ LeaveInternalFrame(); + // Leave the internal frame. + } // Check that the maps from interceptor's holder to lookup's holder // haven't changed. And load lookup's holder into |holder| register. @@ -1556,7 +1580,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, DONT_DO_SMI_CHECK); if (argc == 1) { // Otherwise fall through to call the builtin. - Label exit, with_write_barrier, attempt_to_grow_elements; + Label attempt_to_grow_elements; // Get the array's length into r0 and calculate new length. __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1571,11 +1595,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ cmp(r0, r4); __ b(gt, &attempt_to_grow_elements); + // Check if value is a smi. + Label with_write_barrier; + __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); + __ JumpIfNotSmi(r4, &with_write_barrier); + // Save new length. __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); // Push the element. - __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); // We may need a register containing the address end_elements below, // so write back the value in end_elements. __ add(end_elements, elements, @@ -1585,14 +1613,31 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); // Check for a smi. - __ JumpIfNotSmi(r4, &with_write_barrier); - __ bind(&exit); __ Drop(argc + 1); __ Ret(); __ bind(&with_write_barrier); - __ InNewSpace(elements, r4, eq, &exit); - __ RecordWriteHelper(elements, end_elements, r4); + + __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ CheckFastSmiOnlyElements(r6, r6, &call_builtin); + + // Save new length. + __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + + // Push the element. + // We may need a register containing the address end_elements below, + // so write back the value in end_elements. 
+ __ add(end_elements, elements, + Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); + + __ RecordWrite(elements, + end_elements, + r4, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); __ Drop(argc + 1); __ Ret(); @@ -1604,6 +1649,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ b(&call_builtin); } + __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize)); + // Growing elements that are SMI-only requires special handling in case + // the new element is non-Smi. For now, delegate to the builtin. + Label no_fast_elements_check; + __ JumpIfSmi(r2, &no_fast_elements_check); + __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ CheckFastObjectElements(r7, r7, &call_builtin); + __ bind(&no_fast_elements_check); + Isolate* isolate = masm()->isolate(); ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(isolate); @@ -1630,8 +1684,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, // Update new_space_allocation_top. __ str(r6, MemOperand(r7)); // Push the argument. - __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize)); - __ str(r6, MemOperand(end_elements)); + __ str(r2, MemOperand(end_elements)); // Fill the rest with holes. __ LoadRoot(r6, Heap::kTheHoleValueRootIndex); for (int i = 1; i < kAllocationDelta; i++) { @@ -2713,6 +2766,15 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // Store the value in the cell. __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset)); + __ mov(r1, r0); + __ RecordWriteField(r4, + JSGlobalPropertyCell::kValueOffset, + r1, + r2, + kLRHasNotBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET); + Counters* counters = masm()->isolate()->counters(); __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3); __ Ret(); @@ -3116,7 +3178,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) { } -MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic( +MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic( MapList* receiver_maps, CodeList* handler_ics) { // ----------- S t a t e ------------- @@ -3212,9 +3274,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { } -MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic( +MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic( MapList* receiver_maps, - CodeList* handler_ics) { + CodeList* handler_stubs, + MapList* transitioned_maps) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -3227,12 +3290,20 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic( int receiver_count = receiver_maps->length(); __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map(receiver_maps->at(current)); - Handle<Code> code(handler_ics->at(current)); + for (int i = 0; i < receiver_count; ++i) { + Handle<Map> map(receiver_maps->at(i)); + Handle<Code> code(handler_stubs->at(i)); __ mov(ip, Operand(map)); __ cmp(r3, ip); - __ Jump(code, RelocInfo::CODE_TARGET, eq); + if (transitioned_maps->at(i) == NULL) { + __ Jump(code, RelocInfo::CODE_TARGET, eq); + } else { + Label next_map; + __ b(eq, &next_map); + __ mov(r4, Operand(Handle<Map>(transitioned_maps->at(i)))); + __ Jump(code, RelocInfo::CODE_TARGET, al); + __ bind(&next_map); + } } __ bind(&miss); @@ -3454,6 +3525,7 @@ static bool 
IsElementTypeSigned(ElementsKind elements_kind) { case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3540,6 +3612,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( } break; case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3880,6 +3953,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } break; case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3943,6 +4017,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -4082,6 +4157,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -4234,8 +4310,10 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( } -void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, - bool is_js_array) { +void KeyedStoreStubCompiler::GenerateStoreFastElement( + MacroAssembler* masm, + bool is_js_array, + ElementsKind elements_kind) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -4244,7 +4322,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, // -- r3 : scratch // -- r4 : scratch (elements) // ----------------------------------- - Label miss_force_generic; + Label miss_force_generic, transition_elements_kind; Register value_reg = r0; Register key_reg = r1; @@ -4277,15 +4355,33 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, __ cmp(key_reg, scratch); __ b(hs, &miss_force_generic); - __ add(scratch, - elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ str(value_reg, - MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ RecordWrite(scratch, - Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize), - receiver_reg , elements_reg); - + if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + __ JumpIfNotSmi(value_reg, &transition_elements_kind); + __ add(scratch, + elements_reg, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ add(scratch, + scratch, + Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value_reg, MemOperand(scratch)); + } else { + ASSERT(elements_kind == FAST_ELEMENTS); + __ add(scratch, + elements_reg, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ add(scratch, + scratch, + Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ str(value_reg, MemOperand(scratch)); + __ mov(receiver_reg, value_reg); + __ RecordWrite(elements_reg, // Object. + scratch, // Address. + receiver_reg, // Value. + kLRHasNotBeenSaved, + kDontSaveFPRegs); + } // value_reg (r0) is preserved. // Done. 
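The branch on FAST_SMI_ONLY_ELEMENTS above encodes the new invariant that a smi-only backing store contains no heap pointers, so element stores need no write barrier, while storing a heap object must first transition the elements kind (here by bailing out to KeyedStoreIC_Miss). A small illustrative classification of that decision, reusing the enum names from the diff but otherwise an assumption rather than a copy of the stub logic:

#include <cstdio>

// Enum names mirror the diff; the classification itself is illustrative.
enum ElementsKind {
  FAST_SMI_ONLY_ELEMENTS,   // backing store has only ever held smis
  FAST_ELEMENTS             // backing store may hold arbitrary tagged values
};

enum StoreAction {
  STORE_WITHOUT_BARRIER,    // smis are not pointers: no RecordWrite needed
  STORE_WITH_BARRIER,       // pointer store into a tagged backing store
  TRANSITION_ELEMENTS_KIND  // must generalize the kind first (miss path)
};

StoreAction ClassifyElementStore(ElementsKind kind, bool value_is_smi) {
  if (kind == FAST_SMI_ONLY_ELEMENTS) {
    // A heap-object store would break the smi-only invariant, so the stub
    // bails out to code that can change the map (KeyedStoreIC_Miss).
    return value_is_smi ? STORE_WITHOUT_BARRIER : TRANSITION_ELEMENTS_KIND;
  }
  // FAST_ELEMENTS: smis may still skip the barrier, heap objects may not.
  return value_is_smi ? STORE_WITHOUT_BARRIER : STORE_WITH_BARRIER;
}

int main() {
  std::printf("%d %d %d\n",
              ClassifyElementStore(FAST_SMI_ONLY_ELEMENTS, true),
              ClassifyElementStore(FAST_SMI_ONLY_ELEMENTS, false),
              ClassifyElementStore(FAST_ELEMENTS, false));
  return 0;
}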
__ Ret(); @@ -4294,6 +4390,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ Jump(ic, RelocInfo::CODE_TARGET); + + __ bind(&transition_elements_kind); + Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); + __ Jump(ic_miss, RelocInfo::CODE_TARGET); } @@ -4309,15 +4409,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- r4 : scratch // -- r5 : scratch // ----------------------------------- - Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value; + Label miss_force_generic, transition_elements_kind; Register value_reg = r0; Register key_reg = r1; Register receiver_reg = r2; - Register scratch = r3; - Register elements_reg = r4; - Register mantissa_reg = r5; - Register exponent_reg = r6; + Register elements_reg = r3; + Register scratch1 = r4; + Register scratch2 = r5; + Register scratch3 = r6; Register scratch4 = r7; // This stub is meant to be tail-jumped to, the receiver must already @@ -4329,90 +4429,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Check that the key is within bounds. if (is_js_array) { - __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); } else { - __ ldr(scratch, + __ ldr(scratch1, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); } // Compare smis, unsigned compare catches both negative and out-of-bound // indexes. - __ cmp(key_reg, scratch); + __ cmp(key_reg, scratch1); __ b(hs, &miss_force_generic); - // Handle smi values specially. - __ JumpIfSmi(value_reg, &smi_value); - - // Ensure that the object is a heap number - __ CheckMap(value_reg, - scratch, - masm->isolate()->factory()->heap_number_map(), - &miss_force_generic, - DONT_DO_SMI_CHECK); - - // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 - // in the exponent. - __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32)); - __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); - __ cmp(exponent_reg, scratch); - __ b(ge, &maybe_nan); - - __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - - __ bind(&have_double_value); - __ add(scratch, elements_reg, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize)); - uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); - __ str(exponent_reg, FieldMemOperand(scratch, offset)); - __ Ret(); - - __ bind(&maybe_nan); - // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise - // it's an Infinity, and the non-NaN code path applies. - __ b(gt, &is_nan); - __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - __ cmp(mantissa_reg, Operand(0)); - __ b(eq, &have_double_value); - __ bind(&is_nan); - // Load canonical NaN for storing into the double array. 
- uint64_t nan_int64 = BitCast<uint64_t>( - FixedDoubleArray::canonical_not_the_hole_nan_as_double()); - __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); - __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); - __ jmp(&have_double_value); - - __ bind(&smi_value); - __ add(scratch, elements_reg, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); - __ add(scratch, scratch, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - // scratch is now effective address of the double element - - FloatingPointHelper::Destination destination; - if (CpuFeatures::IsSupported(VFP3)) { - destination = FloatingPointHelper::kVFPRegisters; - } else { - destination = FloatingPointHelper::kCoreRegisters; - } - - Register untagged_value = receiver_reg; - __ SmiUntag(untagged_value, value_reg); - FloatingPointHelper::ConvertIntToDouble( - masm, - untagged_value, - destination, - d0, - mantissa_reg, - exponent_reg, - scratch4, - s2); - if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatures::Scope scope(VFP3); - __ vstr(d0, scratch, 0); - } else { - __ str(mantissa_reg, MemOperand(scratch, 0)); - __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes)); - } + __ StoreNumberToDoubleElements(value_reg, + key_reg, + receiver_reg, + elements_reg, + scratch1, + scratch2, + scratch3, + scratch4, + &transition_elements_kind); __ Ret(); // Handle store cache miss, replacing the ic with the generic stub. @@ -4420,6 +4455,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ Jump(ic, RelocInfo::CODE_TARGET); + + __ bind(&transition_elements_kind); + Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); + __ Jump(ic_miss, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 4dd23c8bb..e1d7c2064 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -201,17 +201,14 @@ function ConvertToString(x) { function ConvertToLocaleString(e) { - if (e == null) { + if (IS_NULL_OR_UNDEFINED(e)) { return ''; } else { - // e_obj's toLocaleString might be overwritten, check if it is a function. - // Call ToString if toLocaleString is not a function. - // See issue 877615. + // According to ES5, section 15.4.4.3, the toLocaleString conversion + // must throw a TypeError if ToObject(e).toLocaleString isn't + // callable.
var e_obj = ToObject(e); - if (IS_SPEC_FUNCTION(e_obj.toLocaleString)) - return ToString(e_obj.toLocaleString()); - else - return ToString(e); + return %ToString(e_obj.toLocaleString()); } } @@ -381,18 +378,31 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) { function ArrayToString() { - if (!IS_ARRAY(this)) { - throw new $TypeError('Array.prototype.toString is not generic'); + var array; + var func; + if (IS_ARRAY(this)) { + func = this.join; + if (func === ArrayJoin) { + return Join(this, this.length, ',', ConvertToString); + } + array = this; + } else { + array = ToObject(this); + func = array.join; } - return Join(this, this.length, ',', ConvertToString); + if (!IS_SPEC_FUNCTION(func)) { + return %_CallFunction(array, ObjectToString); + } + return %_CallFunction(array, func); } function ArrayToLocaleString() { - if (!IS_ARRAY(this)) { - throw new $TypeError('Array.prototype.toString is not generic'); - } - return Join(this, this.length, ',', ConvertToLocaleString); + var array = ToObject(this); + var arrayLen = array.length; + var len = TO_UINT32(arrayLen); + if (len === 0) return ""; + return Join(array, len, ',', ConvertToLocaleString); } @@ -993,21 +1003,24 @@ function ArrayFilter(f, receiver) { ["Array.prototype.filter"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. + var array = ToObject(this); + var length = ToUint32(array.length); + if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. - var length = ToUint32(this.length); + var result = []; var result_length = 0; for (var i = 0; i < length; i++) { - var current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { - if (%_CallFunction(receiver, current, i, this, f)) { + var current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { + if (%_CallFunction(receiver, current, i, array, f)) { result[result_length++] = current; } } @@ -1022,19 +1035,22 @@ function ArrayForEach(f, receiver) { ["Array.prototype.forEach"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. + var array = ToObject(this); + var length = TO_UINT32(array.length); + if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. - var length = TO_UINT32(this.length); + for (var i = 0; i < length; i++) { - var current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { - %_CallFunction(receiver, current, i, this, f); + var current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { + %_CallFunction(receiver, current, i, array, f); } } } @@ -1048,19 +1064,22 @@ function ArraySome(f, receiver) { ["Array.prototype.some"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. 
+ var array = ToObject(this); + var length = TO_UINT32(array.length); + if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. - var length = TO_UINT32(this.length); + for (var i = 0; i < length; i++) { - var current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { - if (%_CallFunction(receiver, current, i, this, f)) return true; + var current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { + if (%_CallFunction(receiver, current, i, array, f)) return true; } } return false; @@ -1073,19 +1092,22 @@ function ArrayEvery(f, receiver) { ["Array.prototype.every"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. + var array = ToObject(this); + var length = TO_UINT32(array.length); + if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. - var length = TO_UINT32(this.length); + for (var i = 0; i < length; i++) { - var current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { - if (!%_CallFunction(receiver, current, i, this, f)) return false; + var current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { + if (!%_CallFunction(receiver, current, i, array, f)) return false; } } return true; @@ -1097,21 +1119,24 @@ function ArrayMap(f, receiver) { ["Array.prototype.map"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. + var array = ToObject(this); + var length = TO_UINT32(array.length); + if (!IS_SPEC_FUNCTION(f)) { throw MakeTypeError('called_non_callable', [ f ]); } if (IS_NULL_OR_UNDEFINED(receiver)) { receiver = %GetDefaultReceiver(f) || receiver; } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. - var length = TO_UINT32(this.length); + var result = new $Array(); var accumulator = new InternalArray(length); for (var i = 0; i < length; i++) { - var current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { - accumulator[i] = %_CallFunction(receiver, current, i, this, f); + var current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { + accumulator[i] = %_CallFunction(receiver, current, i, array, f); } } %MoveArrayContents(accumulator, result); @@ -1245,19 +1270,20 @@ function ArrayReduce(callback, current) { ["Array.prototype.reduce"]); } + // Pull out the length so that modifications to the length in the + // loop will not affect the looping and side effects are visible. + var array = ToObject(this); + var length = ToUint32(array.length); + if (!IS_SPEC_FUNCTION(callback)) { throw MakeTypeError('called_non_callable', [callback]); } - // Pull out the length so that modifications to the length in the - // loop will not affect the looping. 
- var length = ToUint32(this.length); var i = 0; - find_initial: if (%_ArgumentsLength() < 2) { for (; i < length; i++) { - current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { + current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { i++; break find_initial; } @@ -1267,9 +1293,9 @@ function ArrayReduce(callback, current) { var receiver = %GetDefaultReceiver(callback); for (; i < length; i++) { - var element = this[i]; - if (!IS_UNDEFINED(element) || i in this) { - current = %_CallFunction(receiver, current, element, i, this, callback); + var element = array[i]; + if (!IS_UNDEFINED(element) || i in array) { + current = %_CallFunction(receiver, current, element, i, array, callback); } } return current; @@ -1281,15 +1307,20 @@ function ArrayReduceRight(callback, current) { ["Array.prototype.reduceRight"]); } + // Pull out the length so that side effects are visible before the + // callback function is checked. + var array = ToObject(this); + var length = ToUint32(array.length); + if (!IS_SPEC_FUNCTION(callback)) { throw MakeTypeError('called_non_callable', [callback]); } - var i = ToUint32(this.length) - 1; + var i = length - 1; find_initial: if (%_ArgumentsLength() < 2) { for (; i >= 0; i--) { - current = this[i]; - if (!IS_UNDEFINED(current) || i in this) { + current = array[i]; + if (!IS_UNDEFINED(current) || i in array) { i--; break find_initial; } @@ -1299,9 +1330,9 @@ function ArrayReduceRight(callback, current) { var receiver = %GetDefaultReceiver(callback); for (; i >= 0; i--) { - var element = this[i]; - if (!IS_UNDEFINED(element) || i in this) { - current = %_CallFunction(receiver, current, element, i, this, callback); + var element = array[i]; + if (!IS_UNDEFINED(element) || i in array) { + current = %_CallFunction(receiver, current, element, i, array, callback); } } return current; diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index ad5f35081..bda85e69d 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -38,6 +38,7 @@ #include "deoptimizer.h" #include "execution.h" #include "ic-inl.h" +#include "incremental-marking.h" #include "factory.h" #include "runtime.h" #include "runtime-profiler.h" @@ -47,6 +48,7 @@ #include "ast.h" #include "regexp-macro-assembler.h" #include "platform.h" +#include "store-buffer.h" // Include native regexp-macro-assembler. #ifndef V8_INTERPRETED_REGEXP #if V8_TARGET_ARCH_IA32 @@ -516,6 +518,7 @@ void RelocIterator::next() { RelocIterator::RelocIterator(Code* code, int mode_mask) { + rinfo_.host_ = code; rinfo_.pc_ = code->instruction_start(); rinfo_.data_ = 0; // Relocation info is read backwards. 
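The external references added in the hunks that follow (store_buffer_overflow_function, store_buffer_top) expose the remembered set to generated code: the write-barrier fast path appends a slot address through a published top pointer and only calls into C++ when the buffer fills up. Below is a toy version of that mechanism, just the shape of it rather than V8's StoreBuffer class:

#include <cstdint>

// A toy bounded remembered set: slots are appended through a published top
// pointer and an overflow routine runs when the buffer is full.
class ToyStoreBuffer {
 public:
  uintptr_t** top_address() { return &top_; }   // what codegen would load

  // Slow-path insert; the generated fast path does the same pointer bump.
  void Insert(uintptr_t* slot) {
    if (top_ == buffer_ + kCapacity) Overflow();
    *top_++ = reinterpret_cast<uintptr_t>(slot);
  }

 private:
  static const int kCapacity = 1024;            // illustrative size

  void Overflow() {
    // A real implementation would deduplicate slots or mark whole pages as
    // needing a rescan; the toy version simply starts over.
    top_ = buffer_;
  }

  uintptr_t buffer_[kCapacity];
  uintptr_t* top_ = buffer_;
};

int main() {
  ToyStoreBuffer store_buffer;
  uintptr_t some_slot = 0;
  for (int i = 0; i < 3000; i++) {
    store_buffer.Insert(&some_slot);            // overflows twice
  }
  return 0;
}

Publishing the address of top_ is what allows a code generator to load, bump and store the pointer inline, in the way ExternalReference::store_buffer_top is meant to be used.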
@@ -736,9 +739,38 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref) : address_(table_ref.address()) {} +ExternalReference ExternalReference:: + incremental_marking_record_write_function(Isolate* isolate) { + return ExternalReference(Redirect( + isolate, + FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode))); +} + + +ExternalReference ExternalReference:: + incremental_evacuation_record_write_function(Isolate* isolate) { + return ExternalReference(Redirect( + isolate, + FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode))); +} + + +ExternalReference ExternalReference:: + store_buffer_overflow_function(Isolate* isolate) { + return ExternalReference(Redirect( + isolate, + FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow))); +} + + +ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) { + return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache))); +} + + ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) { - return ExternalReference(Redirect(isolate, - FUNCTION_ADDR(Runtime::PerformGC))); + return + ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC))); } @@ -802,17 +834,6 @@ ExternalReference ExternalReference::keyed_lookup_cache_field_offsets( } -ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) { - return ExternalReference(isolate->factory()->the_hole_value().location()); -} - - -ExternalReference ExternalReference::arguments_marker_location( - Isolate* isolate) { - return ExternalReference(isolate->factory()->arguments_marker().location()); -} - - ExternalReference ExternalReference::roots_address(Isolate* isolate) { return ExternalReference(isolate->heap()->roots_address()); } @@ -840,9 +861,14 @@ ExternalReference ExternalReference::new_space_start(Isolate* isolate) { } +ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) { + return ExternalReference(isolate->heap()->store_buffer()->TopAddress()); +} + + ExternalReference ExternalReference::new_space_mask(Isolate* isolate) { - Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask()); - return ExternalReference(mask); + return ExternalReference(reinterpret_cast<Address>( + isolate->heap()->NewSpaceMask())); } diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index d58034df0..e5661c9f1 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -143,6 +143,9 @@ class Label BASE_EMBEDDED { }; +enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs }; + + // ----------------------------------------------------------------------------- // Relocation information @@ -216,8 +219,9 @@ class RelocInfo BASE_EMBEDDED { RelocInfo() {} - RelocInfo(byte* pc, Mode rmode, intptr_t data) - : pc_(pc), rmode_(rmode), data_(data) { + + RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host) + : pc_(pc), rmode_(rmode), data_(data), host_(host) { } static inline bool IsConstructCall(Mode mode) { @@ -226,6 +230,9 @@ class RelocInfo BASE_EMBEDDED { static inline bool IsCodeTarget(Mode mode) { return mode <= LAST_CODE_ENUM; } + static inline bool IsEmbeddedObject(Mode mode) { + return mode == EMBEDDED_OBJECT; + } // Is the relocation mode affected by GC? 
static inline bool IsGCRelocMode(Mode mode) { return mode <= LAST_GCED_ENUM; @@ -258,6 +265,7 @@ class RelocInfo BASE_EMBEDDED { void set_pc(byte* pc) { pc_ = pc; } Mode rmode() const { return rmode_; } intptr_t data() const { return data_; } + Code* host() const { return host_; } // Apply a relocation by delta bytes INLINE(void apply(intptr_t delta)); @@ -353,6 +361,7 @@ class RelocInfo BASE_EMBEDDED { byte* pc_; Mode rmode_; intptr_t data_; + Code* host_; #ifdef V8_TARGET_ARCH_MIPS // Code and Embedded Object pointers in mips are stored split // across two consecutive 32-bit instructions. Heap management @@ -561,6 +570,13 @@ class ExternalReference BASE_EMBEDDED { // pattern. This means that they have to be added to the // ExternalReferenceTable in serialize.cc manually. + static ExternalReference incremental_marking_record_write_function( + Isolate* isolate); + static ExternalReference incremental_evacuation_record_write_function( + Isolate* isolate); + static ExternalReference store_buffer_overflow_function( + Isolate* isolate); + static ExternalReference flush_icache_function(Isolate* isolate); static ExternalReference perform_gc_function(Isolate* isolate); static ExternalReference fill_heap_number_with_random_function( Isolate* isolate); @@ -577,12 +593,6 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference keyed_lookup_cache_keys(Isolate* isolate); static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate); - // Static variable Factory::the_hole_value.location() - static ExternalReference the_hole_value_location(Isolate* isolate); - - // Static variable Factory::arguments_marker.location() - static ExternalReference arguments_marker_location(Isolate* isolate); - // Static variable Heap::roots_address() static ExternalReference roots_address(Isolate* isolate); @@ -606,6 +616,10 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference new_space_start(Isolate* isolate); static ExternalReference new_space_mask(Isolate* isolate); static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate); + static ExternalReference new_space_mark_bits(Isolate* isolate); + + // Write barrier. + static ExternalReference store_buffer_top(Isolate* isolate); // Used for fast allocation in generated code. static ExternalReference new_space_allocation_top_address(Isolate* isolate); diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 418cc432b..d49381454 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -327,56 +327,77 @@ bool BinaryOperation::ResultOverwriteAllowed() { } -bool CompareOperation::IsLiteralCompareTypeof(Expression** expr, - Handle<String>* check) { - if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false; - - UnaryOperation* left_unary = left_->AsUnaryOperation(); - UnaryOperation* right_unary = right_->AsUnaryOperation(); - Literal* left_literal = left_->AsLiteral(); - Literal* right_literal = right_->AsLiteral(); - - // Check for the pattern: typeof <expression> == <string literal>. - if (left_unary != NULL && left_unary->op() == Token::TYPEOF && - right_literal != NULL && right_literal->handle()->IsString()) { - *expr = left_unary->expression(); - *check = Handle<String>::cast(right_literal->handle()); +static bool IsTypeof(Expression* expr) { + UnaryOperation* maybe_unary = expr->AsUnaryOperation(); + return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF; +} + + +// Check for the pattern: typeof <expression> equals <string literal>. 
+static bool MatchLiteralCompareTypeof(Expression* left, + Token::Value op, + Expression* right, + Expression** expr, + Handle<String>* check) { + if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) { + *expr = left->AsUnaryOperation()->expression(); + *check = Handle<String>::cast(right->AsLiteral()->handle()); return true; } + return false; +} + - // Check for the pattern: <string literal> == typeof <expression>. - if (right_unary != NULL && right_unary->op() == Token::TYPEOF && - left_literal != NULL && left_literal->handle()->IsString()) { - *expr = right_unary->expression(); - *check = Handle<String>::cast(left_literal->handle()); +bool CompareOperation::IsLiteralCompareTypeof(Expression** expr, + Handle<String>* check) { + return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) || + MatchLiteralCompareTypeof(right_, op_, left_, expr, check); +} + + +static bool IsVoidOfLiteral(Expression* expr) { + UnaryOperation* maybe_unary = expr->AsUnaryOperation(); + return maybe_unary != NULL && + maybe_unary->op() == Token::VOID && + maybe_unary->expression()->AsLiteral() != NULL; +} + + +// Check for the pattern: void <literal> equals <expression> +static bool MatchLiteralCompareUndefined(Expression* left, + Token::Value op, + Expression* right, + Expression** expr) { + if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) { + *expr = right; return true; } - return false; } bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) { - if (op_ != Token::EQ_STRICT) return false; + return MatchLiteralCompareUndefined(left_, op_, right_, expr) || + MatchLiteralCompareUndefined(right_, op_, left_, expr); +} - UnaryOperation* left_unary = left_->AsUnaryOperation(); - UnaryOperation* right_unary = right_->AsUnaryOperation(); - // Check for the pattern: <expression> === void <literal>. - if (right_unary != NULL && right_unary->op() == Token::VOID && - right_unary->expression()->AsLiteral() != NULL) { - *expr = left_; +// Check for the pattern: null equals <expression> +static bool MatchLiteralCompareNull(Expression* left, + Token::Value op, + Expression* right, + Expression** expr) { + if (left->IsNullLiteral() && Token::IsEqualityOp(op)) { + *expr = right; return true; } + return false; +} - // Check for the pattern: void <literal> === <expression>. - if (left_unary != NULL && left_unary->op() == Token::VOID && - left_unary->expression()->AsLiteral() != NULL) { - *expr = right_; - return true; - } - return false; +bool CompareOperation::IsLiteralCompareNull(Expression** expr) { + return MatchLiteralCompareNull(left_, op_, right_, expr) || + MatchLiteralCompareNull(right_, op_, left_, expr); } @@ -529,7 +550,9 @@ bool Conditional::IsInlineable() const { bool VariableProxy::IsInlineable() const { - return var()->IsUnallocated() || var()->IsStackAllocated(); + return var()->IsUnallocated() + || var()->IsStackAllocated() + || var()->IsContextSlot(); } @@ -598,11 +621,6 @@ bool CompareOperation::IsInlineable() const { } -bool CompareToNull::IsInlineable() const { - return expression()->IsInlineable(); -} - - bool CountOperation::IsInlineable() const { return expression()->IsInlineable(); } @@ -746,37 +764,41 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global, void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind) { + is_monomorphic_ = oracle->CallIsMonomorphic(this); Property* property = expression()->AsProperty(); - ASSERT(property != NULL); - // Specialize for the receiver types seen at runtime. 
- Literal* key = property->key()->AsLiteral(); - ASSERT(key != NULL && key->handle()->IsString()); - Handle<String> name = Handle<String>::cast(key->handle()); - receiver_types_.Clear(); - oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_); + if (property == NULL) { + // Function call. Specialize for monomorphic calls. + if (is_monomorphic_) target_ = oracle->GetCallTarget(this); + } else { + // Method call. Specialize for the receiver types seen at runtime. + Literal* key = property->key()->AsLiteral(); + ASSERT(key != NULL && key->handle()->IsString()); + Handle<String> name = Handle<String>::cast(key->handle()); + receiver_types_.Clear(); + oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_); #ifdef DEBUG - if (FLAG_enable_slow_asserts) { - int length = receiver_types_.length(); - for (int i = 0; i < length; i++) { - Handle<Map> map = receiver_types_.at(i); - ASSERT(!map.is_null() && *map != NULL); + if (FLAG_enable_slow_asserts) { + int length = receiver_types_.length(); + for (int i = 0; i < length; i++) { + Handle<Map> map = receiver_types_.at(i); + ASSERT(!map.is_null() && *map != NULL); + } } - } #endif - is_monomorphic_ = oracle->CallIsMonomorphic(this); - check_type_ = oracle->GetCallCheckType(this); - if (is_monomorphic_) { - Handle<Map> map; - if (receiver_types_.length() > 0) { - ASSERT(check_type_ == RECEIVER_MAP_CHECK); - map = receiver_types_.at(0); - } else { - ASSERT(check_type_ != RECEIVER_MAP_CHECK); - holder_ = Handle<JSObject>( - oracle->GetPrototypeForPrimitiveCheck(check_type_)); - map = Handle<Map>(holder_->map()); + check_type_ = oracle->GetCallCheckType(this); + if (is_monomorphic_) { + Handle<Map> map; + if (receiver_types_.length() > 0) { + ASSERT(check_type_ == RECEIVER_MAP_CHECK); + map = receiver_types_.at(0); + } else { + ASSERT(check_type_ != RECEIVER_MAP_CHECK); + holder_ = Handle<JSObject>( + oracle->GetPrototypeForPrimitiveCheck(check_type_)); + map = Handle<Map>(holder_->map()); + } + is_monomorphic_ = ComputeTarget(map, name); } - is_monomorphic_ = ComputeTarget(map, name); } } diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index b56205f9a..0efc4835c 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -90,7 +90,6 @@ namespace internal { V(CountOperation) \ V(BinaryOperation) \ V(CompareOperation) \ - V(CompareToNull) \ V(ThisFunction) #define AST_NODE_LIST(V) \ @@ -289,6 +288,12 @@ class Expression: public AstNode { // True iff the expression is a literal represented as a smi. virtual bool IsSmiLiteral() { return false; } + // True iff the expression is a string literal. + virtual bool IsStringLiteral() { return false; } + + // True iff the expression is the null literal. + virtual bool IsNullLiteral() { return false; } + // Type feedback information for assignments and properties. virtual bool IsMonomorphic() { UNREACHABLE(); @@ -393,31 +398,29 @@ class Block: public BreakableStatement { class Declaration: public AstNode { public: Declaration(VariableProxy* proxy, - Variable::Mode mode, + VariableMode mode, FunctionLiteral* fun, Scope* scope) : proxy_(proxy), mode_(mode), fun_(fun), scope_(scope) { - ASSERT(mode == Variable::VAR || - mode == Variable::CONST || - mode == Variable::LET); + ASSERT(mode == VAR || mode == CONST || mode == LET); // At the moment there are no "const functions"'s in JavaScript... 
- ASSERT(fun == NULL || mode == Variable::VAR || mode == Variable::LET); + ASSERT(fun == NULL || mode == VAR || mode == LET); } DECLARE_NODE_TYPE(Declaration) VariableProxy* proxy() const { return proxy_; } - Variable::Mode mode() const { return mode_; } + VariableMode mode() const { return mode_; } FunctionLiteral* fun() const { return fun_; } // may be NULL virtual bool IsInlineable() const; Scope* scope() const { return scope_; } private: VariableProxy* proxy_; - Variable::Mode mode_; + VariableMode mode_; FunctionLiteral* fun_; // Nested scope from which the declaration originated. @@ -891,6 +894,8 @@ class Literal: public Expression { virtual bool IsTrivial() { return true; } virtual bool IsSmiLiteral() { return handle_->IsSmi(); } + virtual bool IsStringLiteral() { return handle_->IsString(); } + virtual bool IsNullLiteral() { return handle_->IsNull(); } // Check if this literal is identical to the other literal. bool IsIdenticalTo(const Literal* other) const { @@ -1465,6 +1470,7 @@ class CompareOperation: public Expression { // Match special cases. bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check); bool IsLiteralCompareUndefined(Expression** expr); + bool IsLiteralCompareNull(Expression** expr); private: Token::Value op_; @@ -1477,25 +1483,6 @@ class CompareOperation: public Expression { }; -class CompareToNull: public Expression { - public: - CompareToNull(Isolate* isolate, bool is_strict, Expression* expression) - : Expression(isolate), is_strict_(is_strict), expression_(expression) { } - - DECLARE_NODE_TYPE(CompareToNull) - - virtual bool IsInlineable() const; - - bool is_strict() const { return is_strict_; } - Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; } - Expression* expression() const { return expression_; } - - private: - bool is_strict_; - Expression* expression_; -}; - - class Conditional: public Expression { public: Conditional(Isolate* isolate, diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index f07e625ec..dc722cb74 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -34,6 +34,7 @@ #include "debug.h" #include "execution.h" #include "global-handles.h" +#include "isolate-inl.h" #include "macro-assembler.h" #include "natives.h" #include "objects-visiting.h" @@ -995,6 +996,26 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, initial_map->instance_size() + 5 * kPointerSize); initial_map->set_instance_descriptors(*descriptors); initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map)); + + // RegExp prototype object is itself a RegExp. + Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map); + proto_map->set_prototype(global_context()->initial_object_prototype()); + Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map); + proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, + heap->empty_string()); + proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, + heap->false_value()); + proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, + heap->false_value()); + proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, + heap->false_value()); + proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex, + Smi::FromInt(0), + SKIP_WRITE_BARRIER); // It's a Smi. 
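// The SKIP_WRITE_BARRIER above is only valid because the stored value is a
// Smi. Write barriers exist so the incremental marker and the store buffer
// learn about pointers into the heap; a tagged small integer is never such a
// pointer, so the slot does not need to be recorded. A standalone sketch of
// the underlying tag check (simplified layout assuming the usual V8
// convention of a clear low bit for Smis; not the real object hierarchy):

#include <stdint.h>

namespace sketch {

const intptr_t kSmiTagMask = 1;  // low bit: 0 => Smi, 1 => heap object
const intptr_t kSmiTag = 0;

inline bool IsSmi(intptr_t tagged_value) {
  return (tagged_value & kSmiTagMask) == kSmiTag;
}

// A store of a Smi can always skip the barrier; anything that might be a
// heap object must take the slow path so the slot can be remembered for
// incremental marking.
inline bool StoreNeedsWriteBarrier(intptr_t tagged_value) {
  return !IsSmi(tagged_value);
}

}  // namespace sketch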
+ initial_map->set_prototype(*proto); + factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto), + JSRegExp::IRREGEXP, factory->empty_string(), + JSRegExp::Flags(0), 0); } { // -- J S O N @@ -1076,6 +1097,11 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, elements->set(0, *array); array = factory->NewFixedArray(0); elements->set(1, *array); + Handle<Map> non_strict_arguments_elements_map = + factory->GetElementsTransitionMap(result, + NON_STRICT_ARGUMENTS_ELEMENTS); + result->set_map(*non_strict_arguments_elements_map); + ASSERT(result->HasNonStrictArgumentsElements()); result->set_elements(*elements); global_context()->set_aliased_arguments_boilerplate(*result); } @@ -1327,6 +1353,8 @@ void Genesis::InstallNativeFunctions() { configure_instance_fun); INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun); INSTALL_NATIVE(JSObject, "functionCache", function_cache); + INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor", + to_complete_property_descriptor); } void Genesis::InstallExperimentalNativeFunctions() { @@ -1555,6 +1583,18 @@ bool Genesis::InstallNatives() { isolate()->builtins()->builtin(Builtins::kArrayConstructCode)); array_function->shared()->DontAdaptArguments(); + // InternalArrays should not use Smi-Only array optimizations. There are too + // many places in the C++ runtime code (e.g. RegEx) that assume that + // elements in InternalArrays can be set to non-Smi values without going + // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT + // transition easy to trap. Moreover, they rarely are smi-only. + MaybeObject* maybe_map = + array_function->initial_map()->CopyDropTransitions(); + Map* new_map; + if (!maybe_map->To<Map>(&new_map)) return maybe_map; + new_map->set_elements_kind(FAST_ELEMENTS); + array_function->set_initial_map(new_map); + // Make "length" magic on instances. Handle<DescriptorArray> array_descriptors = factory()->CopyAppendForeignDescriptor( @@ -1938,14 +1978,15 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) { if (!InstallExtension(extension->dependencies()[i])) return false; } Isolate* isolate = Isolate::Current(); - Vector<const char> source = CStrVector(extension->source()); - Handle<String> source_code = isolate->factory()->NewStringFromAscii(source); - bool result = CompileScriptCached(CStrVector(extension->name()), - source_code, - isolate->bootstrapper()->extensions_cache(), - extension, - Handle<Context>(isolate->context()), - false); + Handle<String> source_code = + isolate->factory()->NewExternalStringFromAscii(extension->source()); + bool result = CompileScriptCached( + CStrVector(extension->name()), + source_code, + isolate->bootstrapper()->extensions_cache(), + extension, + Handle<Context>(isolate->context()), + false); ASSERT(isolate->has_pending_exception() != result); if (!result) { isolate->clear_pending_exception(); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index e6a0699f0..d513200f0 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -33,6 +33,7 @@ #include "builtins.h" #include "gdb-jit.h" #include "ic-inl.h" +#include "mark-compact.h" #include "vm-state-inl.h" namespace v8 { @@ -202,7 +203,7 @@ BUILTIN(ArrayCodeGeneric) { } // 'array' now contains the JSArray we should initialize. - ASSERT(array->HasFastElements()); + ASSERT(array->HasFastTypeElements()); // Optimize the case where there is one argument and the argument is a // small smi. 
@@ -215,7 +216,8 @@ BUILTIN(ArrayCodeGeneric) { { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } - array->SetContent(FixedArray::cast(obj)); + MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj)); + if (maybe_obj->IsFailure()) return maybe_obj; return array; } } @@ -239,6 +241,11 @@ BUILTIN(ArrayCodeGeneric) { if (!maybe_obj->ToObject(&obj)) return maybe_obj; } + // Set length and elements on the array. + MaybeObject* maybe_object = + array->EnsureCanContainElements(FixedArray::cast(obj)); + if (maybe_object->IsFailure()) return maybe_object; + AssertNoAllocation no_gc; FixedArray* elms = FixedArray::cast(obj); WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); @@ -247,7 +254,6 @@ BUILTIN(ArrayCodeGeneric) { elms->set(index, args[index+1], mode); } - // Set length and elements on the array. array->set_elements(FixedArray::cast(obj)); array->set_length(len); @@ -295,6 +301,7 @@ static void CopyElements(Heap* heap, if (mode == UPDATE_WRITE_BARRIER) { heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); } + heap->incremental_marking()->RecordWrites(dst); } @@ -313,6 +320,7 @@ static void MoveElements(Heap* heap, if (mode == UPDATE_WRITE_BARRIER) { heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); } + heap->incremental_marking()->RecordWrites(dst); } @@ -358,6 +366,14 @@ static FixedArray* LeftTrimFixedArray(Heap* heap, former_start[to_trim] = heap->fixed_array_map(); former_start[to_trim + 1] = Smi::FromInt(len - to_trim); + // Maintain marking consistency for HeapObjectIterator and + // IncrementalMarking. + int size_delta = to_trim * kPointerSize; + if (heap->marking()->TransferMark(elms->address(), + elms->address() + size_delta)) { + MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta); + } + return FixedArray::cast(HeapObject::FromAddress( elms->address() + to_trim * kPointerSize)); } @@ -384,20 +400,42 @@ static bool ArrayPrototypeHasNoElements(Heap* heap, MUST_USE_RESULT static inline MaybeObject* EnsureJSArrayWithWritableFastElements( - Heap* heap, Object* receiver) { + Heap* heap, Object* receiver, Arguments* args, int first_added_arg) { if (!receiver->IsJSArray()) return NULL; JSArray* array = JSArray::cast(receiver); HeapObject* elms = array->elements(); - if (elms->map() == heap->fixed_array_map()) return elms; - if (elms->map() == heap->fixed_cow_array_map()) { - return array->EnsureWritableFastElements(); + Map* map = elms->map(); + if (map == heap->fixed_array_map()) { + if (args == NULL || !array->HasFastSmiOnlyElements()) { + return elms; + } + } else if (map == heap->fixed_cow_array_map()) { + MaybeObject* maybe_writable_result = array->EnsureWritableFastElements(); + if (args == NULL || !array->HasFastSmiOnlyElements() || + maybe_writable_result->IsFailure()) { + return maybe_writable_result; + } + } else { + return NULL; } - return NULL; + + // Need to ensure that the arguments passed in args can be contained in + // the array. 
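// The new args/first_added_arg parameters let the helper check, before any
// element is written, whether the incoming values fit the array's current
// elements kind. With FAST_SMI_ONLY_ELEMENTS the backing store may only hold
// Smis, so a single non-Smi argument forces a transition to FAST_ELEMENTS
// first. A standalone sketch of that decision (hypothetical types, not the
// real ElementsKind machinery):

#include <stddef.h>
#include <vector>

namespace sketch {

enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

struct Value { bool is_smi; };

// Returns the elements kind the array must have before the new values are
// stored into it.
inline ElementsKind RequiredKind(ElementsKind current,
                                 const std::vector<Value>& to_add) {
  if (current != FAST_SMI_ONLY_ELEMENTS) return current;
  for (size_t i = 0; i < to_add.size(); ++i) {
    if (!to_add[i].is_smi) return FAST_ELEMENTS;  // transition needed
  }
  return FAST_SMI_ONLY_ELEMENTS;  // all Smis, no transition
}

}  // namespace sketch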
+ int args_length = args->length(); + if (first_added_arg >= args_length) return array->elements(); + + MaybeObject* maybe_array = array->EnsureCanContainElements( + args, + first_added_arg, + args_length - first_added_arg); + if (maybe_array->IsFailure()) return maybe_array; + return array->elements(); } static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap, JSArray* receiver) { + if (!FLAG_clever_optimizations) return false; Context* global_context = heap->isolate()->context()->global_context(); JSObject* array_proto = JSObject::cast(global_context->array_function()->prototype()); @@ -413,20 +451,18 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin( HandleScope handleScope(isolate); Handle<Object> js_builtin = - GetProperty(Handle<JSObject>( - isolate->global_context()->builtins()), - name); - ASSERT(js_builtin->IsJSFunction()); - Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin)); - ScopedVector<Object**> argv(args.length() - 1); - int n_args = args.length() - 1; - for (int i = 0; i < n_args; i++) { - argv[i] = args.at<Object>(i + 1).location(); - } - bool pending_exception = false; + GetProperty(Handle<JSObject>(isolate->global_context()->builtins()), + name); + Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin); + int argc = args.length() - 1; + ScopedVector<Handle<Object> > argv(argc); + for (int i = 0; i < argc; ++i) { + argv[i] = args.at<Object>(i + 1); + } + bool pending_exception; Handle<Object> result = Execution::Call(function, args.receiver(), - n_args, + argc, argv.start(), &pending_exception); if (pending_exception) return Failure::Exception(); @@ -439,7 +475,7 @@ BUILTIN(ArrayPush) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver); + EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1); if (maybe_elms_obj == NULL) { return CallJsBuiltin(isolate, "ArrayPush", args); } @@ -475,7 +511,6 @@ BUILTIN(ArrayPush) { FillWithHoles(heap, new_elms, new_length, capacity); elms = new_elms; - array->set_elements(elms); } // Add the provided values. @@ -485,6 +520,10 @@ BUILTIN(ArrayPush) { elms->set(index + len, args[index + 1], mode); } + if (elms != array->elements()) { + array->set_elements(elms); + } + // Set the length. 
array->set_length(Smi::FromInt(new_length)); return Smi::FromInt(new_length); @@ -496,7 +535,7 @@ BUILTIN(ArrayPop) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver); + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; } @@ -529,7 +568,7 @@ BUILTIN(ArrayShift) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver); + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayShift", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; @@ -539,7 +578,7 @@ BUILTIN(ArrayShift) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastElements()); + ASSERT(array->HasFastTypeElements()); int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); @@ -551,9 +590,7 @@ BUILTIN(ArrayShift) { } if (!heap->lo_space()->Contains(elms)) { - // As elms still in the same space they used to be, - // there is no need to update region dirty mark. - array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER); + array->set_elements(LeftTrimFixedArray(heap, elms, 1)); } else { // Shift the elements. AssertNoAllocation no_gc; @@ -573,7 +610,7 @@ BUILTIN(ArrayUnshift) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver); + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayUnshift", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; @@ -583,7 +620,7 @@ BUILTIN(ArrayUnshift) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastElements()); + ASSERT(array->HasFastTypeElements()); int len = Smi::cast(array->length())->value(); int to_add = args.length() - 1; @@ -592,6 +629,10 @@ BUILTIN(ArrayUnshift) { // we should never hit this case. ASSERT(to_add <= (Smi::kMaxValue - len)); + MaybeObject* maybe_object = + array->EnsureCanContainElements(&args, 1, to_add); + if (maybe_object->IsFailure()) return maybe_object; + if (new_length > elms->length()) { // New backing storage is needed. 
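// The capacity computed on the next line grows the backing store by roughly
// 1.5x plus a small constant, so repeated pushes copy elements an amortized
// constant number of times while tiny arrays still jump straight to a useful
// size. A standalone sketch of the same policy:

namespace sketch {

// Mirrors `new_length + (new_length >> 1) + 16`: half of new_length again,
// plus 16 slots of slack for small arrays.
inline int NewBackingStoreCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;
}

}  // namespace sketch

// For example, growing an array to length 8 allocates 8 + 4 + 16 = 28 slots;
// growing it to length 1000 allocates 1516.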
int capacity = new_length + (new_length >> 1) + 16; @@ -600,13 +641,11 @@ BUILTIN(ArrayUnshift) { if (!maybe_obj->ToObject(&obj)) return maybe_obj; } FixedArray* new_elms = FixedArray::cast(obj); - AssertNoAllocation no_gc; if (len > 0) { CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len); } FillWithHoles(heap, new_elms, new_length, capacity); - elms = new_elms; array->set_elements(elms); } else { @@ -634,7 +673,7 @@ BUILTIN(ArraySlice) { int len = -1; if (receiver->IsJSArray()) { JSArray* array = JSArray::cast(receiver); - if (!array->HasFastElements() || + if (!array->HasFastTypeElements() || !IsJSArrayFastElementMovingAllowed(heap, array)) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -650,7 +689,7 @@ BUILTIN(ArraySlice) { bool is_arguments_object_with_fast_elements = receiver->IsJSObject() && JSObject::cast(receiver)->map() == arguments_map - && JSObject::cast(receiver)->HasFastElements(); + && JSObject::cast(receiver)->HasFastTypeElements(); if (!is_arguments_object_with_fast_elements) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -721,6 +760,10 @@ BUILTIN(ArraySlice) { } FixedArray* result_elms = FixedArray::cast(result); + MaybeObject* maybe_object = + result_array->EnsureCanContainElements(result_elms); + if (maybe_object->IsFailure()) return maybe_object; + AssertNoAllocation no_gc; CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len); @@ -738,7 +781,7 @@ BUILTIN(ArraySplice) { Object* receiver = *args.receiver(); Object* elms_obj; { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver); + EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3); if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArraySplice", args); if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; @@ -748,7 +791,7 @@ BUILTIN(ArraySplice) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastElements()); + ASSERT(array->HasFastTypeElements()); int len = Smi::cast(array->length())->value(); @@ -825,9 +868,9 @@ BUILTIN(ArraySplice) { } int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; - int new_length = len - actual_delete_count + item_count; + bool elms_changed = false; if (item_count < actual_delete_count) { // Shrink the array. const bool trim_array = !heap->lo_space()->Contains(elms) && @@ -842,7 +885,8 @@ BUILTIN(ArraySplice) { } elms = LeftTrimFixedArray(heap, elms, delta); - array->set_elements(elms, SKIP_WRITE_BARRIER); + + elms_changed = true; } else { AssertNoAllocation no_gc; MoveElements(heap, &no_gc, @@ -882,7 +926,7 @@ BUILTIN(ArraySplice) { FillWithHoles(heap, new_elms, new_length, capacity); elms = new_elms; - array->set_elements(elms); + elms_changed = true; } else { AssertNoAllocation no_gc; MoveElements(heap, &no_gc, @@ -898,6 +942,10 @@ BUILTIN(ArraySplice) { elms->set(k, args[3 + k - actual_start], mode); } + if (elms_changed) { + array->set_elements(elms); + } + // Set the length. 
array->set_length(Smi::FromInt(new_length)); @@ -920,7 +968,7 @@ BUILTIN(ArrayConcat) { int result_len = 0; for (int i = 0; i < n_arguments; i++) { Object* arg = args[i]; - if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements() + if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements() || JSArray::cast(arg)->GetPrototype() != array_proto) { return CallJsBuiltin(isolate, "ArrayConcat", args); } @@ -956,6 +1004,17 @@ BUILTIN(ArrayConcat) { } FixedArray* result_elms = FixedArray::cast(result); + // Ensure element type transitions happen before copying elements in. + if (result_array->HasFastSmiOnlyElements()) { + for (int i = 0; i < n_arguments; i++) { + JSArray* array = JSArray::cast(args[i]); + if (!array->HasFastSmiOnlyElements()) { + result_array->EnsureCanContainNonSmiElements(); + break; + } + } + } + // Copy data. AssertNoAllocation no_gc; int start_pos = 0; @@ -1607,20 +1666,22 @@ void Builtins::Setup(bool create_heap_objects) { const BuiltinDesc* functions = BuiltinFunctionTable::functions(); // For now we generate builtin adaptor code into a stack-allocated - // buffer, before copying it into individual code objects. - byte buffer[4*KB]; + // buffer, before copying it into individual code objects. Be careful + // with alignment, some platforms don't like unaligned code. + union { int force_alignment; byte buffer[4*KB]; } u; // Traverse the list of builtins and generate an adaptor in a // separate code object for each one. for (int i = 0; i < builtin_count; i++) { if (create_heap_objects) { - MacroAssembler masm(isolate, buffer, sizeof buffer); + MacroAssembler masm(isolate, u.buffer, sizeof u.buffer); // Generate the code/adaptor. typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments); Generator g = FUNCTION_CAST<Generator>(functions[i].generator); // We pass all arguments to the generator, but it may not use all of // them. This works because the first arguments are on top of the // stack. + ASSERT(!masm.has_frame()); g(&masm, functions[i].name, functions[i].extra_args); // Move the code into the object heap. CodeDesc desc; diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc index 30a67a661..9241d2658 100644 --- a/deps/v8/src/cached-powers.cc +++ b/deps/v8/src/cached-powers.cc @@ -134,14 +134,12 @@ static const CachedPower kCachedPowers[] = { }; static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers); -static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent; +static const int kCachedPowersOffset = 348; // -1 * the first decimal_exponent. static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10) -const int PowersOfTenCache::kDecimalExponentDistance = - kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent; -const int PowersOfTenCache::kMinDecimalExponent = - kCachedPowers[0].decimal_exponent; -const int PowersOfTenCache::kMaxDecimalExponent = - kCachedPowers[kCachedPowersLength - 1].decimal_exponent; +// Difference between the decimal exponents in the table above. 
+const int PowersOfTenCache::kDecimalExponentDistance = 8; +const int PowersOfTenCache::kMinDecimalExponent = -348; +const int PowersOfTenCache::kMaxDecimalExponent = 340; void PowersOfTenCache::GetCachedPowerForBinaryExponentRange( int min_exponent, diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 00da4cba6..4bc2603c5 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -52,11 +52,12 @@ void CodeStub::GenerateCode(MacroAssembler* masm) { // Update the static counter each time a new code stub is generated. masm->isolate()->counters()->code_stubs()->Increment(); - // Nested stubs are not allowed for leafs. - AllowStubCallsScope allow_scope(masm, AllowsStubCalls()); + // Nested stubs are not allowed for leaves. + AllowStubCallsScope allow_scope(masm, false); // Generate the code for the stub. masm->set_generating_stub(true); + NoCurrentFrameScope scope(masm); Generate(masm); } @@ -127,8 +128,10 @@ Handle<Code> CodeStub::GetCode() { GetKey(), new_object); heap->public_set_code_stubs(*dict); - code = *new_object; + Activate(code); + } else { + CHECK(IsPregenerated() == code->is_pregenerated()); } ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code)); @@ -166,7 +169,11 @@ MaybeObject* CodeStub::TryGetCode() { heap->code_stubs()->AtNumberPut(GetKey(), code); if (maybe_new_object->ToObject(&new_object)) { heap->public_set_code_stubs(NumberDictionary::cast(new_object)); + } else if (MustBeInStubCache()) { + return maybe_new_object; } + + Activate(code); } return code; @@ -188,6 +195,11 @@ const char* CodeStub::MajorName(CodeStub::Major major_key, } +void CodeStub::PrintName(StringStream* stream) { + stream->Add("%s", MajorName(MajorKey(), false)); +} + + int ICCompareStub::MinorKey() { return OpField::encode(op_ - Token::EQ) | StateField::encode(state_); } @@ -245,6 +257,7 @@ void InstanceofStub::PrintName(StringStream* stream) { void KeyedLoadElementStub::Generate(MacroAssembler* masm) { switch (elements_kind_) { case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: KeyedLoadStubCompiler::GenerateLoadFastElement(masm); break; case FAST_DOUBLE_ELEMENTS: @@ -274,7 +287,11 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) { void KeyedStoreElementStub::Generate(MacroAssembler* masm) { switch (elements_kind_) { case FAST_ELEMENTS: - KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_); + case FAST_SMI_ONLY_ELEMENTS: { + KeyedStoreStubCompiler::GenerateStoreFastElement(masm, + is_js_array_, + elements_kind_); + } break; case FAST_DOUBLE_ELEMENTS: KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, @@ -302,24 +319,20 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) { void ArgumentsAccessStub::PrintName(StringStream* stream) { - const char* type_name = NULL; // Make g++ happy. + stream->Add("ArgumentsAccessStub_"); switch (type_) { - case READ_ELEMENT: type_name = "ReadElement"; break; - case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break; - case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break; - case NEW_STRICT: type_name = "NewStrict"; break; + case READ_ELEMENT: stream->Add("ReadElement"); break; + case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break; + case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break; + case NEW_STRICT: stream->Add("NewStrict"); break; } - stream->Add("ArgumentsAccessStub_%s", type_name); } void CallFunctionStub::PrintName(StringStream* stream) { - const char* flags_name = NULL; // Make g++ happy. 
- switch (flags_) { - case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break; - case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break; - } - stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name); + stream->Add("CallFunctionStub_Args%d", argc_); + if (ReceiverMightBeImplicit()) stream->Add("_Implicit"); + if (RecordCallTarget()) stream->Add("_Recording"); } diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 64c89b93d..acfbd469f 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -45,27 +45,23 @@ namespace internal { V(Compare) \ V(CompareIC) \ V(MathPow) \ + V(RecordWrite) \ + V(StoreBufferOverflow) \ + V(RegExpExec) \ V(TranscendentalCache) \ V(Instanceof) \ - /* All stubs above this line only exist in a few versions, which are */ \ - /* generated ahead of time. Therefore compiling a call to one of */ \ - /* them can't cause a new stub to be compiled, so compiling a call to */ \ - /* them is GC safe. The ones below this line exist in many variants */ \ - /* so code compiling a call to one can cause a GC. This means they */ \ - /* can't be called from other stubs, since stub generation code is */ \ - /* not GC safe. */ \ V(ConvertToDouble) \ V(WriteInt32ToHeapNumber) \ V(StackCheck) \ V(FastNewClosure) \ V(FastNewContext) \ + V(FastNewBlockContext) \ V(FastCloneShallowArray) \ V(RevertToNumber) \ V(ToBoolean) \ V(ToNumber) \ V(CounterOp) \ V(ArgumentsAccess) \ - V(RegExpExec) \ V(RegExpConstructResult) \ V(NumberToString) \ V(CEntry) \ @@ -73,7 +69,7 @@ namespace internal { V(KeyedLoadElement) \ V(KeyedStoreElement) \ V(DebuggerStatement) \ - V(StringDictionaryNegativeLookup) + V(StringDictionaryLookup) // List of code stubs only used on ARM platforms. #ifdef V8_TARGET_ARCH_ARM @@ -142,6 +138,27 @@ class CodeStub BASE_EMBEDDED { virtual ~CodeStub() {} + bool CompilingCallsToThisStubIsGCSafe() { + bool is_pregenerated = IsPregenerated(); + Code* code = NULL; + CHECK(!is_pregenerated || FindCodeInCache(&code)); + return is_pregenerated; + } + + // See comment above, where Instanceof is defined. + virtual bool IsPregenerated() { return false; } + + static void GenerateStubsAheadOfTime(); + static void GenerateFPStubs(); + + // Some stubs put untagged junk on the stack that cannot be scanned by the + // GC. This means that we must be statically sure that no GC can occur while + // they are running. If that is the case they should override this to return + // true, which will cause an assertion if we try to call something that can + // GC or if we try to put a stack frame on top of the junk, which would not + // result in a traversable stack. + virtual bool SometimesSetsUpAFrame() { return true; } + protected: static const int kMajorBits = 6; static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits; @@ -164,6 +181,14 @@ class CodeStub BASE_EMBEDDED { // Finish the code object after it has been generated. virtual void FinishCode(Code* code) { } + // Returns true if TryGetCode should fail if it failed + // to register newly generated stub in the stub cache. + virtual bool MustBeInStubCache() { return false; } + + // Activate newly generated stub. Is called after + // registering stub in the stub cache. + virtual void Activate(Code* code) { } + // Returns information for computing the number key. virtual Major MajorKey() = 0; virtual int MinorKey() = 0; @@ -178,9 +203,7 @@ class CodeStub BASE_EMBEDDED { // Returns a name for logging/debugging purposes. 
SmartArrayPointer<const char> GetName(); - virtual void PrintName(StringStream* stream) { - stream->Add("%s", MajorName(MajorKey(), false)); - } + virtual void PrintName(StringStream* stream); // Returns whether the code generated for this stub needs to be allocated as // a fixed (non-moveable) code object. @@ -193,9 +216,6 @@ class CodeStub BASE_EMBEDDED { MajorKeyBits::encode(MajorKey()); } - // See comment above, where Instanceof is defined. - bool AllowsStubCalls() { return MajorKey() <= Instanceof; } - class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {}; class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {}; @@ -304,7 +324,7 @@ class FastNewContextStub : public CodeStub { static const int kMaximumSlots = 64; explicit FastNewContextStub(int slots) : slots_(slots) { - ASSERT(slots_ > 0 && slots <= kMaximumSlots); + ASSERT(slots_ > 0 && slots_ <= kMaximumSlots); } void Generate(MacroAssembler* masm); @@ -317,6 +337,24 @@ class FastNewContextStub : public CodeStub { }; +class FastNewBlockContextStub : public CodeStub { + public: + static const int kMaximumSlots = 64; + + explicit FastNewBlockContextStub(int slots) : slots_(slots) { + ASSERT(slots_ > 0 && slots_ <= kMaximumSlots); + } + + void Generate(MacroAssembler* masm); + + private: + int slots_; + + Major MajorKey() { return FastNewBlockContext; } + int MinorKey() { return slots_; } +}; + + class FastCloneShallowArrayStub : public CodeStub { public: // Maximum length of copied elements array. @@ -531,11 +569,18 @@ class CompareStub: public CodeStub { class CEntryStub : public CodeStub { public: - explicit CEntryStub(int result_size) - : result_size_(result_size), save_doubles_(false) { } + explicit CEntryStub(int result_size, + SaveFPRegsMode save_doubles = kDontSaveFPRegs) + : result_size_(result_size), save_doubles_(save_doubles) { } void Generate(MacroAssembler* masm); - void SaveDoubles() { save_doubles_ = true; } + + // The version of this stub that doesn't save doubles is generated ahead of + // time, so it's OK to call it from other stubs that can't cope with GC during + // their code generation. On machines that always have gp registers (x64) we + // can generate both variants ahead of time. + virtual bool IsPregenerated(); + static void GenerateAheadOfTime(); private: void GenerateCore(MacroAssembler* masm, @@ -550,7 +595,7 @@ class CEntryStub : public CodeStub { // Number of pointers/values returned. const int result_size_; - bool save_doubles_; + SaveFPRegsMode save_doubles_; Major MajorKey() { return CEntry; } int MinorKey(); @@ -647,10 +692,32 @@ class CallFunctionStub: public CodeStub { void Generate(MacroAssembler* masm); + virtual void FinishCode(Code* code); + + static void Clear(Heap* heap, Address address); + + static Object* GetCachedValue(Address address); + static int ExtractArgcFromMinorKey(int minor_key) { return ArgcBits::decode(minor_key); } + // The object that indicates an uninitialized cache. + static Handle<Object> UninitializedSentinel(Isolate* isolate) { + return isolate->factory()->the_hole_value(); + } + + // A raw version of the uninitialized sentinel that's safe to read during + // garbage collection (e.g., for patching the cache). + static Object* RawUninitializedSentinel(Heap* heap) { + return heap->raw_unchecked_the_hole_value(); + } + + // The object that indicates a megamorphic state. 
+ static Handle<Object> MegamorphicSentinel(Isolate* isolate) { + return isolate->factory()->undefined_value(); + } + private: int argc_; CallFunctionFlags flags_; @@ -658,8 +725,8 @@ class CallFunctionStub: public CodeStub { virtual void PrintName(StringStream* stream); // Minor key encoding in 32 bits with Bitfield <Type, shift, size>. - class FlagBits: public BitField<CallFunctionFlags, 0, 1> {}; - class ArgcBits: public BitField<unsigned, 1, 32 - 1> {}; + class FlagBits: public BitField<CallFunctionFlags, 0, 2> {}; + class ArgcBits: public BitField<unsigned, 2, 32 - 2> {}; Major MajorKey() { return CallFunction; } int MinorKey() { @@ -670,6 +737,10 @@ class CallFunctionStub: public CodeStub { bool ReceiverMightBeImplicit() { return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0; } + + bool RecordCallTarget() { + return (flags_ & RECORD_CALL_TARGET) != 0; + } }; @@ -934,6 +1005,8 @@ class ToBooleanStub: public CodeStub { virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; } virtual void PrintName(StringStream* stream); + virtual bool SometimesSetsUpAFrame() { return false; } + private: Major MajorKey() { return ToBoolean; } int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); } diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index cdc9ba155..ceea7b9fe 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -218,8 +218,8 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) { int CEntryStub::MinorKey() { + int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0; ASSERT(result_size_ == 1 || result_size_ == 2); - int result = save_doubles_ ? 1 : 0; #ifdef _WIN64 return result | ((result_size_ == 1) ? 0 : 2); #else diff --git a/deps/v8/src/compiler-intrinsics.h b/deps/v8/src/compiler-intrinsics.h new file mode 100644 index 000000000..3b9c59ea5 --- /dev/null +++ b/deps/v8/src/compiler-intrinsics.h @@ -0,0 +1,77 @@ +// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
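// The new compiler-intrinsics.h below wraps the compiler-specific bit-scan
// builtins (GCC's __builtin_ctz/__builtin_clz, MSVC's _BitScanForward/
// _BitScanReverse) behind a single portable interface. The typical consumer
// walks the set bits of a bitmap word, for example a cell of mark bits. A
// standalone sketch of that pattern (hypothetical caller, GCC/Clang builtin
// only for brevity):

#include <stdint.h>

namespace sketch {

// Visits the index of every set bit in `cell`, lowest bit first. The builtin
// is undefined only for a zero argument, which the loop guard excludes.
template <typename Visitor>
void ForEachSetBit(uint32_t cell, Visitor visit) {
  while (cell != 0) {
    int index = __builtin_ctz(cell);  // number of trailing zero bits
    visit(index);
    cell &= cell - 1;                 // clear the lowest set bit
  }
}

}  // namespace sketch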
+ +#ifndef V8_COMPILER_INTRINSICS_H_ +#define V8_COMPILER_INTRINSICS_H_ + +namespace v8 { +namespace internal { + +class CompilerIntrinsics { + public: + // Returns number of zero bits preceding least significant 1 bit. + // Undefined for zero value. + INLINE(static int CountTrailingZeros(uint32_t value)); + + // Returns number of zero bits following most significant 1 bit. + // Undefined for zero value. + INLINE(static int CountLeadingZeros(uint32_t value)); +}; + +#ifdef __GNUC__ +int CompilerIntrinsics::CountTrailingZeros(uint32_t value) { + return __builtin_ctz(value); +} + +int CompilerIntrinsics::CountLeadingZeros(uint32_t value) { + return __builtin_clz(value); +} + +#elif defined(_MSC_VER) + +#pragma intrinsic(_BitScanForward) +#pragma intrinsic(_BitScanReverse) + +int CompilerIntrinsics::CountTrailingZeros(uint32_t value) { + unsigned long result; //NOLINT + _BitScanForward(&result, static_cast<long>(value)); //NOLINT + return static_cast<int>(result); +} + +int CompilerIntrinsics::CountLeadingZeros(uint32_t value) { + unsigned long result; //NOLINT + _BitScanReverse(&result, static_cast<long>(value)); //NOLINT + return 31 - static_cast<int>(result); +} + +#else +#error Unsupported compiler +#endif + +} } // namespace v8::internal + +#endif // V8_COMPILER_INTRINSICS_H_ diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index ba6bb42bf..4979a7f86 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -36,6 +36,7 @@ #include "full-codegen.h" #include "gdb-jit.h" #include "hydrogen.h" +#include "isolate-inl.h" #include "lithium.h" #include "liveedit.h" #include "parser.h" @@ -275,7 +276,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) { } Handle<Context> global_context(info->closure()->context()->global_context()); - TypeFeedbackOracle oracle(code, global_context); + TypeFeedbackOracle oracle(code, global_context, info->isolate()); HGraphBuilder builder(info, &oracle); HPhase phase(HPhase::kTotal); HGraph* graph = builder.CreateGraph(); @@ -479,8 +480,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source, // that would be compiled lazily anyway, so we skip the preparse step // in that case too. 
ScriptDataImpl* pre_data = input_pre_data; - bool harmony_block_scoping = natives != NATIVES_CODE && - FLAG_harmony_block_scoping; + bool harmony_scoping = natives != NATIVES_CODE && FLAG_harmony_scoping; if (pre_data == NULL && source_length >= FLAG_min_preparse_length) { if (source->IsExternalTwoByteString()) { @@ -488,12 +488,12 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source, Handle<ExternalTwoByteString>::cast(source), 0, source->length()); pre_data = ParserApi::PartialPreParse(&stream, extension, - harmony_block_scoping); + harmony_scoping); } else { GenericStringUC16CharacterStream stream(source, 0, source->length()); pre_data = ParserApi::PartialPreParse(&stream, extension, - harmony_block_scoping); + harmony_scoping); } } diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc index 4f93abdff..0cda43049 100644 --- a/deps/v8/src/contexts.cc +++ b/deps/v8/src/contexts.cc @@ -86,14 +86,14 @@ void Context::set_global_proxy(JSObject* object) { Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags, - int* index_, + int* index, PropertyAttributes* attributes, BindingFlags* binding_flags) { Isolate* isolate = GetIsolate(); Handle<Context> context(this, isolate); bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0; - *index_ = -1; + *index = -1; *attributes = ABSENT; *binding_flags = MISSING_BINDING; @@ -110,70 +110,50 @@ Handle<Object> Context::Lookup(Handle<String> name, PrintF("\n"); } - // Check extension/with/global object. - if (!context->IsBlockContext() && context->has_extension()) { - if (context->IsCatchContext()) { - // Catch contexts have the variable name in the extension slot. - if (name->Equals(String::cast(context->extension()))) { - if (FLAG_trace_contexts) { - PrintF("=> found in catch context\n"); - } - *index_ = Context::THROWN_OBJECT_INDEX; - *attributes = NONE; - *binding_flags = MUTABLE_IS_INITIALIZED; - return context; - } + // 1. Check global objects, subjects of with, and extension objects. + if (context->IsGlobalContext() || + context->IsWithContext() || + (context->IsFunctionContext() && context->has_extension())) { + Handle<JSObject> object(JSObject::cast(context->extension()), isolate); + // Context extension objects needs to behave as if they have no + // prototype. So even if we want to follow prototype chains, we need + // to only do a local lookup for context extension objects. + if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 || + object->IsJSContextExtensionObject()) { + *attributes = object->GetLocalPropertyAttribute(*name); } else { - ASSERT(context->IsGlobalContext() || - context->IsFunctionContext() || - context->IsWithContext()); - // Global, function, and with contexts may have an object in the - // extension slot. - Handle<JSObject> extension(JSObject::cast(context->extension()), - isolate); - // Context extension objects needs to behave as if they have no - // prototype. So even if we want to follow prototype chains, we - // need to only do a local lookup for context extension objects. 
- if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 || - extension->IsJSContextExtensionObject()) { - *attributes = extension->GetLocalPropertyAttribute(*name); - } else { - *attributes = extension->GetPropertyAttribute(*name); - } - if (*attributes != ABSENT) { - // property found - if (FLAG_trace_contexts) { - PrintF("=> found property in context object %p\n", - reinterpret_cast<void*>(*extension)); - } - return extension; + *attributes = object->GetPropertyAttribute(*name); + } + if (*attributes != ABSENT) { + if (FLAG_trace_contexts) { + PrintF("=> found property in context object %p\n", + reinterpret_cast<void*>(*object)); } + return object; } } - // Check serialized scope information of functions and blocks. Only - // functions can have parameters, and a function name. + // 2. Check the context proper if it has slots. if (context->IsFunctionContext() || context->IsBlockContext()) { - // We may have context-local slots. Check locals in the context. + // Use serialized scope information of functions and blocks to search + // for the context index. Handle<SerializedScopeInfo> scope_info; if (context->IsFunctionContext()) { scope_info = Handle<SerializedScopeInfo>( context->closure()->shared()->scope_info(), isolate); } else { - ASSERT(context->IsBlockContext()); scope_info = Handle<SerializedScopeInfo>( SerializedScopeInfo::cast(context->extension()), isolate); } - - Variable::Mode mode; - int index = scope_info->ContextSlotIndex(*name, &mode); - ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS); - if (index >= 0) { + VariableMode mode; + int slot_index = scope_info->ContextSlotIndex(*name, &mode); + ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS); + if (slot_index >= 0) { if (FLAG_trace_contexts) { PrintF("=> found local in context slot %d (mode = %d)\n", - index, mode); + slot_index, mode); } - *index_ = index; + *index = slot_index; // Note: Fixed context slots are statically allocated by the compiler. // Statically allocated variables always have a statically known mode, // which is the mode with which they were declared when added to the @@ -181,23 +161,23 @@ Handle<Object> Context::Lookup(Handle<String> name, // declared variables that were introduced through declaration nodes) // must not appear here. switch (mode) { - case Variable::INTERNAL: // Fall through. - case Variable::VAR: + case INTERNAL: // Fall through. + case VAR: *attributes = NONE; *binding_flags = MUTABLE_IS_INITIALIZED; break; - case Variable::LET: + case LET: *attributes = NONE; *binding_flags = MUTABLE_CHECK_INITIALIZED; break; - case Variable::CONST: + case CONST: *attributes = READ_ONLY; *binding_flags = IMMUTABLE_CHECK_INITIALIZED; break; - case Variable::DYNAMIC: - case Variable::DYNAMIC_GLOBAL: - case Variable::DYNAMIC_LOCAL: - case Variable::TEMPORARY: + case DYNAMIC: + case DYNAMIC_GLOBAL: + case DYNAMIC_LOCAL: + case TEMPORARY: UNREACHABLE(); break; } @@ -206,22 +186,34 @@ Handle<Object> Context::Lookup(Handle<String> name, // Check the slot corresponding to the intermediate context holding // only the function name variable. 
- if (follow_context_chain) { - int index = scope_info->FunctionContextSlotIndex(*name); - if (index >= 0) { + if (follow_context_chain && context->IsFunctionContext()) { + int function_index = scope_info->FunctionContextSlotIndex(*name); + if (function_index >= 0) { if (FLAG_trace_contexts) { PrintF("=> found intermediate function in context slot %d\n", - index); + function_index); } - *index_ = index; + *index = function_index; *attributes = READ_ONLY; *binding_flags = IMMUTABLE_IS_INITIALIZED; return context; } } + + } else if (context->IsCatchContext()) { + // Catch contexts have the variable name in the extension slot. + if (name->Equals(String::cast(context->extension()))) { + if (FLAG_trace_contexts) { + PrintF("=> found in catch context\n"); + } + *index = Context::THROWN_OBJECT_INDEX; + *attributes = NONE; + *binding_flags = MUTABLE_IS_INITIALIZED; + return context; + } } - // Proceed with the previous context. + // 3. Prepare to continue with the previous (next outermost) context. if (context->IsGlobalContext()) { follow_context_chain = false; } else { @@ -253,7 +245,7 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) { // Check non-parameter locals. Handle<SerializedScopeInfo> scope_info( context->closure()->shared()->scope_info()); - Variable::Mode mode; + VariableMode mode; int index = scope_info->ContextSlotIndex(*name, &mode); ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS); if (index >= 0) return false; diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index 505f86c8c..b80475f0f 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -134,6 +134,8 @@ enum BindingFlags { V(MAP_CACHE_INDEX, Object, map_cache) \ V(CONTEXT_DATA_INDEX, Object, data) \ V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \ + V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \ + to_complete_property_descriptor) \ V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \ V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) @@ -252,6 +254,7 @@ class Context: public FixedArray { OUT_OF_MEMORY_INDEX, CONTEXT_DATA_INDEX, ALLOW_CODE_GEN_FROM_STRINGS_INDEX, + TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, DERIVED_HAS_TRAP_INDEX, DERIVED_GET_TRAP_INDEX, DERIVED_SET_TRAP_INDEX, @@ -330,12 +333,6 @@ class Context: public FixedArray { // Mark the global context with out of memory. inline void mark_out_of_memory(); - // The exception holder is the object used as a with object in - // the implementation of a catch block. - bool is_exception_holder(Object* object) { - return IsCatchContext() && extension() == object; - } - // A global context hold a list of all functions which have been optimized. void AddOptimizedFunction(JSFunction* function); void RemoveOptimizedFunction(JSFunction* function); @@ -355,29 +352,25 @@ class Context: public FixedArray { #undef GLOBAL_CONTEXT_FIELD_ACCESSORS // Lookup the the slot called name, starting with the current context. - // There are 4 possible outcomes: - // - // 1) index_ >= 0 && result->IsContext(): - // most common case, the result is a Context, and index is the - // context slot index, and the slot exists. - // attributes == READ_ONLY for the function name variable, NONE otherwise. + // There are three possibilities: // - // 2) index_ >= 0 && result->IsJSObject(): - // the result is the JSObject arguments object, the index is the parameter - // index, i.e., key into the arguments object, and the property exists. - // attributes != ABSENT. 
+ // 1) result->IsContext(): + // The binding was found in a context. *index is always the + // non-negative slot index. *attributes is NONE for var and let + // declarations, READ_ONLY for const declarations (never ABSENT). // - // 3) index_ < 0 && result->IsJSObject(): - // the result is the JSObject extension context or the global object, - // and the name is the property name, and the property exists. - // attributes != ABSENT. + // 2) result->IsJSObject(): + // The binding was found as a named property in a context extension + // object (i.e., was introduced via eval), as a property on the subject + // of with, or as a property of the global object. *index is -1 and + // *attributes is not ABSENT. // - // 4) index_ < 0 && result.is_null(): - // there was no context found with the corresponding property. - // attributes == ABSENT. + // 3) result.is_null(): + // There was no binding found, *index is always -1 and *attributes is + // always ABSENT. Handle<Object> Lookup(Handle<String> name, ContextLookupFlags flags, - int* index_, + int* index, PropertyAttributes* attributes, BindingFlags* binding_flags); diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h index 41cf0d54c..8bc11bf83 100644 --- a/deps/v8/src/conversions-inl.h +++ b/deps/v8/src/conversions-inl.h @@ -47,7 +47,7 @@ namespace v8 { namespace internal { static inline double JunkStringValue() { - return std::numeric_limits<double>::quiet_NaN(); + return BitCast<double, uint64_t>(kQuietNaNMask); } diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h index e51ad6501..31aaf6b73 100644 --- a/deps/v8/src/conversions.h +++ b/deps/v8/src/conversions.h @@ -28,8 +28,6 @@ #ifndef V8_CONVERSIONS_H_ #define V8_CONVERSIONS_H_ -#include <limits> - #include "utils.h" namespace v8 { diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index 65490285e..d74c034ac 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -551,12 +551,12 @@ void CpuProfiler::StopProcessor() { sampler->Stop(); need_to_stop_sampler_ = false; } + NoBarrier_Store(&is_profiling_, false); processor_->Stop(); processor_->Join(); delete processor_; delete generator_; processor_ = NULL; - NoBarrier_Store(&is_profiling_, false); generator_ = NULL; logger->logging_nesting_ = saved_logging_nesting_; } diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc index adefba732..8fbc876da 100644 --- a/deps/v8/src/d8-debug.cc +++ b/deps/v8/src/d8-debug.cc @@ -1,4 +1,4 @@ -// Copyright 2008 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -25,6 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+#ifdef ENABLE_DEBUGGER_SUPPORT #include "d8.h" #include "d8-debug.h" @@ -367,3 +368,5 @@ void KeyboardThread::Run() { } // namespace v8 + +#endif // ENABLE_DEBUGGER_SUPPORT diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index 55f0d4c2a..a516576fa 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -146,11 +146,11 @@ bool Shell::ExecuteString(Handle<String> source, Handle<Value> name, bool print_result, bool report_exceptions) { -#ifndef V8_SHARED +#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) bool FLAG_debugger = i::FLAG_debugger; #else bool FLAG_debugger = false; -#endif // V8_SHARED +#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT HandleScope handle_scope; TryCatch try_catch; options.script_executed = true; @@ -594,6 +594,7 @@ void Shell::InstallUtilityScript() { Context::Scope utility_scope(utility_context_); #ifdef ENABLE_DEBUGGER_SUPPORT + if (i::FLAG_debugger) printf("JavaScript debugger enabled\n"); // Install the debugger object in the utility scope i::Debug* debug = i::Isolate::Current()->debug(); debug->Load(); @@ -816,7 +817,7 @@ void Shell::OnExit() { static FILE* FOpen(const char* path, const char* mode) { -#if (defined(_WIN32) || defined(_WIN64)) +#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64)) FILE* result; if (fopen_s(&result, path, mode) == 0) { return result; @@ -900,9 +901,6 @@ void Shell::RunShell() { #ifndef V8_SHARED console = LineEditor::Get(); printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name()); - if (i::FLAG_debugger) { - printf("JavaScript debugger enabled\n"); - } console->Open(); while (true) { i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt); @@ -1253,14 +1251,22 @@ int Shell::RunMain(int argc, char* argv[]) { Locker lock; HandleScope scope; Persistent<Context> context = CreateEvaluationContext(); + if (options.last_run) { + // Keep using the same context in the interactive shell. + evaluation_context_ = context; +#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) + // If the interactive debugger is enabled make sure to activate + // it before running the files passed on the command line. + if (i::FLAG_debugger) { + InstallUtilityScript(); + } +#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT + } { Context::Scope cscope(context); options.isolate_sources[0].Execute(); } - if (options.last_run) { - // Keep using the same context in the interactive shell - evaluation_context_ = context; - } else { + if (!options.last_run) { context.Dispose(); } @@ -1331,9 +1337,11 @@ int Shell::Main(int argc, char* argv[]) { if (( options.interactive_shell || !options.script_executed ) && !options.test_shell ) { -#ifndef V8_SHARED - InstallUtilityScript(); -#endif // V8_SHARED +#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) + if (!i::FLAG_debugger) { + InstallUtilityScript(); + } +#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT RunShell(); } diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index a229d39c3..3d79485b5 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -40,6 +40,7 @@ #include "global-handles.h" #include "ic.h" #include "ic-inl.h" +#include "isolate-inl.h" #include "list.h" #include "messages.h" #include "natives.h" @@ -401,15 +402,15 @@ void BreakLocationIterator::PrepareStepIn() { // Step in can only be prepared if currently positioned on an IC call, // construct call or CallFunction stub call. 
Address target = rinfo()->target_address(); - Handle<Code> code(Code::GetCodeFromTargetAddress(target)); - if (code->is_call_stub() || code->is_keyed_call_stub()) { + Handle<Code> target_code(Code::GetCodeFromTargetAddress(target)); + if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) { // Step in through IC call is handled by the runtime system. Therefore make // sure that the any current IC is cleared and the runtime system is // called. If the executing code has a debug break at the location change // the call in the original code as it is the code there that will be // executed in place of the debug break call. - Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(), - code->kind()); + Handle<Code> stub = ComputeCallDebugPrepareStepIn( + target_code->arguments_count(), target_code->kind()); if (IsDebugBreak()) { original_rinfo()->set_target_address(stub->entry()); } else { @@ -419,7 +420,7 @@ void BreakLocationIterator::PrepareStepIn() { #ifdef DEBUG // All the following stuff is needed only for assertion checks so the code // is wrapped in ifdef. - Handle<Code> maybe_call_function_stub = code; + Handle<Code> maybe_call_function_stub = target_code; if (IsDebugBreak()) { Address original_target = original_rinfo()->target_address(); maybe_call_function_stub = @@ -436,8 +437,9 @@ void BreakLocationIterator::PrepareStepIn() { // Step in through CallFunction stub should also be prepared by caller of // this function (Debug::PrepareStep) which should flood target function // with breakpoints. - ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub() - || is_call_function_stub); + ASSERT(RelocInfo::IsConstructCall(rmode()) || + target_code->is_inline_cache_stub() || + is_call_function_stub); #endif } } @@ -474,11 +476,11 @@ void BreakLocationIterator::SetDebugBreakAtIC() { RelocInfo::Mode mode = rmode(); if (RelocInfo::IsCodeTarget(mode)) { Address target = rinfo()->target_address(); - Handle<Code> code(Code::GetCodeFromTargetAddress(target)); + Handle<Code> target_code(Code::GetCodeFromTargetAddress(target)); // Patch the code to invoke the builtin debug break function matching the // calling convention used by the call site. - Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode)); + Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode)); rinfo()->set_target_address(dbgbrk_code->entry()); } } @@ -772,7 +774,7 @@ bool Debug::CompileDebuggerScript(int index) { // Execute the shared function in the debugger context. Handle<Context> context = isolate->global_context(); - bool caught_exception = false; + bool caught_exception; Handle<JSFunction> function = factory->NewFunctionFromSharedFunctionInfo(function_info, context); @@ -1103,14 +1105,13 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) { Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id()); // Call HandleBreakPointx. 
- bool caught_exception = false; - const int argc = 2; - Object** argv[argc] = { - break_id.location(), - reinterpret_cast<Object**>(break_point_object.location()) - }; + bool caught_exception; + Handle<Object> argv[] = { break_id, break_point_object }; Handle<Object> result = Execution::TryCall(check_break_point, - isolate_->js_builtins_object(), argc, argv, &caught_exception); + isolate_->js_builtins_object(), + ARRAY_SIZE(argv), + argv, + &caught_exception); // If exception or non boolean result handle as not triggered if (caught_exception || !result->IsBoolean()) { @@ -1732,6 +1733,10 @@ void Debug::PrepareForBreakPoints() { if (!has_break_points_) { Deoptimizer::DeoptimizeAll(); + // We are going to iterate heap to find all functions without + // debug break slots. + isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask); + AssertNoAllocation no_allocation; Builtins* builtins = isolate_->builtins(); Code* lazy_compile = builtins->builtin(Builtins::kLazyCompile); @@ -1997,9 +2002,10 @@ void Debug::CreateScriptCache() { // Perform two GCs to get rid of all unreferenced scripts. The first GC gets // rid of all the cached script wrappers and the second gets rid of the - // scripts which are no longer referenced. - heap->CollectAllGarbage(false); - heap->CollectAllGarbage(false); + // scripts which are no longer referenced. The second also sweeps precisely, + // which saves us doing yet another GC to make the heap iterable. + heap->CollectAllGarbage(Heap::kNoGCFlags); + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask); ASSERT(script_cache_ == NULL); script_cache_ = new ScriptCache(); @@ -2007,6 +2013,8 @@ void Debug::CreateScriptCache() { // Scan heap for Script objects. int count = 0; HeapIterator iterator; + AssertNoAllocation no_allocation; + for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (obj->IsScript() && Script::cast(obj)->HasValidSource()) { script_cache_->Add(Handle<Script>(Script::cast(obj))); @@ -2047,7 +2055,7 @@ Handle<FixedArray> Debug::GetLoadedScripts() { // Perform GC to get unreferenced scripts evicted from the cache before // returning the content. - isolate_->heap()->CollectAllGarbage(false); + isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags); // Get the scripts from the cache. return script_cache_->GetScripts(); @@ -2093,7 +2101,8 @@ Debugger::~Debugger() { Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name, - int argc, Object*** argv, + int argc, + Handle<Object> argv[], bool* caught_exception) { ASSERT(isolate_->context() == *isolate_->debug()->debug_context()); @@ -2110,7 +2119,9 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name, Handle<Object> js_object = Execution::TryCall( Handle<JSFunction>::cast(constructor), Handle<JSObject>(isolate_->debug()->debug_context()->global()), - argc, argv, caught_exception); + argc, + argv, + caught_exception); return js_object; } @@ -2119,10 +2130,11 @@ Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) { // Create the execution state object. 
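Several hunks above and below switch Heap::CollectAllGarbage from a boolean "force compaction" argument to a bitmask of GC flags. Both values used by the debugger appear in this patch; a condensed sketch of the new call sites, assuming the Heap constants this change introduces:

  heap->CollectAllGarbage(Heap::kNoGCFlags);             // ordinary full GC
  // Full GC that also sweeps precisely, so a HeapIterator can walk the heap
  // afterwards without triggering yet another collection.
  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
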
Handle<Object> break_id = isolate_->factory()->NewNumberFromInt( isolate_->debug()->break_id()); - const int argc = 1; - Object** argv[argc] = { break_id.location() }; + Handle<Object> argv[] = { break_id }; return MakeJSObject(CStrVector("MakeExecutionState"), - argc, argv, caught_exception); + ARRAY_SIZE(argv), + argv, + caught_exception); } @@ -2130,11 +2142,9 @@ Handle<Object> Debugger::MakeBreakEvent(Handle<Object> exec_state, Handle<Object> break_points_hit, bool* caught_exception) { // Create the new break event object. - const int argc = 2; - Object** argv[argc] = { exec_state.location(), - break_points_hit.location() }; + Handle<Object> argv[] = { exec_state, break_points_hit }; return MakeJSObject(CStrVector("MakeBreakEvent"), - argc, + ARRAY_SIZE(argv), argv, caught_exception); } @@ -2146,23 +2156,24 @@ Handle<Object> Debugger::MakeExceptionEvent(Handle<Object> exec_state, bool* caught_exception) { Factory* factory = isolate_->factory(); // Create the new exception event object. - const int argc = 3; - Object** argv[argc] = { exec_state.location(), - exception.location(), - uncaught ? factory->true_value().location() : - factory->false_value().location()}; + Handle<Object> argv[] = { exec_state, + exception, + factory->ToBoolean(uncaught) }; return MakeJSObject(CStrVector("MakeExceptionEvent"), - argc, argv, caught_exception); + ARRAY_SIZE(argv), + argv, + caught_exception); } Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function, bool* caught_exception) { // Create the new function event object. - const int argc = 1; - Object** argv[argc] = { function.location() }; + Handle<Object> argv[] = { function }; return MakeJSObject(CStrVector("MakeNewFunctionEvent"), - argc, argv, caught_exception); + ARRAY_SIZE(argv), + argv, + caught_exception); } @@ -2173,14 +2184,11 @@ Handle<Object> Debugger::MakeCompileEvent(Handle<Script> script, // Create the compile event object. Handle<Object> exec_state = MakeExecutionState(caught_exception); Handle<Object> script_wrapper = GetScriptWrapper(script); - const int argc = 3; - Object** argv[argc] = { exec_state.location(), - script_wrapper.location(), - before ? factory->true_value().location() : - factory->false_value().location() }; - + Handle<Object> argv[] = { exec_state, + script_wrapper, + factory->ToBoolean(before) }; return MakeJSObject(CStrVector("MakeCompileEvent"), - argc, + ARRAY_SIZE(argv), argv, caught_exception); } @@ -2191,11 +2199,10 @@ Handle<Object> Debugger::MakeScriptCollectedEvent(int id, // Create the script collected event object. Handle<Object> exec_state = MakeExecutionState(caught_exception); Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id)); - const int argc = 2; - Object** argv[argc] = { exec_state.location(), id_object.location() }; + Handle<Object> argv[] = { exec_state, id_object }; return MakeJSObject(CStrVector("MakeScriptCollectedEvent"), - argc, + ARRAY_SIZE(argv), argv, caught_exception); } @@ -2345,12 +2352,13 @@ void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSValue> wrapper = GetScriptWrapper(script); // Call UpdateScriptBreakPoints expect no exceptions. 
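The conversions running through these debugger hunks all follow one pattern: argument vectors for Execution::Call/TryCall and Debugger::MakeJSObject become plain arrays of Handle<Object>, with the element count taken from ARRAY_SIZE(argv) instead of a hand-maintained argc. A minimal caller-side sketch of the new convention, assuming the internal signatures shown in this patch (the isolate and the handles are taken from surrounding context):

  // Build the argument vector as handles; the count is derived from the
  // initializer list itself, so it cannot drift out of sync with it.
  bool caught_exception;
  Handle<Object> argv[] = { exec_state, break_points_hit };
  Handle<Object> result = Execution::TryCall(fun,
                                             isolate->js_builtins_object(),
                                             ARRAY_SIZE(argv),  // == 2
                                             argv,
                                             &caught_exception);
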
- bool caught_exception = false; - const int argc = 1; - Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) }; + bool caught_exception; + Handle<Object> argv[] = { wrapper }; Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points), - Isolate::Current()->js_builtins_object(), argc, argv, - &caught_exception); + Isolate::Current()->js_builtins_object(), + ARRAY_SIZE(argv), + argv, + &caught_exception); if (caught_exception) { return; } @@ -2481,13 +2489,16 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event, Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_)); // Invoke the JavaScript debug event listener. - const int argc = 4; - Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(), - exec_state.location(), - Handle<Object>::cast(event_data).location(), - event_listener_data_.location() }; - bool caught_exception = false; - Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception); + Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)), + exec_state, + event_data, + event_listener_data_ }; + bool caught_exception; + Execution::TryCall(fun, + isolate_->global(), + ARRAY_SIZE(argv), + argv, + &caught_exception); // Silently ignore exceptions from debug event listeners. } @@ -2856,12 +2867,11 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun, return isolate_->factory()->undefined_value(); } - static const int kArgc = 2; - Object** argv[kArgc] = { exec_state.location(), data.location() }; + Handle<Object> argv[] = { exec_state, data }; Handle<Object> result = Execution::Call( fun, Handle<Object>(isolate_->debug()->debug_context_->global_proxy()), - kArgc, + ARRAY_SIZE(argv), argv, pending_exception); return result; @@ -2929,6 +2939,94 @@ void Debugger::CallMessageDispatchHandler() { } +EnterDebugger::EnterDebugger() + : isolate_(Isolate::Current()), + prev_(isolate_->debug()->debugger_entry()), + it_(isolate_), + has_js_frames_(!it_.done()), + save_(isolate_) { + Debug* debug = isolate_->debug(); + ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT)); + ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK)); + + // Link recursive debugger entry. + debug->set_debugger_entry(this); + + // Store the previous break id and frame id. + break_id_ = debug->break_id(); + break_frame_id_ = debug->break_frame_id(); + + // Create the new break info. If there is no JavaScript frames there is no + // break frame id. + if (has_js_frames_) { + debug->NewBreak(it_.frame()->id()); + } else { + debug->NewBreak(StackFrame::NO_ID); + } + + // Make sure that debugger is loaded and enter the debugger context. + load_failed_ = !debug->Load(); + if (!load_failed_) { + // NOTE the member variable save which saves the previous context before + // this change. + isolate_->set_context(*debug->debug_context()); + } +} + + +EnterDebugger::~EnterDebugger() { + ASSERT(Isolate::Current() == isolate_); + Debug* debug = isolate_->debug(); + + // Restore to the previous break state. + debug->SetBreak(break_frame_id_, break_id_); + + // Check for leaving the debugger. + if (prev_ == NULL) { + // Clear mirror cache when leaving the debugger. Skip this if there is a + // pending exception as clearing the mirror cache calls back into + // JavaScript. This can happen if the v8::Debug::Call is used in which + // case the exception should end up in the calling code. + if (!isolate_->has_pending_exception()) { + // Try to avoid any pending debug break breaking in the clear mirror + // cache JavaScript code. 
+ if (isolate_->stack_guard()->IsDebugBreak()) { + debug->set_interrupts_pending(DEBUGBREAK); + isolate_->stack_guard()->Continue(DEBUGBREAK); + } + debug->ClearMirrorCache(); + } + + // Request preemption and debug break when leaving the last debugger entry + // if any of these where recorded while debugging. + if (debug->is_interrupt_pending(PREEMPT)) { + // This re-scheduling of preemption is to avoid starvation in some + // debugging scenarios. + debug->clear_interrupt_pending(PREEMPT); + isolate_->stack_guard()->Preempt(); + } + if (debug->is_interrupt_pending(DEBUGBREAK)) { + debug->clear_interrupt_pending(DEBUGBREAK); + isolate_->stack_guard()->DebugBreak(); + } + + // If there are commands in the queue when leaving the debugger request + // that these commands are processed. + if (isolate_->debugger()->HasCommands()) { + isolate_->stack_guard()->DebugCommand(); + } + + // If leaving the debugger with the debugger no longer active unload it. + if (!isolate_->debugger()->IsDebuggerActive()) { + isolate_->debugger()->UnloadDebugger(); + } + } + + // Leaving this debugger entry. + debug->set_debugger_entry(prev_); +} + + MessageImpl MessageImpl::NewEvent(DebugEvent event, bool running, Handle<JSObject> exec_state, diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index a098040c0..f01ef393f 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -705,7 +705,8 @@ class Debugger { void DebugRequest(const uint16_t* json_request, int length); Handle<Object> MakeJSObject(Vector<const char> constructor_name, - int argc, Object*** argv, + int argc, + Handle<Object> argv[], bool* caught_exception); Handle<Object> MakeExecutionState(bool* caught_exception); Handle<Object> MakeBreakEvent(Handle<Object> exec_state, @@ -869,91 +870,8 @@ class Debugger { // some reason could not be entered FailedToEnter will return true. class EnterDebugger BASE_EMBEDDED { public: - EnterDebugger() - : isolate_(Isolate::Current()), - prev_(isolate_->debug()->debugger_entry()), - it_(isolate_), - has_js_frames_(!it_.done()), - save_(isolate_) { - Debug* debug = isolate_->debug(); - ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT)); - ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK)); - - // Link recursive debugger entry. - debug->set_debugger_entry(this); - - // Store the previous break id and frame id. - break_id_ = debug->break_id(); - break_frame_id_ = debug->break_frame_id(); - - // Create the new break info. If there is no JavaScript frames there is no - // break frame id. - if (has_js_frames_) { - debug->NewBreak(it_.frame()->id()); - } else { - debug->NewBreak(StackFrame::NO_ID); - } - - // Make sure that debugger is loaded and enter the debugger context. - load_failed_ = !debug->Load(); - if (!load_failed_) { - // NOTE the member variable save which saves the previous context before - // this change. - isolate_->set_context(*debug->debug_context()); - } - } - - ~EnterDebugger() { - ASSERT(Isolate::Current() == isolate_); - Debug* debug = isolate_->debug(); - - // Restore to the previous break state. - debug->SetBreak(break_frame_id_, break_id_); - - // Check for leaving the debugger. - if (prev_ == NULL) { - // Clear mirror cache when leaving the debugger. Skip this if there is a - // pending exception as clearing the mirror cache calls back into - // JavaScript. This can happen if the v8::Debug::Call is used in which - // case the exception should end up in the calling code. 
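EnterDebugger's constructor and destructor move out of the header into debug.cc with their bodies unchanged; only the declarations remain in debug.h (continued in the hunk below). For orientation, callers use this RAII scope roughly as in the following sketch; the surrounding function is illustrative, not part of the patch:

  void HandleSomeDebugEvent() {  // hypothetical caller
    // Entering the scope links a recursive debugger entry, records the
    // current break id/frame id and switches to the debug context.
    EnterDebugger debugger;
    if (debugger.FailedToEnter()) return;  // debugger scripts failed to load
    // ... work inside the debug context ...
    // Leaving the scope restores the previous break state and schedules any
    // pending preemption, debug break or command processing.
  }
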
- if (!isolate_->has_pending_exception()) { - // Try to avoid any pending debug break breaking in the clear mirror - // cache JavaScript code. - if (isolate_->stack_guard()->IsDebugBreak()) { - debug->set_interrupts_pending(DEBUGBREAK); - isolate_->stack_guard()->Continue(DEBUGBREAK); - } - debug->ClearMirrorCache(); - } - - // Request preemption and debug break when leaving the last debugger entry - // if any of these where recorded while debugging. - if (debug->is_interrupt_pending(PREEMPT)) { - // This re-scheduling of preemption is to avoid starvation in some - // debugging scenarios. - debug->clear_interrupt_pending(PREEMPT); - isolate_->stack_guard()->Preempt(); - } - if (debug->is_interrupt_pending(DEBUGBREAK)) { - debug->clear_interrupt_pending(DEBUGBREAK); - isolate_->stack_guard()->DebugBreak(); - } - - // If there are commands in the queue when leaving the debugger request - // that these commands are processed. - if (isolate_->debugger()->HasCommands()) { - isolate_->stack_guard()->DebugCommand(); - } - - // If leaving the debugger with the debugger no longer active unload it. - if (!isolate_->debugger()->IsDebuggerActive()) { - isolate_->debugger()->UnloadDebugger(); - } - } - - // Leaving this debugger entry. - debug->set_debugger_entry(prev_); - } + EnterDebugger(); + ~EnterDebugger(); // Check whether the debugger could be entered. inline bool FailedToEnter() { return load_failed_; } diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc index 5feb73d73..b0522757e 100644 --- a/deps/v8/src/deoptimizer.cc +++ b/deps/v8/src/deoptimizer.cc @@ -52,11 +52,13 @@ DeoptimizerData::DeoptimizerData() { DeoptimizerData::~DeoptimizerData() { if (eager_deoptimization_entry_code_ != NULL) { - eager_deoptimization_entry_code_->Free(EXECUTABLE); + Isolate::Current()->memory_allocator()->Free( + eager_deoptimization_entry_code_); eager_deoptimization_entry_code_ = NULL; } if (lazy_deoptimization_entry_code_ != NULL) { - lazy_deoptimization_entry_code_->Free(EXECUTABLE); + Isolate::Current()->memory_allocator()->Free( + lazy_deoptimization_entry_code_); lazy_deoptimization_entry_code_ = NULL; } } @@ -71,6 +73,8 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) { #endif +// We rely on this function not causing a GC. It is called from generated code +// without having a real stack frame in place. Deoptimizer* Deoptimizer::New(JSFunction* function, BailoutType type, unsigned bailout_id, @@ -319,6 +323,8 @@ Deoptimizer::Deoptimizer(Isolate* isolate, input_(NULL), output_count_(0), output_(NULL), + frame_alignment_marker_(isolate->heap()->frame_alignment_marker()), + has_alignment_padding_(0), deferred_heap_numbers_(0) { if (FLAG_trace_deopt && type != OSR) { if (type == DEBUGGER) { @@ -343,6 +349,26 @@ Deoptimizer::Deoptimizer(Isolate* isolate, if (type == EAGER) { ASSERT(from == NULL); optimized_code_ = function_->code(); + if (FLAG_trace_deopt && FLAG_code_comments) { + // Print instruction associated with this bailout. 
+ const char* last_comment = NULL; + int mask = RelocInfo::ModeMask(RelocInfo::COMMENT) + | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); + for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) { + RelocInfo* info = it.rinfo(); + if (info->rmode() == RelocInfo::COMMENT) { + last_comment = reinterpret_cast<const char*>(info->data()); + } + if (info->rmode() == RelocInfo::RUNTIME_ENTRY) { + unsigned id = Deoptimizer::GetDeoptimizationId( + info->target_address(), Deoptimizer::EAGER); + if (id == bailout_id && last_comment != NULL) { + PrintF(" %s\n", last_comment); + break; + } + } + } + } } else if (type == LAZY) { optimized_code_ = FindDeoptimizingCodeFromAddress(from); ASSERT(optimized_code_ != NULL); @@ -386,7 +412,7 @@ void Deoptimizer::DeleteFrameDescriptions() { Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) { ASSERT(id >= 0); if (id >= kNumberOfEntries) return NULL; - LargeObjectChunk* base = NULL; + MemoryChunk* base = NULL; DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); if (type == EAGER) { if (data->eager_deoptimization_entry_code_ == NULL) { @@ -400,12 +426,12 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) { base = data->lazy_deoptimization_entry_code_; } return - static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_); + static_cast<Address>(base->body()) + (id * table_entry_size_); } int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) { - LargeObjectChunk* base = NULL; + MemoryChunk* base = NULL; DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); if (type == EAGER) { base = data->eager_deoptimization_entry_code_; @@ -413,14 +439,14 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) { base = data->lazy_deoptimization_entry_code_; } if (base == NULL || - addr < base->GetStartAddress() || - addr >= base->GetStartAddress() + + addr < base->body() || + addr >= base->body() + (kNumberOfEntries * table_entry_size_)) { return kNotDeoptimizationEntry; } ASSERT_EQ(0, - static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_); - return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_; + static_cast<int>(addr - base->body()) % table_entry_size_); + return static_cast<int>(addr - base->body()) / table_entry_size_; } @@ -462,6 +488,8 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) { } +// We rely on this function not causing a GC. It is called from generated code +// without having a real stack frame in place. 
void Deoptimizer::DoComputeOutputFrames() { if (bailout_type_ == OSR) { DoComputeOsrOutputFrame(); @@ -613,11 +641,13 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, intptr_t input_value = input_->GetRegister(input_reg); if (FLAG_trace_deopt) { PrintF( - " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n", + " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ", output_[frame_index]->GetTop() + output_offset, output_offset, input_value, converter.NameOfCPURegister(input_reg)); + reinterpret_cast<Object*>(input_value)->ShortPrint(); + PrintF("\n"); } output_[frame_index]->SetFrameSlot(output_offset, input_value); return; @@ -675,10 +705,12 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, if (FLAG_trace_deopt) { PrintF(" 0x%08" V8PRIxPTR ": ", output_[frame_index]->GetTop() + output_offset); - PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n", + PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ", output_offset, input_value, input_offset); + reinterpret_cast<Object*>(input_value)->ShortPrint(); + PrintF("\n"); } output_[frame_index]->SetFrameSlot(output_offset, input_value); return; @@ -953,7 +985,10 @@ void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code, for (uint32_t i = 0; i < table_length; ++i) { uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize); Address pc_after = unoptimized_code->instruction_start() + pc_offset; - PatchStackCheckCodeAt(pc_after, check_code, replacement_code); + PatchStackCheckCodeAt(unoptimized_code, + pc_after, + check_code, + replacement_code); stack_check_cursor += 2 * kIntSize; } } @@ -1039,7 +1074,7 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address, } -LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) { +MemoryChunk* Deoptimizer::CreateCode(BailoutType type) { // We cannot run this if the serializer is enabled because this will // cause us to emit relocation information for the external // references. This is fine because the deoptimizer's code section @@ -1053,12 +1088,15 @@ LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) { masm.GetCode(&desc); ASSERT(desc.reloc_size == 0); - LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE); + MemoryChunk* chunk = + Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size, + EXECUTABLE, + NULL); if (chunk == NULL) { V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table"); } - memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size); - CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size); + memcpy(chunk->body(), desc.buffer, desc.instr_size); + CPU::FlushICache(chunk->body(), desc.instr_size); return chunk; } diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h index 8641261b1..3cf70466c 100644 --- a/deps/v8/src/deoptimizer.h +++ b/deps/v8/src/deoptimizer.h @@ -86,8 +86,8 @@ class DeoptimizerData { #endif private: - LargeObjectChunk* eager_deoptimization_entry_code_; - LargeObjectChunk* lazy_deoptimization_entry_code_; + MemoryChunk* eager_deoptimization_entry_code_; + MemoryChunk* lazy_deoptimization_entry_code_; Deoptimizer* current_; #ifdef ENABLE_DEBUGGER_SUPPORT @@ -173,7 +173,8 @@ class Deoptimizer : public Malloced { // Patch stack guard check at instruction before pc_after in // the unoptimized code to unconditionally call replacement_code. 
- static void PatchStackCheckCodeAt(Address pc_after, + static void PatchStackCheckCodeAt(Code* unoptimized_code, + Address pc_after, Code* check_code, Code* replacement_code); @@ -211,6 +212,11 @@ class Deoptimizer : public Malloced { return OFFSET_OF(Deoptimizer, output_count_); } static int output_offset() { return OFFSET_OF(Deoptimizer, output_); } + static int frame_alignment_marker_offset() { + return OFFSET_OF(Deoptimizer, frame_alignment_marker_); } + static int has_alignment_padding_offset() { + return OFFSET_OF(Deoptimizer, has_alignment_padding_); + } static int GetDeoptimizedCodeCount(Isolate* isolate); @@ -285,7 +291,7 @@ class Deoptimizer : public Malloced { void AddDoubleValue(intptr_t slot_address, double value); - static LargeObjectChunk* CreateCode(BailoutType type); + static MemoryChunk* CreateCode(BailoutType type); static void GenerateDeoptimizationEntries( MacroAssembler* masm, int count, BailoutType type); @@ -315,6 +321,10 @@ class Deoptimizer : public Malloced { // Array of output frame descriptions. FrameDescription** output_; + // Frames can be dynamically padded on ia32 to align untagged doubles. + Object* frame_alignment_marker_; + intptr_t has_alignment_padding_; + List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_; static const int table_entry_size_; diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc index 1e67b4cb6..e3b40ab93 100644 --- a/deps/v8/src/disassembler.cc +++ b/deps/v8/src/disassembler.cc @@ -200,7 +200,7 @@ static int DecodeIt(FILE* f, // Print all the reloc info for this instruction which are not comments. for (int i = 0; i < pcs.length(); i++) { // Put together the reloc info - RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]); + RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL); // Indent the printing of the reloc info. if (i == 0) { diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc index e4ecfe8dd..5e7a84e38 100644 --- a/deps/v8/src/elements.cc +++ b/deps/v8/src/elements.cc @@ -227,7 +227,9 @@ class FastElementsAccessor public: static MaybeObject* DeleteCommon(JSObject* obj, uint32_t key) { - ASSERT(obj->HasFastElements() || obj->HasFastArgumentsElements()); + ASSERT(obj->HasFastElements() || + obj->HasFastSmiOnlyElements() || + obj->HasFastArgumentsElements()); Heap* heap = obj->GetHeap(); FixedArray* backing_store = FixedArray::cast(obj->elements()); if (backing_store->map() == heap->non_strict_arguments_elements_map()) { @@ -596,6 +598,9 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) { void ElementsAccessor::InitializeOncePerProcess() { static struct ConcreteElementsAccessors { + // Use the fast element handler for smi-only arrays. The implementation is + // currently identical. 
+ FastElementsAccessor fast_smi_elements_handler; FastElementsAccessor fast_elements_handler; FastDoubleElementsAccessor fast_double_elements_handler; DictionaryElementsAccessor dictionary_elements_handler; @@ -612,6 +617,7 @@ void ElementsAccessor::InitializeOncePerProcess() { } element_accessors; static ElementsAccessor* accessor_array[] = { + &element_accessors.fast_smi_elements_handler, &element_accessors.fast_elements_handler, &element_accessors.fast_double_elements_handler, &element_accessors.dictionary_elements_handler, @@ -627,6 +633,9 @@ void ElementsAccessor::InitializeOncePerProcess() { &element_accessors.pixel_elements_handler }; + STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) == + kElementsKindCount); + elements_accessors_ = accessor_array; } diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index f36d4e491..29955faff 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -33,6 +33,7 @@ #include "bootstrapper.h" #include "codegen.h" #include "debug.h" +#include "isolate-inl.h" #include "runtime-profiler.h" #include "simulator.h" #include "v8threads.h" @@ -65,13 +66,13 @@ void StackGuard::reset_limits(const ExecutionAccess& lock) { } -static Handle<Object> Invoke(bool construct, - Handle<JSFunction> func, +static Handle<Object> Invoke(bool is_construct, + Handle<JSFunction> function, Handle<Object> receiver, int argc, - Object*** args, + Handle<Object> args[], bool* has_pending_exception) { - Isolate* isolate = func->GetIsolate(); + Isolate* isolate = function->GetIsolate(); // Entering JavaScript. VMState state(isolate, JS); @@ -79,21 +80,15 @@ static Handle<Object> Invoke(bool construct, // Placeholder for return value. MaybeObject* value = reinterpret_cast<Object*>(kZapValue); - typedef Object* (*JSEntryFunction)( - byte* entry, - Object* function, - Object* receiver, - int argc, - Object*** args); - - Handle<Code> code; - if (construct) { - JSConstructEntryStub stub; - code = stub.GetCode(); - } else { - JSEntryStub stub; - code = stub.GetCode(); - } + typedef Object* (*JSEntryFunction)(byte* entry, + Object* function, + Object* receiver, + int argc, + Object*** args); + + Handle<Code> code = is_construct + ? isolate->factory()->js_construct_entry_code() + : isolate->factory()->js_entry_code(); // Convert calls on global objects to be calls on the global // receiver instead to avoid having a 'this' pointer which refers @@ -105,21 +100,22 @@ static Handle<Object> Invoke(bool construct, // Make sure that the global object of the context we're about to // make the current one is indeed a global object. - ASSERT(func->context()->global()->IsGlobalObject()); + ASSERT(function->context()->global()->IsGlobalObject()); { // Save and restore context around invocation and block the // allocation of handles without explicit handle scopes. SaveContext save(isolate); NoHandleAllocation na; - JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry()); + JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry()); // Call the function through the right JS entry stub. 
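In the execution.cc hunk below, Invoke now receives Handle<Object> args[] and converts it back to the Object*** expected by the generated JS entry stub with a single reinterpret_cast. That is layout-safe because Handle<T> is a one-word wrapper around a T** location, so an array of Handle<Object> has the same layout as an array of Object**. A compile-time note along these lines (not part of the patch; it reuses V8's STATIC_ASSERT macro) would document the assumption:

  // The reinterpret_cast<Object***>(args) in Invoke relies on handles
  // being exactly one pointer wide.
  STATIC_ASSERT(sizeof(Handle<Object>) == sizeof(Object**));
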
- byte* entry_address = func->code()->entry(); - JSFunction* function = *func; - Object* receiver_pointer = *receiver; - value = CALL_GENERATED_CODE(entry, entry_address, function, - receiver_pointer, argc, args); + byte* function_entry = function->code()->entry(); + JSFunction* func = *function; + Object* recv = *receiver; + Object*** argv = reinterpret_cast<Object***>(args); + value = + CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv); } #ifdef DEBUG @@ -148,9 +144,11 @@ static Handle<Object> Invoke(bool construct, Handle<Object> Execution::Call(Handle<Object> callable, Handle<Object> receiver, int argc, - Object*** args, + Handle<Object> argv[], bool* pending_exception, bool convert_receiver) { + *pending_exception = false; + if (!callable->IsJSFunction()) { callable = TryGetFunctionDelegate(callable, pending_exception); if (*pending_exception) return callable; @@ -172,13 +170,15 @@ Handle<Object> Execution::Call(Handle<Object> callable, if (*pending_exception) return callable; } - return Invoke(false, func, receiver, argc, args, pending_exception); + return Invoke(false, func, receiver, argc, argv, pending_exception); } -Handle<Object> Execution::New(Handle<JSFunction> func, int argc, - Object*** args, bool* pending_exception) { - return Invoke(true, func, Isolate::Current()->global(), argc, args, +Handle<Object> Execution::New(Handle<JSFunction> func, + int argc, + Handle<Object> argv[], + bool* pending_exception) { + return Invoke(true, func, Isolate::Current()->global(), argc, argv, pending_exception); } @@ -186,7 +186,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func, int argc, Handle<Object> Execution::TryCall(Handle<JSFunction> func, Handle<Object> receiver, int argc, - Object*** args, + Handle<Object> args[], bool* caught_exception) { // Enter a try-block while executing the JavaScript code. To avoid // duplicate error printing it must be non-verbose. 
Also, to avoid @@ -195,6 +195,7 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func, v8::TryCatch catcher; catcher.SetVerbose(false); catcher.SetCaptureMessage(false); + *caught_exception = false; Handle<Object> result = Invoke(false, func, receiver, argc, args, caught_exception); @@ -377,7 +378,7 @@ void StackGuard::DisableInterrupts() { bool StackGuard::IsInterrupted() { ExecutionAccess access(isolate_); - return thread_local_.interrupt_flags_ & INTERRUPT; + return (thread_local_.interrupt_flags_ & INTERRUPT) != 0; } @@ -403,7 +404,7 @@ void StackGuard::Preempt() { bool StackGuard::IsTerminateExecution() { ExecutionAccess access(isolate_); - return thread_local_.interrupt_flags_ & TERMINATE; + return (thread_local_.interrupt_flags_ & TERMINATE) != 0; } @@ -416,7 +417,7 @@ void StackGuard::TerminateExecution() { bool StackGuard::IsRuntimeProfilerTick() { ExecutionAccess access(isolate_); - return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK; + return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0; } @@ -433,6 +434,22 @@ void StackGuard::RequestRuntimeProfilerTick() { } +bool StackGuard::IsGCRequest() { + ExecutionAccess access(isolate_); + return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0; +} + + +void StackGuard::RequestGC() { + ExecutionAccess access(isolate_); + thread_local_.interrupt_flags_ |= GC_REQUEST; + if (thread_local_.postpone_interrupts_nesting_ == 0) { + thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit; + isolate_->heap()->SetStackLimits(); + } +} + + #ifdef ENABLE_DEBUGGER_SUPPORT bool StackGuard::IsDebugBreak() { ExecutionAccess access(isolate_); @@ -555,14 +572,15 @@ void StackGuard::InitThread(const ExecutionAccess& lock) { // --- C a l l s t o n a t i v e s --- -#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception) \ - do { \ - Isolate* isolate = Isolate::Current(); \ - Object** args[argc] = argv; \ - ASSERT(has_pending_exception != NULL); \ - return Call(isolate->name##_fun(), \ - isolate->js_builtins_object(), argc, args, \ - has_pending_exception); \ +#define RETURN_NATIVE_CALL(name, args, has_pending_exception) \ + do { \ + Isolate* isolate = Isolate::Current(); \ + Handle<Object> argv[] = args; \ + ASSERT(has_pending_exception != NULL); \ + return Call(isolate->name##_fun(), \ + isolate->js_builtins_object(), \ + ARRAY_SIZE(argv), argv, \ + has_pending_exception); \ } while (false) @@ -583,44 +601,44 @@ Handle<Object> Execution::ToBoolean(Handle<Object> obj) { Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc); + RETURN_NATIVE_CALL(to_number, { obj }, exc); } Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc); + RETURN_NATIVE_CALL(to_string, { obj }, exc); } Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc); + RETURN_NATIVE_CALL(to_detail_string, { obj }, exc); } Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) { if (obj->IsSpecObject()) return obj; - RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc); + RETURN_NATIVE_CALL(to_object, { obj }, exc); } Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc); + RETURN_NATIVE_CALL(to_integer, { obj }, exc); } Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_uint32, 1, { 
obj.location() }, exc); + RETURN_NATIVE_CALL(to_uint32, { obj }, exc); } Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) { - RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc); + RETURN_NATIVE_CALL(to_int32, { obj }, exc); } Handle<Object> Execution::NewDate(double time, bool* exc) { Handle<Object> time_obj = FACTORY->NewNumber(time); - RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc); + RETURN_NATIVE_CALL(create_date, { time_obj }, exc); } @@ -657,7 +675,7 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) { bool caught_exception; Handle<Object> index_object = factory->NewNumberFromInt(int_index); - Object** index_arg[] = { index_object.location() }; + Handle<Object> index_arg[] = { index_object }; Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at), string, ARRAY_SIZE(index_arg), @@ -671,7 +689,8 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) { Handle<JSFunction> Execution::InstantiateFunction( - Handle<FunctionTemplateInfo> data, bool* exc) { + Handle<FunctionTemplateInfo> data, + bool* exc) { Isolate* isolate = data->GetIsolate(); // Fast case: see if the function has already been instantiated int serial_number = Smi::cast(data->serial_number())->value(); @@ -680,10 +699,12 @@ Handle<JSFunction> Execution::InstantiateFunction( GetElementNoExceptionThrown(serial_number); if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm)); // The function has not yet been instantiated in this context; do it. - Object** args[1] = { Handle<Object>::cast(data).location() }; - Handle<Object> result = - Call(isolate->instantiate_fun(), - isolate->js_builtins_object(), 1, args, exc); + Handle<Object> args[] = { data }; + Handle<Object> result = Call(isolate->instantiate_fun(), + isolate->js_builtins_object(), + ARRAY_SIZE(args), + args, + exc); if (*exc) return Handle<JSFunction>::null(); return Handle<JSFunction>::cast(result); } @@ -710,10 +731,12 @@ Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data, ASSERT(!*exc); return Handle<JSObject>(JSObject::cast(result)); } else { - Object** args[1] = { Handle<Object>::cast(data).location() }; - Handle<Object> result = - Call(isolate->instantiate_fun(), - isolate->js_builtins_object(), 1, args, exc); + Handle<Object> args[] = { data }; + Handle<Object> result = Call(isolate->instantiate_fun(), + isolate->js_builtins_object(), + ARRAY_SIZE(args), + args, + exc); if (*exc) return Handle<JSObject>::null(); return Handle<JSObject>::cast(result); } @@ -724,9 +747,12 @@ void Execution::ConfigureInstance(Handle<Object> instance, Handle<Object> instance_template, bool* exc) { Isolate* isolate = Isolate::Current(); - Object** args[2] = { instance.location(), instance_template.location() }; + Handle<Object> args[] = { instance, instance_template }; Execution::Call(isolate->configure_instance_fun(), - isolate->js_builtins_object(), 2, args, exc); + isolate->js_builtins_object(), + ARRAY_SIZE(args), + args, + exc); } @@ -735,16 +761,13 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv, Handle<Object> pos, Handle<Object> is_global) { Isolate* isolate = fun->GetIsolate(); - const int argc = 4; - Object** args[argc] = { recv.location(), - Handle<Object>::cast(fun).location(), - pos.location(), - is_global.location() }; - bool caught_exception = false; - Handle<Object> result = - TryCall(isolate->get_stack_trace_line_fun(), - isolate->js_builtins_object(), argc, args, - &caught_exception); + Handle<Object> args[] 
= { recv, fun, pos, is_global }; + bool caught_exception; + Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(), + isolate->js_builtins_object(), + ARRAY_SIZE(args), + args, + &caught_exception); if (caught_exception || !result->IsString()) { return isolate->factory()->empty_symbol(); } @@ -852,6 +875,12 @@ void Execution::ProcessDebugMesssages(bool debug_command_only) { MaybeObject* Execution::HandleStackGuardInterrupt() { Isolate* isolate = Isolate::Current(); StackGuard* stack_guard = isolate->stack_guard(); + + if (stack_guard->IsGCRequest()) { + isolate->heap()->CollectAllGarbage(false); + stack_guard->Continue(GC_REQUEST); + } + isolate->counters()->stack_interrupts()->Increment(); if (stack_guard->IsRuntimeProfilerTick()) { isolate->counters()->runtime_profiler_ticks()->Increment(); diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h index 5cd7141fc..f2d17d079 100644 --- a/deps/v8/src/execution.h +++ b/deps/v8/src/execution.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -41,7 +41,8 @@ enum InterruptFlag { DEBUGCOMMAND = 1 << 2, PREEMPT = 1 << 3, TERMINATE = 1 << 4, - RUNTIME_PROFILER_TICK = 1 << 5 + RUNTIME_PROFILER_TICK = 1 << 5, + GC_REQUEST = 1 << 6 }; class Execution : public AllStatic { @@ -60,7 +61,7 @@ class Execution : public AllStatic { static Handle<Object> Call(Handle<Object> callable, Handle<Object> receiver, int argc, - Object*** args, + Handle<Object> argv[], bool* pending_exception, bool convert_receiver = false); @@ -73,7 +74,7 @@ class Execution : public AllStatic { // static Handle<Object> New(Handle<JSFunction> func, int argc, - Object*** args, + Handle<Object> argv[], bool* pending_exception); // Call a function, just like Call(), but make sure to silently catch @@ -83,7 +84,7 @@ class Execution : public AllStatic { static Handle<Object> TryCall(Handle<JSFunction> func, Handle<Object> receiver, int argc, - Object*** args, + Handle<Object> argv[], bool* caught_exception); // ECMA-262 9.2 @@ -196,6 +197,8 @@ class StackGuard { bool IsDebugCommand(); void DebugCommand(); #endif + bool IsGCRequest(); + void RequestGC(); void Continue(InterruptFlag after_what); // This provides an asynchronous read of the stack limits for the current diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc index 3740c27aa..48e8c4205 100644 --- a/deps/v8/src/extensions/gc-extension.cc +++ b/deps/v8/src/extensions/gc-extension.cc @@ -40,12 +40,7 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction( v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) { - bool compact = false; - // All allocation spaces other than NEW_SPACE have the same effect. 
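The new GC_REQUEST interrupt added above lets a collection that cannot run immediately be deferred to the next stack-guard check: StackGuard::RequestGC sets the flag and arms the stack-guard limit, and Execution::HandleStackGuardInterrupt performs the collection and clears the flag with Continue(GC_REQUEST). A condensed sketch of that handshake, using only the calls shown in this patch (the requesting side is not part of this section):

  // Requesting side:
  isolate->stack_guard()->RequestGC();

  // JavaScript thread, at the top of HandleStackGuardInterrupt():
  if (stack_guard->IsGCRequest()) {
    isolate->heap()->CollectAllGarbage(false);
    stack_guard->Continue(GC_REQUEST);
  }
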
- if (args.Length() >= 1 && args[0]->IsBoolean()) { - compact = args[0]->BooleanValue(); - } - HEAP->CollectAllGarbage(compact); + HEAP->CollectAllGarbage(Heap::kNoGCFlags); return v8::Undefined(); } diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index 97289266e..143b34208 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -234,7 +234,7 @@ Handle<String> Factory::NewProperSubString(Handle<String> str, Handle<String> Factory::NewExternalStringFromAscii( - ExternalAsciiString::Resource* resource) { + const ExternalAsciiString::Resource* resource) { CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateExternalStringFromAscii(resource), @@ -243,7 +243,7 @@ Handle<String> Factory::NewExternalStringFromAscii( Handle<String> Factory::NewExternalStringFromTwoByte( - ExternalTwoByteString::Resource* resource) { + const ExternalTwoByteString::Resource* resource) { CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateExternalStringFromTwoByte(resource), @@ -404,10 +404,12 @@ Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell( } -Handle<Map> Factory::NewMap(InstanceType type, int instance_size) { +Handle<Map> Factory::NewMap(InstanceType type, + int instance_size, + ElementsKind elements_kind) { CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocateMap(type, instance_size), + isolate()->heap()->AllocateMap(type, instance_size, elements_kind), Map); } @@ -455,23 +457,11 @@ Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) { } -Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) { - CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map); -} - - -Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) { - CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map); -} - - Handle<Map> Factory::GetElementsTransitionMap( - Handle<Map> src, - ElementsKind elements_kind, - bool safe_to_add_transition) { + Handle<JSObject> src, + ElementsKind elements_kind) { CALL_HEAP_FUNCTION(isolate(), - src->GetElementsTransitionMap(elements_kind, - safe_to_add_transition), + src->GetElementsTransitionMap(elements_kind), Map); } @@ -641,14 +631,16 @@ Handle<Object> Factory::NewError(const char* maker, return undefined_value(); Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj); Handle<Object> type_obj = LookupAsciiSymbol(type); - Object** argv[2] = { type_obj.location(), - Handle<Object>::cast(args).location() }; + Handle<Object> argv[] = { type_obj, args }; // Invoke the JavaScript factory method. If an exception is thrown while // running the factory method, use the exception as the result. bool caught_exception; Handle<Object> result = Execution::TryCall(fun, - isolate()->js_builtins_object(), 2, argv, &caught_exception); + isolate()->js_builtins_object(), + ARRAY_SIZE(argv), + argv, + &caught_exception); return result; } @@ -664,13 +656,16 @@ Handle<Object> Factory::NewError(const char* constructor, Handle<JSFunction> fun = Handle<JSFunction>( JSFunction::cast(isolate()->js_builtins_object()-> GetPropertyNoExceptionThrown(*constr))); - Object** argv[1] = { Handle<Object>::cast(message).location() }; + Handle<Object> argv[] = { message }; // Invoke the JavaScript factory method. If an exception is thrown while // running the factory method, use the exception as the result. 
bool caught_exception; Handle<Object> result = Execution::TryCall(fun, - isolate()->js_builtins_object(), 1, argv, &caught_exception); + isolate()->js_builtins_object(), + ARRAY_SIZE(argv), + argv, + &caught_exception); return result; } @@ -722,7 +717,12 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name, if (force_initial_map || type != JS_OBJECT_TYPE || instance_size != JSObject::kHeaderSize) { - Handle<Map> initial_map = NewMap(type, instance_size); + ElementsKind default_elements_kind = FLAG_smi_only_arrays + ? FAST_SMI_ONLY_ELEMENTS + : FAST_ELEMENTS; + Handle<Map> initial_map = NewMap(type, + instance_size, + default_elements_kind); function->set_initial_map(*initial_map); initial_map->set_constructor(*function); } @@ -908,11 +908,26 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements, Handle<JSArray> result = Handle<JSArray>::cast(NewJSObject(isolate()->array_function(), pretenure)); - result->SetContent(*elements); + SetContent(result, elements); return result; } +void Factory::SetContent(Handle<JSArray> array, + Handle<FixedArray> elements) { + CALL_HEAP_FUNCTION_VOID( + isolate(), + array->SetContent(*elements)); +} + + +void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) { + CALL_HEAP_FUNCTION_VOID( + isolate(), + array->EnsureCanContainNonSmiElements()); +} + + Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler, Handle<Object> prototype) { CALL_HEAP_FUNCTION( @@ -938,6 +953,13 @@ void Factory::BecomeJSFunction(Handle<JSReceiver> object) { } +void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) { + CALL_HEAP_FUNCTION_VOID( + isolate(), + object->SetIdentityHash(hash, ALLOW_CREATION)); +} + + Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo( Handle<String> name, int number_of_literals, @@ -990,6 +1012,12 @@ Handle<String> Factory::NumberToString(Handle<Object> number) { } +Handle<String> Factory::Uint32ToString(uint32_t value) { + CALL_HEAP_FUNCTION(isolate(), + isolate()->heap()->Uint32ToString(value), String); +} + + Handle<NumberDictionary> Factory::DictionaryAtNumberPut( Handle<NumberDictionary> dictionary, uint32_t key, @@ -1299,4 +1327,20 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc, } +Handle<Object> Factory::GlobalConstantFor(Handle<String> name) { + Heap* h = isolate()->heap(); + if (name->Equals(h->undefined_symbol())) return undefined_value(); + if (name->Equals(h->nan_symbol())) return nan_value(); + if (name->Equals(h->infinity_symbol())) return infinity_value(); + return Handle<Object>::null(); +} + + +Handle<Object> Factory::ToBoolean(bool value) { + return Handle<Object>(value + ? isolate()->heap()->true_value() + : isolate()->heap()->false_value()); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index 71ae750b3..a3615f2a0 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -145,9 +145,9 @@ class Factory { // not make sense to have a UTF-8 factory function for external strings, // because we cannot change the underlying buffer. Handle<String> NewExternalStringFromAscii( - ExternalAsciiString::Resource* resource); + const ExternalAsciiString::Resource* resource); Handle<String> NewExternalStringFromTwoByte( - ExternalTwoByteString::Resource* resource); + const ExternalTwoByteString::Resource* resource); // Create a global (but otherwise uninitialized) context. 
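Factory::ToBoolean, added above, gives call sites a one-liner for turning a C++ bool into the corresponding JS true/false handle, replacing the open-coded ternaries over true_value()/false_value() seen in the debugger hunks earlier in this patch. Roughly (variable name illustrative):

  // Before: uncaught ? factory->true_value().location()
  //                  : factory->false_value().location()
  // After:
  Handle<Object> uncaught_flag = factory->ToBoolean(uncaught);
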
Handle<Context> NewGlobalContext(); @@ -203,7 +203,9 @@ class Factory { Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell( Handle<Object> value); - Handle<Map> NewMap(InstanceType type, int instance_size); + Handle<Map> NewMap(InstanceType type, + int instance_size, + ElementsKind elements_kind = FAST_ELEMENTS); Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function); @@ -215,13 +217,8 @@ class Factory { Handle<Map> CopyMapDropTransitions(Handle<Map> map); - Handle<Map> GetFastElementsMap(Handle<Map> map); - - Handle<Map> GetSlowElementsMap(Handle<Map> map); - - Handle<Map> GetElementsTransitionMap(Handle<Map> map, - ElementsKind elements_kind, - bool safe_to_add_transition); + Handle<Map> GetElementsTransitionMap(Handle<JSObject> object, + ElementsKind elements_kind); Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array); @@ -258,12 +255,18 @@ class Factory { Handle<FixedArray> elements, PretenureFlag pretenure = NOT_TENURED); + void SetContent(Handle<JSArray> array, Handle<FixedArray> elements); + + void EnsureCanContainNonSmiElements(Handle<JSArray> array); + Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype); // Change the type of the argument into a JS object/function and reinitialize. void BecomeJSObject(Handle<JSReceiver> object); void BecomeJSFunction(Handle<JSReceiver> object); + void SetIdentityHash(Handle<JSObject> object, Object* hash); + Handle<JSFunction> NewFunction(Handle<String> name, Handle<Object> prototype); @@ -356,6 +359,7 @@ class Factory { PropertyAttributes attributes); Handle<String> NumberToString(Handle<Object> number); + Handle<String> Uint32ToString(uint32_t value); enum ApiInstanceType { JavaScriptObject, @@ -442,6 +446,14 @@ class Factory { JSRegExp::Flags flags, int capture_count); + // Returns the value for a known global constant (a property of the global + // object which is neither configurable nor writable) like 'undefined'. + // Returns a null handle when the given name is unknown. + Handle<Object> GlobalConstantFor(Handle<String> name); + + // Converts the given boolean condition to JavaScript boolean value. + Handle<Object> ToBoolean(bool value); + private: Isolate* isolate() { return reinterpret_cast<Isolate*>(this); } diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 7df2b0bf0..58fab14e1 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -98,13 +98,19 @@ private: // Flags for experimental language features. DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof") +DEFINE_bool(harmony_scoping, false, "enable harmony block scoping") DEFINE_bool(harmony_proxies, false, "enable harmony proxies") DEFINE_bool(harmony_weakmaps, false, "enable harmony weak maps") -DEFINE_bool(harmony_block_scoping, false, "enable harmony block scoping") +DEFINE_bool(harmony, false, "enable all harmony features") // Flags for experimental implementation features. DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles") -DEFINE_bool(string_slices, false, "use string slices") +DEFINE_bool(smi_only_arrays, false, "tracks arrays with only smi values") +DEFINE_bool(string_slices, true, "use string slices") + +DEFINE_bool(clever_optimizations, + true, + "Optimize object size, Array shift, DOM strings and string +") // Flags for Crankshaft. 
#ifdef V8_TARGET_ARCH_MIPS @@ -253,10 +259,16 @@ DEFINE_bool(print_cumulative_gc_stat, false, "print cumulative GC statistics in name=value format on exit") DEFINE_bool(trace_gc_verbose, false, "print more details following each garbage collection") +DEFINE_bool(trace_fragmentation, false, + "report fragmentation for old pointer and data pages") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") DEFINE_bool(flush_code, true, "flush code that we expect not to use again before full gc") +DEFINE_bool(incremental_marking, true, "use incremental marking") +DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") +DEFINE_bool(trace_incremental_marking, false, + "trace progress of the incremental marking") // v8.cc DEFINE_bool(use_idle_notification, true, @@ -276,8 +288,13 @@ DEFINE_bool(native_code_counters, false, // mark-compact.cc DEFINE_bool(always_compact, false, "Perform compaction on every full GC") +DEFINE_bool(lazy_sweeping, true, + "Use lazy sweeping for old pointer and data spaces") +DEFINE_bool(cleanup_caches_in_maps_at_gc, true, + "Flush code caches in maps during mark compact cycle.") DEFINE_bool(never_compact, false, "Never perform compaction on full GC - testing only") +DEFINE_bool(compact_code_space, false, "Compact code space") DEFINE_bool(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and " "flush code caches in maps during mark compact cycle.") @@ -288,9 +305,6 @@ DEFINE_int(random_seed, 0, DEFINE_bool(canonicalize_object_literal_maps, true, "Canonicalize maps for object literals.") -DEFINE_bool(use_big_map_space, true, - "Use big map space, but don't compact if it grew too big.") - DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1, "Maximum number of pages in map space which still allows to encode " "forwarding pointers. That's actually a constant, but it's useful " @@ -326,7 +340,6 @@ DEFINE_bool(preemption, false, // Regexp DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") -DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp") // Testing flags test/cctest/test-{flags,api,serialization}.cc DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") @@ -348,11 +361,15 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes", DEFINE_bool(help, false, "Print usage message, including flags, on console") DEFINE_bool(dump_counters, false, "Dump counters on exit") + +#ifdef ENABLE_DEBUGGER_SUPPORT DEFINE_bool(debugger, false, "Enable JavaScript debugger") DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the " "debugger agent in another process") DEFINE_bool(debugger_agent, false, "Enable debugger agent") DEFINE_int(debugger_port, 5858, "Port to use for remote debugging") +#endif // ENABLE_DEBUGGER_SUPPORT + DEFINE_string(map_counters, "", "Map counters to a file") DEFINE_args(js_arguments, JSArguments(), "Pass all remaining arguments to the script. 
Alias for \"--\".") @@ -378,6 +395,15 @@ DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk") DEFINE_string(gdbjit_dump_filter, "", "dump only objects containing this substring") +// mark-compact.cc +DEFINE_bool(force_marking_deque_overflows, false, + "force overflows of marking deque by reducing it's size " + "to 64 words") + +DEFINE_bool(stress_compaction, false, + "stress the GC compactor to flush out bugs (implies " + "--force_marking_deque_overflows)") + // // Debug only flags // diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h index 7ba79bf1b..94c745cfc 100644 --- a/deps/v8/src/frames-inl.h +++ b/deps/v8/src/frames-inl.h @@ -77,6 +77,21 @@ inline StackHandler* StackHandler::FromAddress(Address address) { } +inline bool StackHandler::is_entry() const { + return state() == ENTRY; +} + + +inline bool StackHandler::is_try_catch() const { + return state() == TRY_CATCH; +} + + +inline bool StackHandler::is_try_finally() const { + return state() == TRY_FINALLY; +} + + inline StackHandler::State StackHandler::state() const { const int offset = StackHandlerConstants::kStateOffset; return static_cast<State>(Memory::int_at(address() + offset)); @@ -105,8 +120,33 @@ inline StackHandler* StackFrame::top_handler() const { } +inline Code* StackFrame::LookupCode() const { + return GetContainingCode(isolate(), pc()); +} + + inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) { - return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code; + return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code; +} + + +inline EntryFrame::EntryFrame(StackFrameIterator* iterator) + : StackFrame(iterator) { +} + + +inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator) + : EntryFrame(iterator) { +} + + +inline ExitFrame::ExitFrame(StackFrameIterator* iterator) + : StackFrame(iterator) { +} + + +inline StandardFrame::StandardFrame(StackFrameIterator* iterator) + : StackFrame(iterator) { } @@ -155,6 +195,11 @@ inline bool StandardFrame::IsConstructFrame(Address fp) { } +inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator) + : StandardFrame(iterator) { +} + + Address JavaScriptFrame::GetParameterSlot(int index) const { int param_count = ComputeParametersCount(); ASSERT(-1 <= index && index < param_count); @@ -190,6 +235,26 @@ inline Object* JavaScriptFrame::function() const { } +inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator) + : JavaScriptFrame(iterator) { +} + + +inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame( + StackFrameIterator* iterator) : JavaScriptFrame(iterator) { +} + + +inline InternalFrame::InternalFrame(StackFrameIterator* iterator) + : StandardFrame(iterator) { +} + + +inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator) + : InternalFrame(iterator) { +} + + template<typename Iterator> inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp( Isolate* isolate) @@ -197,6 +262,15 @@ inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp( if (!done()) Advance(); } + +template<typename Iterator> +inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp( + Isolate* isolate, ThreadLocalTop* top) + : iterator_(isolate, top) { + if (!done()) Advance(); +} + + template<typename Iterator> inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const { // TODO(1233797): The frame hierarchy needs to change. 
It's diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index bebd10a80..412a59cc7 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -366,16 +366,17 @@ void SafeStackTraceFrameIterator::Advance() { Code* StackFrame::GetSafepointData(Isolate* isolate, - Address pc, + Address inner_pointer, SafepointEntry* safepoint_entry, unsigned* stack_slots) { - PcToCodeCache::PcToCodeCacheEntry* entry = - isolate->pc_to_code_cache()->GetCacheEntry(pc); + InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry = + isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer); if (!entry->safepoint_entry.is_valid()) { - entry->safepoint_entry = entry->code->GetSafepointEntry(pc); + entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer); ASSERT(entry->safepoint_entry.is_valid()); } else { - ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc))); + ASSERT(entry->safepoint_entry.Equals( + entry->code->GetSafepointEntry(inner_pointer))); } // Fill in the results and return the code. @@ -392,11 +393,16 @@ bool StackFrame::HasHandler() const { } +#ifdef DEBUG +static bool GcSafeCodeContains(HeapObject* object, Address addr); +#endif + + void StackFrame::IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder) { Address pc = *pc_address; - ASSERT(holder->contains(pc)); + ASSERT(GcSafeCodeContains(holder, pc)); unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start()); Object* code = holder; v->VisitPointer(&code); @@ -819,7 +825,8 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData( // back to a slow search in this case to find the original optimized // code object. if (!code->contains(pc())) { - code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc()); + code = isolate()->inner_pointer_to_code_cache()-> + GcSafeFindCodeForInnerPointer(pc()); } ASSERT(code != NULL); ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION); @@ -881,6 +888,11 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) { } +int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const { + return Smi::cast(GetExpression(0))->value(); +} + + Address ArgumentsAdaptorFrame::GetCallerStackPointer() const { return fp() + StandardFrameConstants::kCallerSPOffset; } @@ -1155,52 +1167,89 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) { // ------------------------------------------------------------------------- -Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) { +static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) { + MapWord map_word = object->map_word(); + return map_word.IsForwardingAddress() ? 
+ map_word.ToForwardingAddress()->map() : map_word.ToMap(); +} + + +static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) { + return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object)); +} + + +#ifdef DEBUG +static bool GcSafeCodeContains(HeapObject* code, Address addr) { + Map* map = GcSafeMapOfCodeSpaceObject(code); + ASSERT(map == code->GetHeap()->code_map()); + Address start = code->address(); + Address end = code->address() + code->SizeFromMap(map); + return start <= addr && addr < end; +} +#endif + + +Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object, + Address inner_pointer) { Code* code = reinterpret_cast<Code*>(object); - ASSERT(code != NULL && code->contains(pc)); + ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer)); return code; } -Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) { +Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer( + Address inner_pointer) { Heap* heap = isolate_->heap(); - // Check if the pc points into a large object chunk. - LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc); - if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc); - - // Iterate through the 8K page until we reach the end or find an - // object starting after the pc. - Page* page = Page::FromAddress(pc); - HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction()); - HeapObject* previous = NULL; + // Check if the inner pointer points into a large object chunk. + LargePage* large_page = heap->lo_space()->FindPageContainingPc(inner_pointer); + if (large_page != NULL) { + return GcSafeCastToCode(large_page->GetObject(), inner_pointer); + } + + // Iterate through the page until we reach the end or find an object starting + // after the inner pointer. + Page* page = Page::FromAddress(inner_pointer); + + Address addr = page->skip_list()->StartFor(inner_pointer); + + Address top = heap->code_space()->top(); + Address limit = heap->code_space()->limit(); + while (true) { - HeapObject* next = iterator.next(); - if (next == NULL || next->address() >= pc) { - return GcSafeCastToCode(previous, pc); + if (addr == top && addr != limit) { + addr = limit; + continue; } - previous = next; + + HeapObject* obj = HeapObject::FromAddress(addr); + int obj_size = GcSafeSizeOfCodeSpaceObject(obj); + Address next_addr = addr + obj_size; + if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer); + addr = next_addr; } } -PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) { +InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* + InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) { isolate_->counters()->pc_to_code()->Increment(); - ASSERT(IsPowerOf2(kPcToCodeCacheSize)); + ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize)); uint32_t hash = ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc))); - uint32_t index = hash & (kPcToCodeCacheSize - 1); - PcToCodeCacheEntry* entry = cache(index); - if (entry->pc == pc) { + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer))); + uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1); + InnerPointerToCodeCacheEntry* entry = cache(index); + if (entry->inner_pointer == inner_pointer) { isolate_->counters()->pc_to_code_cached()->Increment(); - ASSERT(entry->code == GcSafeFindCodeForPc(pc)); + ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer)); } else { // Because this code may be interrupted by a profiling signal that - // also queries the cache, we cannot update pc before 
the code has - // been set. Otherwise, we risk trying to use a cache entry before + // also queries the cache, we cannot update inner_pointer before the code + // has been set. Otherwise, we risk trying to use a cache entry before // the code has been computed. - entry->code = GcSafeFindCodeForPc(pc); + entry->code = GcSafeFindCodeForInnerPointer(inner_pointer); entry->safepoint_entry.Reset(); - entry->pc = pc; + entry->inner_pointer = inner_pointer; } return entry; } diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h index fed11c4fa..ca19b053a 100644 --- a/deps/v8/src/frames.h +++ b/deps/v8/src/frames.h @@ -49,36 +49,36 @@ class StackFrameIterator; class ThreadLocalTop; class Isolate; -class PcToCodeCache { +class InnerPointerToCodeCache { public: - struct PcToCodeCacheEntry { - Address pc; + struct InnerPointerToCodeCacheEntry { + Address inner_pointer; Code* code; SafepointEntry safepoint_entry; }; - explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) { + explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) { Flush(); } - Code* GcSafeFindCodeForPc(Address pc); - Code* GcSafeCastToCode(HeapObject* object, Address pc); + Code* GcSafeFindCodeForInnerPointer(Address inner_pointer); + Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer); void Flush() { memset(&cache_[0], 0, sizeof(cache_)); } - PcToCodeCacheEntry* GetCacheEntry(Address pc); + InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer); private: - PcToCodeCacheEntry* cache(int index) { return &cache_[index]; } + InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; } Isolate* isolate_; - static const int kPcToCodeCacheSize = 1024; - PcToCodeCacheEntry cache_[kPcToCodeCacheSize]; + static const int kInnerPointerToCodeCacheSize = 1024; + InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize]; - DISALLOW_COPY_AND_ASSIGN(PcToCodeCache); + DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache); }; @@ -106,9 +106,9 @@ class StackHandler BASE_EMBEDDED { static inline StackHandler* FromAddress(Address address); // Testers - bool is_entry() { return state() == ENTRY; } - bool is_try_catch() { return state() == TRY_CATCH; } - bool is_try_finally() { return state() == TRY_FINALLY; } + inline bool is_entry() const; + inline bool is_try_catch() const; + inline bool is_try_finally() const; private: // Accessors. @@ -139,7 +139,10 @@ class StackFrame BASE_EMBEDDED { enum Type { NONE = 0, STACK_FRAME_TYPE_LIST(DECLARE_TYPE) - NUMBER_OF_TYPES + NUMBER_OF_TYPES, + // Used by FrameScope to indicate that the stack frame is constructed + // manually and the FrameScope does not need to emit code. + MANUAL }; #undef DECLARE_TYPE @@ -215,9 +218,7 @@ class StackFrame BASE_EMBEDDED { virtual Code* unchecked_code() const = 0; // Get the code associated with this frame. - Code* LookupCode() const { - return GetContainingCode(isolate(), pc()); - } + inline Code* LookupCode() const; // Get the code object that contains the given pc. static inline Code* GetContainingCode(Isolate* isolate, Address pc); @@ -299,7 +300,7 @@ class EntryFrame: public StackFrame { virtual void SetCallerFp(Address caller_fp); protected: - explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { } + inline explicit EntryFrame(StackFrameIterator* iterator); // The caller stack pointer for entry frames is always zero. 
The // real information about the caller frame is available through the @@ -326,8 +327,7 @@ class EntryConstructFrame: public EntryFrame { } protected: - explicit EntryConstructFrame(StackFrameIterator* iterator) - : EntryFrame(iterator) { } + inline explicit EntryConstructFrame(StackFrameIterator* iterator); private: friend class StackFrameIterator; @@ -361,7 +361,7 @@ class ExitFrame: public StackFrame { static void FillState(Address fp, Address sp, State* state); protected: - explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { } + inline explicit ExitFrame(StackFrameIterator* iterator); virtual Address GetCallerStackPointer() const; @@ -394,8 +394,7 @@ class StandardFrame: public StackFrame { } protected: - explicit StandardFrame(StackFrameIterator* iterator) - : StackFrame(iterator) { } + inline explicit StandardFrame(StackFrameIterator* iterator); virtual void ComputeCallerState(State* state) const; @@ -514,8 +513,7 @@ class JavaScriptFrame: public StandardFrame { } protected: - explicit JavaScriptFrame(StackFrameIterator* iterator) - : StandardFrame(iterator) { } + inline explicit JavaScriptFrame(StackFrameIterator* iterator); virtual Address GetCallerStackPointer() const; @@ -552,8 +550,7 @@ class OptimizedFrame : public JavaScriptFrame { DeoptimizationInputData* GetDeoptimizationData(int* deopt_index); protected: - explicit OptimizedFrame(StackFrameIterator* iterator) - : JavaScriptFrame(iterator) { } + inline explicit OptimizedFrame(StackFrameIterator* iterator); private: friend class StackFrameIterator; @@ -581,12 +578,9 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame { int index) const; protected: - explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator) - : JavaScriptFrame(iterator) { } + inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator); - virtual int GetNumberOfIncomingArguments() const { - return Smi::cast(GetExpression(0))->value(); - } + virtual int GetNumberOfIncomingArguments() const; virtual Address GetCallerStackPointer() const; @@ -611,8 +605,7 @@ class InternalFrame: public StandardFrame { } protected: - explicit InternalFrame(StackFrameIterator* iterator) - : StandardFrame(iterator) { } + inline explicit InternalFrame(StackFrameIterator* iterator); virtual Address GetCallerStackPointer() const; @@ -633,8 +626,7 @@ class ConstructFrame: public InternalFrame { } protected: - explicit ConstructFrame(StackFrameIterator* iterator) - : InternalFrame(iterator) { } + inline explicit ConstructFrame(StackFrameIterator* iterator); private: friend class StackFrameIterator; @@ -710,20 +702,26 @@ class JavaScriptFrameIteratorTemp BASE_EMBEDDED { inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate); + inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top); + // Skip frames until the frame with the given id is reached. 
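The frames.cc/frames.h hunks above rename PcToCodeCache to InnerPointerToCodeCache: a 1024-entry, direct-mapped table indexed by a hash of the inner pointer, refilled by the GC-safe slow lookup on a miss. A sketch of that lookup shape follows; the hash function, types, and stand-in slow path are assumptions, not V8's ComputeIntegerHash or Code type.

```cpp
// Direct-mapped lookup in the spirit of InnerPointerToCodeCache: a
// power-of-two table indexed by a hash of the address, refilled from a
// slow lookup on a miss. Hash and types here are stand-ins.
#include <cstdint>
#include <cstdio>
#include <cstring>

typedef uintptr_t Address;
typedef const void* CodeObject;  // Stand-in for Code*.

class DirectMappedCodeCache {
 public:
  DirectMappedCodeCache() { Flush(); }

  void Flush() { std::memset(cache_, 0, sizeof(cache_)); }

  CodeObject Get(Address inner_pointer, CodeObject (*slow_lookup)(Address)) {
    // kSize is a power of two, so the hash reduces with a simple mask.
    uint32_t hash =
        static_cast<uint32_t>(inner_pointer >> 3) * 2654435761u;  // Knuth hash
    Entry* entry = &cache_[hash & (kSize - 1)];
    if (entry->inner_pointer != inner_pointer) {
      // Fill the result before the key, mirroring the ordering comment in
      // GetCacheEntry: a reader that sees the new key must not see a stale
      // code pointer.
      entry->code = slow_lookup(inner_pointer);
      entry->inner_pointer = inner_pointer;
    }
    return entry->code;
  }

 private:
  struct Entry {
    Address inner_pointer;
    CodeObject code;
  };
  static const int kSize = 1024;
  Entry cache_[kSize];
};

static CodeObject SlowLookup(Address) { return "some code object"; }

int main() {
  DirectMappedCodeCache cache;
  int probe;
  Address pc = reinterpret_cast<Address>(&probe);
  std::printf("%s\n", static_cast<const char*>(cache.Get(pc, SlowLookup)));
  return 0;
}
```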
explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); } inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id); - JavaScriptFrameIteratorTemp(Address fp, Address sp, - Address low_bound, Address high_bound) : + JavaScriptFrameIteratorTemp(Address fp, + Address sp, + Address low_bound, + Address high_bound) : iterator_(fp, sp, low_bound, high_bound) { if (!done()) Advance(); } JavaScriptFrameIteratorTemp(Isolate* isolate, - Address fp, Address sp, - Address low_bound, Address high_bound) : + Address fp, + Address sp, + Address low_bound, + Address high_bound) : iterator_(isolate, fp, sp, low_bound, high_bound) { if (!done()) Advance(); } diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 807387413..083675d13 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -244,11 +244,6 @@ void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) { } -void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) { - Visit(expr->expression()); -} - - void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) { Visit(expr->left()); Visit(expr->right()); @@ -291,8 +286,10 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { code->set_optimizable(info->IsOptimizable()); cgen.PopulateDeoptimizationData(code); code->set_has_deoptimization_support(info->HasDeoptimizationSupport()); +#ifdef ENABLE_DEBUGGER_SUPPORT code->set_has_debug_break_slots( info->isolate()->debugger()->IsDebuggerActive()); +#endif // ENABLE_DEBUGGER_SUPPORT code->set_allow_osr_at_loop_nesting_level(0); code->set_stack_check_table_offset(table_offset); CodeGenerator::PrintCode(code, info); @@ -523,7 +520,7 @@ void FullCodeGenerator::VisitDeclarations( if (var->IsUnallocated()) { array->set(j++, *(var->name())); if (decl->fun() == NULL) { - if (var->mode() == Variable::CONST) { + if (var->mode() == CONST) { // In case this is const property use the hole. array->set_the_hole(j++); } else { @@ -823,9 +820,19 @@ void FullCodeGenerator::VisitBlock(Block* stmt) { if (stmt->block_scope() != NULL) { { Comment cmnt(masm_, "[ Extend block context"); scope_ = stmt->block_scope(); - __ Push(scope_->GetSerializedScopeInfo()); + Handle<SerializedScopeInfo> scope_info = scope_->GetSerializedScopeInfo(); + int heap_slots = + scope_info->NumberOfContextSlots() - Context::MIN_CONTEXT_SLOTS; + __ Push(scope_info); PushFunctionArgumentForContextAllocation(); - __ CallRuntime(Runtime::kPushBlockContext, 2); + if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) { + FastNewBlockContextStub stub(heap_slots); + __ CallStub(&stub); + } else { + __ CallRuntime(Runtime::kPushBlockContext, 2); + } + + // Replace the context stored in the frame. 
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register()); } @@ -1321,19 +1328,21 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit( } -bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare, - Label* if_true, - Label* if_false, - Label* fall_through) { - Expression *expr; +bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) { + Expression *sub_expr; Handle<String> check; - if (compare->IsLiteralCompareTypeof(&expr, &check)) { - EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through); + if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) { + EmitLiteralCompareTypeof(sub_expr, check); + return true; + } + + if (expr->IsLiteralCompareUndefined(&sub_expr)) { + EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue); return true; } - if (compare->IsLiteralCompareUndefined(&expr)) { - EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through); + if (expr->IsLiteralCompareNull(&sub_expr)) { + EmitLiteralCompareNil(expr, sub_expr, kNullValue); return true; } diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h index 803c61873..081192a54 100644 --- a/deps/v8/src/full-codegen.h +++ b/deps/v8/src/full-codegen.h @@ -391,25 +391,16 @@ class FullCodeGenerator: public AstVisitor { // Try to perform a comparison as a fast inlined literal compare if // the operands allow it. Returns true if the compare operations // has been matched and all code generated; false otherwise. - bool TryLiteralCompare(CompareOperation* compare, - Label* if_true, - Label* if_false, - Label* fall_through); + bool TryLiteralCompare(CompareOperation* compare); // Platform-specific code for comparing the type of a value with // a given literal string. - void EmitLiteralCompareTypeof(Expression* expr, - Handle<String> check, - Label* if_true, - Label* if_false, - Label* fall_through); - - // Platform-specific code for strict equality comparison with - // the undefined value. - void EmitLiteralCompareUndefined(Expression* expr, - Label* if_true, - Label* if_false, - Label* fall_through); + void EmitLiteralCompareTypeof(Expression* expr, Handle<String> check); + + // Platform-specific code for equality comparison with a nil-like value. + void EmitLiteralCompareNil(CompareOperation* expr, + Expression* sub_expr, + NilValue nil); // Bailout support. void PrepareForBailout(Expression* node, State state); @@ -432,7 +423,7 @@ class FullCodeGenerator: public AstVisitor { // Platform-specific code for a variable, constant, or function // declaration. Functions have an initial value. void EmitDeclaration(VariableProxy* proxy, - Variable::Mode mode, + VariableMode mode, FunctionLiteral* function, int* global_count); diff --git a/deps/v8/src/func-name-inferrer.h b/deps/v8/src/func-name-inferrer.h index bec3a5cf9..1a5726832 100644 --- a/deps/v8/src/func-name-inferrer.h +++ b/deps/v8/src/func-name-inferrer.h @@ -70,6 +70,12 @@ class FuncNameInferrer : public ZoneObject { } } + void RemoveLastFunction() { + if (IsOpen() && !funcs_to_infer_.is_empty()) { + funcs_to_infer_.RemoveLast(); + } + } + // Infers a function name and leaves names collection state. 
void Infer() { ASSERT(IsOpen()); diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index 6c6966aee..d0c78d6e2 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -255,6 +255,10 @@ const int kBinary32MinExponent = 0x01; const int kBinary32MantissaBits = 23; const int kBinary32ExponentShift = 23; +// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no +// other bits set. +const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51; + // ASCII/UC16 constants // Code-point values in Unicode 4.0 are 21 bits wide. typedef uint16_t uc16; diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index 35c363c10..57f5d1b66 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -190,7 +190,11 @@ static int ExpectedNofPropertiesFromEstimate(int estimate) { // Inobject slack tracking will reclaim redundant inobject space later, // so we can afford to adjust the estimate generously. - return estimate + 8; + if (FLAG_clever_optimizations) { + return estimate + 8; + } else { + return estimate + 3; + } } @@ -421,17 +425,18 @@ Handle<Object> PreventExtensions(Handle<JSObject> object) { } -Handle<Object> GetHiddenProperties(Handle<JSObject> obj, - JSObject::HiddenPropertiesFlag flag) { +Handle<Object> SetHiddenProperty(Handle<JSObject> obj, + Handle<String> key, + Handle<Object> value) { CALL_HEAP_FUNCTION(obj->GetIsolate(), - obj->GetHiddenProperties(flag), + obj->SetHiddenProperty(*key, *value), Object); } -int GetIdentityHash(Handle<JSObject> obj) { +int GetIdentityHash(Handle<JSReceiver> obj) { CALL_AND_RETRY(obj->GetIsolate(), - obj->GetIdentityHash(JSObject::ALLOW_CREATION), + obj->GetIdentityHash(ALLOW_CREATION), return Smi::cast(__object__)->value(), return 0); } @@ -886,7 +891,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table, - Handle<JSObject> key, + Handle<JSReceiver> key, Handle<Object> value) { CALL_HEAP_FUNCTION(table->GetIsolate(), table->Put(*key, *value), diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h index 7eaf4de92..d5521f89c 100644 --- a/deps/v8/src/handles.h +++ b/deps/v8/src/handles.h @@ -263,14 +263,13 @@ Handle<Object> GetPrototype(Handle<Object> obj); Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value); -// Return the object's hidden properties object. If the object has no hidden -// properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new -// hidden property object will be allocated. Otherwise Heap::undefined_value -// is returned. -Handle<Object> GetHiddenProperties(Handle<JSObject> obj, - JSObject::HiddenPropertiesFlag flag); +// Sets a hidden property on an object. Returns obj on success, undefined +// if trying to set the property on a detached proxy. +Handle<Object> SetHiddenProperty(Handle<JSObject> obj, + Handle<String> key, + Handle<Object> value); -int GetIdentityHash(Handle<JSObject> obj); +int GetIdentityHash(Handle<JSReceiver> obj); Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index); Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop); @@ -348,7 +347,7 @@ Handle<Object> SetPrototype(Handle<JSFunction> function, Handle<Object> PreventExtensions(Handle<JSObject> object); Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table, - Handle<JSObject> key, + Handle<JSReceiver> key, Handle<Object> value); // Does lazy compilation of the given function. 
Returns true on success and diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index 7b666af5b..4bd893e8e 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -33,15 +33,26 @@ #include "list-inl.h" #include "objects.h" #include "v8-counters.h" +#include "store-buffer.h" +#include "store-buffer-inl.h" namespace v8 { namespace internal { void PromotionQueue::insert(HeapObject* target, int size) { + if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) { + NewSpacePage* rear_page = + NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_)); + ASSERT(!rear_page->prev_page()->is_anchor()); + rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit()); + } *(--rear_) = reinterpret_cast<intptr_t>(target); *(--rear_) = size; // Assert no overflow into live objects. - ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top()); +#ifdef DEBUG + SemiSpace::AssertValidRange(HEAP->new_space()->top(), + reinterpret_cast<Address>(rear_)); +#endif } @@ -84,7 +95,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str, // Allocate string. Object* result; { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace()) - ? lo_space_->AllocateRaw(size) + ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE) : old_data_space_->AllocateRaw(size); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -117,7 +128,7 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str, // Allocate string. Object* result; { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace()) - ? lo_space_->AllocateRaw(size) + ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE) : old_data_space_->AllocateRaw(size); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -181,7 +192,7 @@ MaybeObject* Heap::AllocateRaw(int size_in_bytes, } else if (CODE_SPACE == space) { result = code_space_->AllocateRaw(size_in_bytes); } else if (LO_SPACE == space) { - result = lo_space_->AllocateRaw(size_in_bytes); + result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE); } else if (CELL_SPACE == space) { result = cell_space_->AllocateRaw(size_in_bytes); } else { @@ -265,6 +276,11 @@ bool Heap::InNewSpace(Object* object) { } +bool Heap::InNewSpace(Address addr) { + return new_space_.Contains(addr); +} + + bool Heap::InFromSpace(Object* object) { return new_space_.FromSpaceContains(object); } @@ -275,29 +291,36 @@ bool Heap::InToSpace(Object* object) { } +bool Heap::OldGenerationAllocationLimitReached() { + if (!incremental_marking()->IsStopped()) return false; + return OldGenerationSpaceAvailable() < 0; +} + + bool Heap::ShouldBePromoted(Address old_address, int object_size) { // An object should be promoted if: // - the object has survived a scavenge operation or // - to space is already 25% full. 
- return old_address < new_space_.age_mark() - || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2); + NewSpacePage* page = NewSpacePage::FromAddress(old_address); + Address age_mark = new_space_.age_mark(); + bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) && + (!page->ContainsLimit(age_mark) || old_address < age_mark); + return below_mark || (new_space_.Size() + object_size) >= + (new_space_.EffectiveCapacity() >> 2); } void Heap::RecordWrite(Address address, int offset) { - if (new_space_.Contains(address)) return; - ASSERT(!new_space_.FromSpaceContains(address)); - SLOW_ASSERT(Contains(address + offset)); - Page::FromAddress(address)->MarkRegionDirty(address + offset); + if (!InNewSpace(address)) store_buffer_.Mark(address + offset); } void Heap::RecordWrites(Address address, int start, int len) { - if (new_space_.Contains(address)) return; - ASSERT(!new_space_.FromSpaceContains(address)); - Page* page = Page::FromAddress(address); - page->SetRegionMarks(page->GetRegionMarks() | - page->GetRegionMaskForSpan(address + start, len * kPointerSize)); + if (!InNewSpace(address)) { + for (int i = 0; i < len; i++) { + store_buffer_.Mark(address + start + i * kPointerSize); + } + } } @@ -343,31 +366,6 @@ void Heap::CopyBlock(Address dst, Address src, int byte_size) { } -void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst, - Address src, - int byte_size) { - ASSERT(IsAligned(byte_size, kPointerSize)); - - Page* page = Page::FromAddress(dst); - uint32_t marks = page->GetRegionMarks(); - - for (int remaining = byte_size / kPointerSize; - remaining > 0; - remaining--) { - Memory::Object_at(dst) = Memory::Object_at(src); - - if (InNewSpace(Memory::Object_at(dst))) { - marks |= page->GetRegionMaskForAddress(dst); - } - - dst += kPointerSize; - src += kPointerSize; - } - - page->SetRegionMarks(marks); -} - - void Heap::MoveBlock(Address dst, Address src, int byte_size) { ASSERT(IsAligned(byte_size, kPointerSize)); @@ -387,16 +385,6 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) { } -void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst, - Address src, - int byte_size) { - ASSERT(IsAligned(byte_size, kPointerSize)); - ASSERT((dst < src) || (dst >= (src + byte_size))); - - CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size); -} - - void Heap::ScavengePointer(HeapObject** p) { ScavengeObject(p, *p); } @@ -414,7 +402,9 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) { // If the first word is a forwarding address, the object has already been // copied. if (first_word.IsForwardingAddress()) { - *p = first_word.ToForwardingAddress(); + HeapObject* dest = first_word.ToForwardingAddress(); + ASSERT(HEAP->InFromSpace(*p)); + *p = dest; return; } @@ -459,7 +449,7 @@ int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) { amount_of_external_allocated_memory_ - amount_of_external_allocated_memory_at_last_global_gc_; if (amount_since_last_global_gc > external_allocation_limit_) { - CollectAllGarbage(false); + CollectAllGarbage(kNoGCFlags); } } else { // Avoid underflow. 
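The RecordWrite/RecordWrites rewrite above replaces per-page dirty-region marks with a store buffer: a store into old space that may create an old-to-new pointer just appends the slot address, and the next scavenge walks only the recorded slots. A toy version of that barrier is sketched below; the overflow handling is an explicit assumption (a real collector, like the StoreBufferRebuilder later in this diff, falls back to scanning whole pages rather than losing slots), and none of these names are V8's.

```cpp
// Toy store-buffer write barrier in the spirit of Heap::RecordWrite above.
#include <cstddef>
#include <cstdio>

typedef char* Address;

class ToyStoreBuffer {
 public:
  ToyStoreBuffer() : top_(0), overflowed_(false) {}

  // Record a slot in old space that may now hold a new-space pointer.
  void Mark(Address slot) {
    if (top_ == kCapacity) {
      overflowed_ = true;  // Assumed: real code marks the page scan-on-scavenge.
      return;
    }
    slots_[top_++] = slot;
  }

  // At scavenge time, visit only the recorded slots instead of scanning
  // every old-space page for pointers into new space.
  template <typename Visitor>
  void IteratePointersToNewSpace(Visitor visit) {
    for (size_t i = 0; i < top_; i++) visit(slots_[i]);
    top_ = 0;
  }

  bool overflowed() const { return overflowed_; }

 private:
  static const size_t kCapacity = 1024;
  Address slots_[kCapacity];
  size_t top_;
  bool overflowed_;
};

// The barrier itself: stores inside new space need no record.
inline void RecordWrite(ToyStoreBuffer* buffer, bool address_in_new_space,
                        Address slot) {
  if (!address_in_new_space) buffer->Mark(slot);
}

int main() {
  ToyStoreBuffer buffer;
  char old_space_object[16] = {0};
  RecordWrite(&buffer, /*address_in_new_space=*/false, old_space_object + 8);
  buffer.IteratePointersToNewSpace(
      [](Address slot) { std::printf("slot %p\n", static_cast<void*>(slot)); });
  return 0;
}
```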
@@ -476,6 +466,7 @@ void Heap::SetLastScriptId(Object* last_script_id) { roots_[kLastScriptIdRootIndex] = last_script_id; } + Isolate* Heap::isolate() { return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) - reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4); @@ -688,15 +679,6 @@ Heap* _inline_get_heap_() { } -void MarkCompactCollector::SetMark(HeapObject* obj) { - tracer_->increment_marked_count(); -#ifdef DEBUG - UpdateLiveObjectCount(obj); -#endif - obj->SetMark(); -} - - } } // namespace v8::internal #endif // V8_HEAP_INL_H_ diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc index 7e613e917..46c63c27c 100644 --- a/deps/v8/src/heap-profiler.cc +++ b/deps/v8/src/heap-profiler.cc @@ -114,7 +114,6 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name, bool generation_completed = true; switch (s_type) { case HeapSnapshot::kFull: { - HEAP->CollectAllGarbage(true); HeapSnapshotGenerator generator(result, control); generation_completed = generator.GenerateSnapshot(); break; diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index d0185930b..c6efd6205 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -36,13 +36,16 @@ #include "deoptimizer.h" #include "global-handles.h" #include "heap-profiler.h" +#include "incremental-marking.h" #include "liveobjectlist-inl.h" #include "mark-compact.h" #include "natives.h" #include "objects-visiting.h" +#include "objects-visiting-inl.h" #include "runtime-profiler.h" #include "scopeinfo.h" #include "snapshot.h" +#include "store-buffer.h" #include "v8threads.h" #include "vm-state-inl.h" #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP @@ -58,10 +61,6 @@ namespace v8 { namespace internal { -static const intptr_t kMinimumPromotionLimit = 2 * MB; -static const intptr_t kMinimumAllocationLimit = 8 * MB; - - static Mutex* gc_initializer_mutex = OS::CreateMutex(); @@ -70,27 +69,21 @@ Heap::Heap() // semispace_size_ should be a power of 2 and old_generation_size_ should be // a multiple of Page::kPageSize. 
#if defined(ANDROID) - reserved_semispace_size_(2*MB), - max_semispace_size_(2*MB), - initial_semispace_size_(128*KB), - max_old_generation_size_(192*MB), - max_executable_size_(max_old_generation_size_), +#define LUMP_OF_MEMORY (128 * KB) code_range_size_(0), #elif defined(V8_TARGET_ARCH_X64) - reserved_semispace_size_(16*MB), - max_semispace_size_(16*MB), - initial_semispace_size_(1*MB), - max_old_generation_size_(1400*MB), - max_executable_size_(256*MB), +#define LUMP_OF_MEMORY (2 * MB) code_range_size_(512*MB), #else - reserved_semispace_size_(8*MB), - max_semispace_size_(8*MB), - initial_semispace_size_(512*KB), - max_old_generation_size_(700*MB), - max_executable_size_(128*MB), +#define LUMP_OF_MEMORY MB code_range_size_(0), #endif + reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)), + max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)), + initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)), + max_old_generation_size_(700ul * LUMP_OF_MEMORY), + max_executable_size_(128l * LUMP_OF_MEMORY), + // Variables set based on semispace_size_ and old_generation_size_ in // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_) // Will be 4 * reserved_semispace_size_ to ensure that young @@ -100,6 +93,7 @@ Heap::Heap() always_allocate_scope_depth_(0), linear_allocation_scope_depth_(0), contexts_disposed_(0), + scan_on_scavenge_pages_(0), new_space_(this), old_pointer_space_(NULL), old_data_space_(NULL), @@ -109,7 +103,6 @@ Heap::Heap() lo_space_(NULL), gc_state_(NOT_IN_GC), gc_post_processing_depth_(0), - mc_count_(0), ms_count_(0), gc_count_(0), unflattened_strings_length_(0), @@ -121,10 +114,13 @@ Heap::Heap() #endif // DEBUG old_gen_promotion_limit_(kMinimumPromotionLimit), old_gen_allocation_limit_(kMinimumAllocationLimit), + old_gen_limit_factor_(1), + size_of_old_gen_at_last_old_space_gc_(0), external_allocation_limit_(0), amount_of_external_allocated_memory_(0), amount_of_external_allocated_memory_at_last_global_gc_(0), old_gen_exhausted_(false), + store_buffer_rebuilder_(store_buffer()), hidden_symbol_(NULL), global_gc_prologue_callback_(NULL), global_gc_epilogue_callback_(NULL), @@ -141,12 +137,14 @@ Heap::Heap() min_in_mutator_(kMaxInt), alive_after_last_gc_(0), last_gc_end_timestamp_(0.0), - page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED), + store_buffer_(this), + marking_(this), + incremental_marking_(this), number_idle_notifications_(0), last_idle_notification_gc_count_(0), last_idle_notification_gc_count_init_(false), configured_(false), - is_safe_to_read_maps_(true) { + chunks_queued_for_free_(NULL) { // Allow build-time customization of the max semispace size. Building // V8 with snapshots and a non-default max semispace size is much // easier if you can define it as part of the build environment. @@ -224,29 +222,10 @@ bool Heap::HasBeenSetup() { int Heap::GcSafeSizeOfOldObject(HeapObject* object) { - ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects. - ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded()); - MapWord map_word = object->map_word(); - map_word.ClearMark(); - map_word.ClearOverflow(); - return object->SizeFromMap(map_word.ToMap()); -} - - -int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) { - ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects. 
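The constructor hunk above replaces per-platform literal sizes with formulas built on LUMP_OF_MEMORY. Working the X64 branch through by hand (assuming, as the Max() suggests, that Page::kPageSize is below the 2 MB lump) reproduces the removed numbers, except that the initial semispace grows from 1 MB to 2 MB:

```cpp
// Arithmetic check of the X64 branch: LUMP_OF_MEMORY is 2 MB there, and
// Page::kPageSize is assumed smaller, so Max() picks the lump.
#include <cstdio>

int main() {
  const long long KB = 1024, MB = 1024 * KB;
  const long long lump = 2 * MB;  // LUMP_OF_MEMORY on V8_TARGET_ARCH_X64
  std::printf("reserved/max semispace: %lld MB\n", 8 * lump / MB);    // 16
  std::printf("initial semispace:      %lld MB\n", lump / MB);        // 2 (was 1)
  std::printf("max old generation:     %lld MB\n", 700 * lump / MB);  // 1400
  std::printf("max executable:         %lld MB\n", 128 * lump / MB);  // 256
  return 0;
}
```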
- ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded()); - uint32_t marker = Memory::uint32_at(object->address()); - if (marker == MarkCompactCollector::kSingleFreeEncoding) { - return kIntSize; - } else if (marker == MarkCompactCollector::kMultiFreeEncoding) { - return Memory::int_at(object->address() + kIntSize); - } else { - MapWord map_word = object->map_word(); - Address map_address = map_word.DecodeMapAddress(HEAP->map_space()); - Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address)); - return object->SizeFromMap(map); + if (IntrusiveMarking::IsMarked(object)) { + return IntrusiveMarking::SizeOfMarkedObject(object); } + return object->SizeFromMap(object->map()); } @@ -400,6 +379,7 @@ void Heap::GarbageCollectionPrologue() { #endif // DEBUG LiveObjectList::GCPrologue(); + store_buffer()->GCPrologue(); } intptr_t Heap::SizeOfObjects() { @@ -412,6 +392,7 @@ intptr_t Heap::SizeOfObjects() { } void Heap::GarbageCollectionEpilogue() { + store_buffer()->GCEpilogue(); LiveObjectList::GCEpilogue(); #ifdef DEBUG allow_allocation(true); @@ -443,13 +424,13 @@ void Heap::GarbageCollectionEpilogue() { } -void Heap::CollectAllGarbage(bool force_compaction) { +void Heap::CollectAllGarbage(int flags) { // Since we are ignoring the return value, the exact choice of space does // not matter, so long as we do not specify NEW_SPACE, which would not // cause a full GC. - mark_compact_collector_.SetForceCompaction(force_compaction); + mark_compact_collector_.SetFlags(flags); CollectGarbage(OLD_POINTER_SPACE); - mark_compact_collector_.SetForceCompaction(false); + mark_compact_collector_.SetFlags(kNoGCFlags); } @@ -457,8 +438,6 @@ void Heap::CollectAllAvailableGarbage() { // Since we are ignoring the return value, the exact choice of space does // not matter, so long as we do not specify NEW_SPACE, which would not // cause a full GC. - mark_compact_collector()->SetForceCompaction(true); - // Major GC would invoke weak handle callbacks on weakly reachable // handles, but won't collect weakly reachable objects until next // major GC. Therefore if we collect aggressively and weak handle callback @@ -467,13 +446,14 @@ void Heap::CollectAllAvailableGarbage() { // Note: as weak callbacks can execute arbitrary code, we cannot // hope that eventually there will be no weak callbacks invocations. // Therefore stop recollecting after several attempts. 
+ mark_compact_collector()->SetFlags(kMakeHeapIterableMask); const int kMaxNumberOfAttempts = 7; for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) { break; } } - mark_compact_collector()->SetForceCompaction(false); + mark_compact_collector()->SetFlags(kNoGCFlags); } @@ -490,6 +470,23 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) { allocation_timeout_ = Max(6, FLAG_gc_interval); #endif + if (collector == SCAVENGER && !incremental_marking()->IsStopped()) { + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Scavenge during marking.\n"); + } + } + + if (collector == MARK_COMPACTOR && + !mark_compact_collector()->PreciseSweepingRequired() && + !incremental_marking()->IsStopped() && + !incremental_marking()->should_hurry() && + FLAG_incremental_marking_steps) { + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Delaying MarkSweep.\n"); + } + collector = SCAVENGER; + } + bool next_gc_likely_to_collect_more = false; { GCTracer tracer(this); @@ -512,13 +509,24 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) { GarbageCollectionEpilogue(); } + ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped()); + if (incremental_marking()->IsStopped()) { + if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) { + incremental_marking()->Start(); + } + } + return next_gc_likely_to_collect_more; } void Heap::PerformScavenge() { GCTracer tracer(this); - PerformGarbageCollection(SCAVENGER, &tracer); + if (incremental_marking()->IsStopped()) { + PerformGarbageCollection(SCAVENGER, &tracer); + } else { + PerformGarbageCollection(MARK_COMPACTOR, &tracer); + } } @@ -610,13 +618,6 @@ void Heap::EnsureFromSpaceIsCommitted() { // Committing memory to from space failed. // Try shrinking and try again. - PagedSpaces spaces; - for (PagedSpace* space = spaces.next(); - space != NULL; - space = spaces.next()) { - space->RelinkPageListInChunkOrder(true); - } - Shrink(); if (new_space_.CommitFromSpaceIfNeeded()) return; @@ -647,7 +648,10 @@ void Heap::ClearJSFunctionResultCaches() { void Heap::ClearNormalizedMapCaches() { - if (isolate_->bootstrapper()->IsActive()) return; + if (isolate_->bootstrapper()->IsActive() && + !incremental_marking()->IsMarking()) { + return; + } Object* context = global_contexts_list_; while (!context->IsUndefined()) { @@ -657,24 +661,6 @@ void Heap::ClearNormalizedMapCaches() { } -#ifdef DEBUG - -enum PageWatermarkValidity { - ALL_VALID, - ALL_INVALID -}; - -static void VerifyPageWatermarkValidity(PagedSpace* space, - PageWatermarkValidity validity) { - PageIterator it(space, PageIterator::PAGES_IN_USE); - bool expected_value = (validity == ALL_VALID); - while (it.has_next()) { - Page* page = it.next(); - ASSERT(page->IsWatermarkValid() == expected_value); - } -} -#endif - void Heap::UpdateSurvivalRateTrend(int start_new_space_size) { double survival_rate = (static_cast<double>(young_survivors_after_last_gc_) * 100) / @@ -727,6 +713,13 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, int start_new_space_size = Heap::new_space()->SizeAsInt(); + if (IsHighSurvivalRate()) { + // We speed up the incremental marker if it is running so that it + // does not fall behind the rate of promotion, which would cause a + // constantly growing old space. 
+ incremental_marking()->NotifyOfHighPromotionRate(); + } + if (collector == MARK_COMPACTOR) { // Perform mark-sweep with optional compaction. MarkCompact(tracer); @@ -736,11 +729,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, UpdateSurvivalRateTrend(start_new_space_size); - intptr_t old_gen_size = PromotedSpaceSize(); - old_gen_promotion_limit_ = - old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3); - old_gen_allocation_limit_ = - old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2); + size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize(); if (high_survival_rate_during_scavenges && IsStableOrIncreasingSurvivalTrend()) { @@ -750,10 +739,16 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, // In this case we aggressively raise old generation memory limits to // postpone subsequent mark-sweep collection and thus trade memory // space for the mutation speed. - old_gen_promotion_limit_ *= 2; - old_gen_allocation_limit_ *= 2; + old_gen_limit_factor_ = 2; + } else { + old_gen_limit_factor_ = 1; } + old_gen_promotion_limit_ = + OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_); + old_gen_allocation_limit_ = + OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_); + old_gen_exhausted_ = false; } else { tracer_ = tracer; @@ -782,9 +777,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, amount_of_external_allocated_memory_; } - GCCallbackFlags callback_flags = tracer->is_compacting() - ? kGCCallbackFlagCompacted - : kNoGCCallbackFlags; + GCCallbackFlags callback_flags = kNoGCCallbackFlags; for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { if (gc_type & gc_epilogue_callbacks_[i].gc_type) { gc_epilogue_callbacks_[i].callback(gc_type, callback_flags); @@ -808,34 +801,24 @@ void Heap::MarkCompact(GCTracer* tracer) { mark_compact_collector_.Prepare(tracer); - bool is_compacting = mark_compact_collector_.IsCompacting(); + ms_count_++; + tracer->set_full_gc_count(ms_count_); - if (is_compacting) { - mc_count_++; - } else { - ms_count_++; - } - tracer->set_full_gc_count(mc_count_ + ms_count_); + MarkCompactPrologue(); - MarkCompactPrologue(is_compacting); - - is_safe_to_read_maps_ = false; mark_compact_collector_.CollectGarbage(); - is_safe_to_read_maps_ = true; LOG(isolate_, ResourceEvent("markcompact", "end")); gc_state_ = NOT_IN_GC; - Shrink(); - isolate_->counters()->objs_since_last_full()->Set(0); contexts_disposed_ = 0; } -void Heap::MarkCompactPrologue(bool is_compacting) { +void Heap::MarkCompactPrologue() { // At any old GC clear the keyed lookup cache to enable collection of unused // maps. isolate_->keyed_lookup_cache()->Clear(); @@ -847,7 +830,8 @@ void Heap::MarkCompactPrologue(bool is_compacting) { CompletelyClearInstanceofCache(); - if (is_compacting) FlushNumberStringCache(); + // TODO(1605) select heuristic for flushing NumberString cache with + // FlushNumberStringCache if (FLAG_cleanup_code_caches_at_gc) { polymorphic_code_cache()->set_cache(undefined_value()); } @@ -857,13 +841,8 @@ void Heap::MarkCompactPrologue(bool is_compacting) { Object* Heap::FindCodeObject(Address a) { - Object* obj = NULL; // Initialization to please compiler. - { MaybeObject* maybe_obj = code_space_->FindObject(a); - if (!maybe_obj->ToObject(&obj)) { - obj = lo_space_->FindObject(a)->ToObjectUnchecked(); - } - } - return obj; + return isolate()->inner_pointer_to_code_cache()-> + GcSafeFindCodeForInnerPointer(a); } @@ -911,14 +890,18 @@ static void VerifyNonPointerSpacePointers() { // do not expect them. 
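In the PerformGarbageCollection hunk above, the inline limit formulas are replaced by OldGenPromotionLimit/OldGenAllocationLimit helpers fed by size_of_old_gen_at_last_old_space_gc_ and a factor of 1 or 2. The helper bodies are not part of this diff, so the sketch below simply reuses the deleted formulas (with kMinimumPromotionLimit = 2 MB and kMinimumAllocationLimit = 8 MB, removed near the top of heap.cc) and applies the factor the way the removed "*= 2" lines did; treat that last part as an assumption.

```cpp
// Reconstruction of the old-generation GC limits from the removed code;
// how the new helpers apply old_gen_limit_factor_ is assumed here.
#include <algorithm>
#include <cstdint>
#include <cstdio>

const int64_t MB = 1024 * 1024;
const int64_t kMinimumPromotionLimit = 2 * MB;
const int64_t kMinimumAllocationLimit = 8 * MB;

int64_t PromotionLimit(int64_t old_gen_size, int64_t factor) {
  int64_t limit =
      old_gen_size + std::max(kMinimumPromotionLimit, old_gen_size / 3);
  return limit * factor;  // factor is 2 after a run of high survival rates.
}

int64_t AllocationLimit(int64_t old_gen_size, int64_t factor) {
  int64_t limit =
      old_gen_size + std::max(kMinimumAllocationLimit, old_gen_size / 2);
  return limit * factor;
}

int main() {
  int64_t old_gen = 90 * MB;  // Promoted space size after the last full GC.
  std::printf("promotion limit:  %lld MB\n",
              static_cast<long long>(PromotionLimit(old_gen, 1) / MB));   // 120
  std::printf("allocation limit: %lld MB\n",
              static_cast<long long>(AllocationLimit(old_gen, 2) / MB));  // 270
  return 0;
}
```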
VerifyNonPointerSpacePointersVisitor v; HeapObjectIterator code_it(HEAP->code_space()); - for (HeapObject* object = code_it.next(); - object != NULL; object = code_it.next()) + for (HeapObject* object = code_it.Next(); + object != NULL; object = code_it.Next()) object->Iterate(&v); - HeapObjectIterator data_it(HEAP->old_data_space()); - for (HeapObject* object = data_it.next(); - object != NULL; object = data_it.next()) - object->Iterate(&v); + // The old data space was normally swept conservatively so that the iterator + // doesn't work, so we normally skip the next bit. + if (!HEAP->old_data_space()->was_swept_conservatively()) { + HeapObjectIterator data_it(HEAP->old_data_space()); + for (HeapObject* object = data_it.Next(); + object != NULL; object = data_it.Next()) + object->Iterate(&v); + } } #endif @@ -940,6 +923,64 @@ static bool IsUnscavengedHeapObject(Heap* heap, Object** p) { } +void Heap::ScavengeStoreBufferCallback( + Heap* heap, + MemoryChunk* page, + StoreBufferEvent event) { + heap->store_buffer_rebuilder_.Callback(page, event); +} + + +void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) { + if (event == kStoreBufferStartScanningPagesEvent) { + start_of_current_page_ = NULL; + current_page_ = NULL; + } else if (event == kStoreBufferScanningPageEvent) { + if (current_page_ != NULL) { + // If this page already overflowed the store buffer during this iteration. + if (current_page_->scan_on_scavenge()) { + // Then we should wipe out the entries that have been added for it. + store_buffer_->SetTop(start_of_current_page_); + } else if (store_buffer_->Top() - start_of_current_page_ >= + (store_buffer_->Limit() - store_buffer_->Top()) >> 2) { + // Did we find too many pointers in the previous page? The heuristic is + // that no page can take more then 1/5 the remaining slots in the store + // buffer. + current_page_->set_scan_on_scavenge(true); + store_buffer_->SetTop(start_of_current_page_); + } else { + // In this case the page we scanned took a reasonable number of slots in + // the store buffer. It has now been rehabilitated and is no longer + // marked scan_on_scavenge. + ASSERT(!current_page_->scan_on_scavenge()); + } + } + start_of_current_page_ = store_buffer_->Top(); + current_page_ = page; + } else if (event == kStoreBufferFullEvent) { + // The current page overflowed the store buffer again. Wipe out its entries + // in the store buffer and mark it scan-on-scavenge again. This may happen + // several times while scanning. + if (current_page_ == NULL) { + // Store Buffer overflowed while scanning promoted objects. These are not + // in any particular page, though they are likely to be clustered by the + // allocation routines. + store_buffer_->HandleFullness(); + } else { + // Store Buffer overflowed while scanning a particular old space page for + // pointers to new space. 
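The StoreBufferRebuilder::Callback above flips a page to scan-on-scavenge when the entries it just added amount to at least a quarter of the slots still free, which is the "no page can take more than 1/5 of the remaining slots" rule in its comment: if the page added x entries and r slots are still free, x >= r/4 is the same as x >= (x + r)/5. The predicate in isolation, with illustrative numbers:

```cpp
// The page-flipping heuristic above, in isolation: x >= r/4 where r is
// the space still free, i.e. the page consumed at least a fifth of the
// x + r slots available when it started scanning.
#include <cassert>

bool PageTookTooManySlots(int entries_added_for_page, int slots_still_free) {
  return entries_added_for_page >= (slots_still_free >> 2);
}

int main() {
  // 100 slots were free when the page started scanning.
  assert(PageTookTooManySlots(20, 80));   // 20 of 100 used: exactly 1/5, flips.
  assert(!PageTookTooManySlots(10, 90));  // 10 of 100 used: stays as-is.
  return 0;
}
```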
+ ASSERT(current_page_ == page); + ASSERT(page != NULL); + current_page_->set_scan_on_scavenge(true); + ASSERT(start_of_current_page_ != store_buffer_->Top()); + store_buffer_->SetTop(start_of_current_page_); + } + } else { + UNREACHABLE(); + } +} + + void Heap::Scavenge() { #ifdef DEBUG if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); @@ -947,22 +988,6 @@ void Heap::Scavenge() { gc_state_ = SCAVENGE; - SwitchScavengingVisitorsTableIfProfilingWasEnabled(); - - Page::FlipMeaningOfInvalidatedWatermarkFlag(this); -#ifdef DEBUG - VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID); - VerifyPageWatermarkValidity(map_space_, ALL_VALID); -#endif - - // We do not update an allocation watermark of the top page during linear - // allocation to avoid overhead. So to maintain the watermark invariant - // we have to manually cache the watermark and mark the top page as having an - // invalid watermark. This guarantees that dirty regions iteration will use a - // correct watermark even if a linear allocation happens. - old_pointer_space_->FlushTopPageWatermark(); - map_space_->FlushTopPageWatermark(); - // Implements Cheney's copying algorithm LOG(isolate_, ResourceEvent("scavenge", "begin")); @@ -974,6 +999,13 @@ void Heap::Scavenge() { CheckNewSpaceExpansionCriteria(); + SelectScavengingVisitorsTable(); + + incremental_marking()->PrepareForScavenge(); + + old_pointer_space()->AdvanceSweeper(new_space_.Size()); + old_data_space()->AdvanceSweeper(new_space_.Size()); + // Flip the semispaces. After flipping, to space is empty, from space has // live objects. new_space_.Flip(); @@ -996,32 +1028,29 @@ void Heap::Scavenge() { // for the addresses of promoted objects: every object promoted // frees up its size in bytes from the top of the new space, and // objects are at least one pointer in size. - Address new_space_front = new_space_.ToSpaceLow(); - promotion_queue_.Initialize(new_space_.ToSpaceHigh()); + Address new_space_front = new_space_.ToSpaceStart(); + promotion_queue_.Initialize(new_space_.ToSpaceEnd()); + +#ifdef DEBUG + store_buffer()->Clean(); +#endif - is_safe_to_read_maps_ = false; ScavengeVisitor scavenge_visitor(this); // Copy roots. IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); - // Copy objects reachable from the old generation. By definition, - // there are no intergenerational pointers in code or data spaces. - IterateDirtyRegions(old_pointer_space_, - &Heap::IteratePointersInDirtyRegion, - &ScavengePointer, - WATERMARK_CAN_BE_INVALID); - - IterateDirtyRegions(map_space_, - &IteratePointersInDirtyMapsRegion, - &ScavengePointer, - WATERMARK_CAN_BE_INVALID); - - lo_space_->IterateDirtyRegions(&ScavengePointer); + // Copy objects reachable from the old generation. + { + StoreBufferRebuildScope scope(this, + store_buffer(), + &ScavengeStoreBufferCallback); + store_buffer()->IteratePointersToNewSpace(&ScavengeObject); + } // Copy objects reachable from cells by scavenging cell values directly. 
HeapObjectIterator cell_iterator(cell_space_); - for (HeapObject* cell = cell_iterator.next(); - cell != NULL; cell = cell_iterator.next()) { + for (HeapObject* cell = cell_iterator.Next(); + cell != NULL; cell = cell_iterator.Next()) { if (cell->IsJSGlobalPropertyCell()) { Address value_address = reinterpret_cast<Address>(cell) + @@ -1046,14 +1075,16 @@ void Heap::Scavenge() { LiveObjectList::UpdateReferencesForScavengeGC(); isolate()->runtime_profiler()->UpdateSamplesAfterScavenge(); + incremental_marking()->UpdateMarkingDequeAfterScavenge(); ASSERT(new_space_front == new_space_.top()); - is_safe_to_read_maps_ = true; - // Set age mark. new_space_.set_age_mark(new_space_.top()); + new_space_.LowerInlineAllocationLimit( + new_space_.inline_allocation_limit_step()); + // Update how much has survived scavenge. IncrementYoungSurvivorsCounter(static_cast<int>( (PromotedSpaceSize() - survived_watermark) + new_space_.Size())); @@ -1112,35 +1143,56 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable( } +void Heap::UpdateReferencesInExternalStringTable( + ExternalStringTableUpdaterCallback updater_func) { + + // Update old space string references. + if (external_string_table_.old_space_strings_.length() > 0) { + Object** start = &external_string_table_.old_space_strings_[0]; + Object** end = start + external_string_table_.old_space_strings_.length(); + for (Object** p = start; p < end; ++p) *p = updater_func(this, p); + } + + UpdateNewSpaceReferencesInExternalStringTable(updater_func); +} + + static Object* ProcessFunctionWeakReferences(Heap* heap, Object* function, WeakObjectRetainer* retainer) { - Object* head = heap->undefined_value(); + Object* undefined = heap->undefined_value(); + Object* head = undefined; JSFunction* tail = NULL; Object* candidate = function; - while (candidate != heap->undefined_value()) { + while (candidate != undefined) { // Check whether to keep the candidate in the list. JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate); Object* retain = retainer->RetainAs(candidate); if (retain != NULL) { - if (head == heap->undefined_value()) { + if (head == undefined) { // First element in the list. - head = candidate_function; + head = retain; } else { // Subsequent elements in the list. ASSERT(tail != NULL); - tail->set_next_function_link(candidate_function); + tail->set_next_function_link(retain); } // Retained function is new tail. + candidate_function = reinterpret_cast<JSFunction*>(retain); tail = candidate_function; + + ASSERT(retain->IsUndefined() || retain->IsJSFunction()); + + if (retain == undefined) break; } + // Move to next element in the list. candidate = candidate_function->next_function_link(); } // Terminate the list if there is one or more elements. if (tail != NULL) { - tail->set_next_function_link(heap->undefined_value()); + tail->set_next_function_link(undefined); } return head; @@ -1148,28 +1200,32 @@ static Object* ProcessFunctionWeakReferences(Heap* heap, void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) { - Object* head = undefined_value(); + Object* undefined = undefined_value(); + Object* head = undefined; Context* tail = NULL; Object* candidate = global_contexts_list_; - while (candidate != undefined_value()) { + while (candidate != undefined) { // Check whether to keep the candidate in the list. 
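ProcessFunctionWeakReferences above (and ProcessWeakReferences just below it) both use the same head/tail relinking pattern: walk the old intrusive list, ask the retainer about each candidate, and splice only the survivors into a fresh list. The pattern on a plain singly-linked list, with hypothetical types standing in for the JSFunction/Context lists and the WeakObjectRetainer:

```cpp
// Head/tail relinking of survivors, reduced to a plain linked list.
#include <cstdio>

struct Node {
  int value;
  Node* next;
};

Node* ProcessWeakList(Node* list, bool (*retain)(Node*)) {
  Node* head = 0;
  Node* tail = 0;
  for (Node* candidate = list; candidate != 0;) {
    Node* next = candidate->next;   // Read before the node is relinked.
    if (retain(candidate)) {
      if (head == 0) {
        head = candidate;           // First survivor is the new head.
      } else {
        tail->next = candidate;     // Splice after the previous survivor.
      }
      tail = candidate;
    }
    candidate = next;
  }
  if (tail != 0) tail->next = 0;    // Terminate the rebuilt list.
  return head;
}

static bool KeepEven(Node* n) { return (n->value & 1) == 0; }

int main() {
  Node c = {3, 0}, b = {2, &c}, a = {1, &b};
  for (Node* n = ProcessWeakList(&a, KeepEven); n != 0; n = n->next) {
    std::printf("%d\n", n->value);  // Prints only 2.
  }
  return 0;
}
```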
Context* candidate_context = reinterpret_cast<Context*>(candidate); Object* retain = retainer->RetainAs(candidate); if (retain != NULL) { - if (head == undefined_value()) { + if (head == undefined) { // First element in the list. - head = candidate_context; + head = retain; } else { // Subsequent elements in the list. ASSERT(tail != NULL); tail->set_unchecked(this, Context::NEXT_CONTEXT_LINK, - candidate_context, + retain, UPDATE_WRITE_BARRIER); } // Retained context is new tail. + candidate_context = reinterpret_cast<Context*>(retain); tail = candidate_context; + if (retain == undefined) break; + // Process the weak list of optimized functions for the context. Object* function_list_head = ProcessFunctionWeakReferences( @@ -1181,6 +1237,7 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) { function_list_head, UPDATE_WRITE_BARRIER); } + // Move to next element in the list. candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK); } @@ -1212,35 +1269,45 @@ class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> { Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front) { do { - ASSERT(new_space_front <= new_space_.top()); - + SemiSpace::AssertValidRange(new_space_front, new_space_.top()); // The addresses new_space_front and new_space_.top() define a // queue of unprocessed copied objects. Process them until the // queue is empty. - while (new_space_front < new_space_.top()) { - HeapObject* object = HeapObject::FromAddress(new_space_front); - new_space_front += NewSpaceScavenger::IterateBody(object->map(), object); + while (new_space_front != new_space_.top()) { + if (!NewSpacePage::IsAtEnd(new_space_front)) { + HeapObject* object = HeapObject::FromAddress(new_space_front); + new_space_front += + NewSpaceScavenger::IterateBody(object->map(), object); + } else { + new_space_front = + NewSpacePage::FromLimit(new_space_front)->next_page()->body(); + } } // Promote and process all the to-be-promoted objects. - while (!promotion_queue_.is_empty()) { - HeapObject* target; - int size; - promotion_queue_.remove(&target, &size); - - // Promoted object might be already partially visited - // during dirty regions iteration. Thus we search specificly - // for pointers to from semispace instead of looking for pointers - // to new space. - ASSERT(!target->IsMap()); - IterateAndMarkPointersToFromSpace(target->address(), - target->address() + size, - &ScavengePointer); + { + StoreBufferRebuildScope scope(this, + store_buffer(), + &ScavengeStoreBufferCallback); + while (!promotion_queue()->is_empty()) { + HeapObject* target; + int size; + promotion_queue()->remove(&target, &size); + + // Promoted object might be already partially visited + // during old space pointer iteration. Thus we search specificly + // for pointers to from semispace instead of looking for pointers + // to new space. + ASSERT(!target->IsMap()); + IterateAndMarkPointersToFromSpace(target->address(), + target->address() + size, + &ScavengeObject); + } } // Take another spin if there are now unswept objects in new space // (there are currently no more unswept promoted objects). 
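Stepping back from the hunk for a moment: the DoScavenge loop above is the Cheney scan the Scavenge comment refers to ("Implements Cheney's copying algorithm"). Everything between new_space_front and new_space_.top() has been copied but not yet scanned, so advancing the front while evacuating the fields it finds drains the implicit queue. A toy, index-based version of the whole algorithm; nothing here is V8 code.

```cpp
// Toy Cheney scavenger: the region between the scan front and the end of
// to-space is the work queue, like new_space_front vs. top() above.
#include <cstdio>
#include <vector>

struct Obj {
  int forwarded_to;          // -1 while the object is still in from-space.
  std::vector<int> fields;   // Indices of referenced objects.
  Obj() : forwarded_to(-1) {}
};

static std::vector<Obj> from_space;
static std::vector<Obj> to_space;

// Copy |index| to to-space if needed and return its new location.
static int Evacuate(int index) {
  Obj& obj = from_space[index];
  if (obj.forwarded_to == -1) {
    obj.forwarded_to = static_cast<int>(to_space.size());
    to_space.push_back(obj);          // Fields still hold from-space indices.
  }
  return obj.forwarded_to;            // The forwarding "pointer".
}

static void Scavenge(std::vector<int>* roots) {
  to_space.clear();
  for (size_t i = 0; i < roots->size(); i++) {
    (*roots)[i] = Evacuate((*roots)[i]);
  }
  // Cheney scan: objects copied while scanning extend the queue.
  for (size_t front = 0; front < to_space.size(); front++) {
    for (size_t i = 0; i < to_space[front].fields.size(); i++) {
      to_space[front].fields[i] = Evacuate(to_space[front].fields[i]);
    }
  }
  from_space.swap(to_space);          // Survivors become the new heap.
}

int main() {
  from_space.resize(3);
  from_space[0].fields.push_back(1);  // 0 -> 1; object 2 is unreachable.
  std::vector<int> roots;
  roots.push_back(0);
  Scavenge(&roots);
  std::printf("live objects: %d\n", static_cast<int>(from_space.size()));  // 2
  return 0;
}
```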
- } while (new_space_front < new_space_.top()); + } while (new_space_front != new_space_.top()); return new_space_front; } @@ -1252,26 +1319,11 @@ enum LoggingAndProfiling { }; -typedef void (*ScavengingCallback)(Map* map, - HeapObject** slot, - HeapObject* object); - - -static Atomic32 scavenging_visitors_table_mode_; -static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; - - -INLINE(static void DoScavengeObject(Map* map, - HeapObject** slot, - HeapObject* obj)); +enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS }; -void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) { - scavenging_visitors_table_.GetVisitor(map)(map, slot, obj); -} - - -template<LoggingAndProfiling logging_and_profiling_mode> +template<MarksHandling marks_handling, + LoggingAndProfiling logging_and_profiling_mode> class ScavengingVisitor : public StaticVisitorBase { public: static void Initialize() { @@ -1306,9 +1358,13 @@ class ScavengingVisitor : public StaticVisitorBase { &ObjectEvacuationStrategy<POINTER_OBJECT>:: Visit); - table_.Register(kVisitJSFunction, - &ObjectEvacuationStrategy<POINTER_OBJECT>:: - template VisitSpecialized<JSFunction::kSize>); + if (marks_handling == IGNORE_MARKS) { + table_.Register(kVisitJSFunction, + &ObjectEvacuationStrategy<POINTER_OBJECT>:: + template VisitSpecialized<JSFunction::kSize>); + } else { + table_.Register(kVisitJSFunction, &EvacuateJSFunction); + } table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>, kVisitDataObject, @@ -1373,10 +1429,15 @@ class ScavengingVisitor : public StaticVisitorBase { } } + if (marks_handling == TRANSFER_MARKS) { + if (Marking::TransferColor(source, target)) { + MemoryChunk::IncrementLiveBytes(target->address(), size); + } + } + return target; } - template<ObjectContents object_contents, SizeRestriction size_restriction> static inline void EvacuateObject(Map* map, HeapObject** slot, @@ -1386,13 +1447,14 @@ class ScavengingVisitor : public StaticVisitorBase { (object_size <= Page::kMaxHeapObjectSize)); ASSERT(object->Size() == object_size); - Heap* heap = map->heap(); + Heap* heap = map->GetHeap(); if (heap->ShouldBePromoted(object->address(), object_size)) { MaybeObject* maybe_result; if ((size_restriction != SMALL) && (object_size > Page::kMaxHeapObjectSize)) { - maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size); + maybe_result = heap->lo_space()->AllocateRaw(object_size, + NOT_EXECUTABLE); } else { if (object_contents == DATA_OBJECT) { maybe_result = heap->old_data_space()->AllocateRaw(object_size); @@ -1414,13 +1476,36 @@ class ScavengingVisitor : public StaticVisitorBase { return; } } - Object* result = - heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked(); + MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size); + Object* result = allocation->ToObjectUnchecked(); + *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size); return; } + static inline void EvacuateJSFunction(Map* map, + HeapObject** slot, + HeapObject* object) { + ObjectEvacuationStrategy<POINTER_OBJECT>:: + template VisitSpecialized<JSFunction::kSize>(map, slot, object); + + HeapObject* target = *slot; + MarkBit mark_bit = Marking::MarkBitFrom(target); + if (Marking::IsBlack(mark_bit)) { + // This object is black and it might not be rescanned by marker. + // We should explicitly record code entry slot for compaction because + // promotion queue processing (IterateAndMarkPointersToFromSpace) will + // miss it as it is not HeapObject-tagged. 
+ Address code_entry_slot = + target->address() + JSFunction::kCodeEntryOffset; + Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot)); + map->GetHeap()->mark_compact_collector()-> + RecordCodeEntrySlot(code_entry_slot, code); + } + } + + static inline void EvacuateFixedArray(Map* map, HeapObject** slot, HeapObject* object) { @@ -1479,14 +1564,17 @@ class ScavengingVisitor : public StaticVisitorBase { HeapObject* object) { ASSERT(IsShortcutCandidate(map->instance_type())); - if (ConsString::cast(object)->unchecked_second() == - map->heap()->empty_string()) { + Heap* heap = map->GetHeap(); + + if (marks_handling == IGNORE_MARKS && + ConsString::cast(object)->unchecked_second() == + heap->empty_string()) { HeapObject* first = HeapObject::cast(ConsString::cast(object)->unchecked_first()); *slot = first; - if (!map->heap()->InNewSpace(first)) { + if (!heap->InNewSpace(first)) { object->set_map_word(MapWord::FromForwardingAddress(first)); return; } @@ -1500,7 +1588,7 @@ class ScavengingVisitor : public StaticVisitorBase { return; } - DoScavengeObject(first->map(), slot, first); + heap->DoScavengeObject(first->map(), slot, first); object->set_map_word(MapWord::FromForwardingAddress(*slot)); return; } @@ -1531,45 +1619,60 @@ class ScavengingVisitor : public StaticVisitorBase { }; -template<LoggingAndProfiling logging_and_profiling_mode> +template<MarksHandling marks_handling, + LoggingAndProfiling logging_and_profiling_mode> VisitorDispatchTable<ScavengingCallback> - ScavengingVisitor<logging_and_profiling_mode>::table_; + ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_; static void InitializeScavengingVisitorsTables() { - ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize(); - ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize(); - scavenging_visitors_table_.CopyFrom( - ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable()); - scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED; + ScavengingVisitor<TRANSFER_MARKS, + LOGGING_AND_PROFILING_DISABLED>::Initialize(); + ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize(); + ScavengingVisitor<TRANSFER_MARKS, + LOGGING_AND_PROFILING_ENABLED>::Initialize(); + ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize(); } -void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() { - if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) { - // Table was already updated by some isolate. - return; - } - - if (isolate()->logger()->is_logging() | +void Heap::SelectScavengingVisitorsTable() { + bool logging_and_profiling = + isolate()->logger()->is_logging() || CpuProfiler::is_profiling(isolate()) || (isolate()->heap_profiler() != NULL && - isolate()->heap_profiler()->is_profiling())) { - // If one of the isolates is doing scavenge at this moment of time - // it might see this table in an inconsitent state when - // some of the callbacks point to - // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others - // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>. - // However this does not lead to any bugs as such isolate does not have - // profiling enabled and any isolate with enabled profiling is guaranteed - // to see the table in the consistent state. 
- scavenging_visitors_table_.CopyFrom( - ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable()); + isolate()->heap_profiler()->is_profiling()); + + if (!incremental_marking()->IsMarking()) { + if (!logging_and_profiling) { + scavenging_visitors_table_.CopyFrom( + ScavengingVisitor<IGNORE_MARKS, + LOGGING_AND_PROFILING_DISABLED>::GetTable()); + } else { + scavenging_visitors_table_.CopyFrom( + ScavengingVisitor<IGNORE_MARKS, + LOGGING_AND_PROFILING_ENABLED>::GetTable()); + } + } else { + if (!logging_and_profiling) { + scavenging_visitors_table_.CopyFrom( + ScavengingVisitor<TRANSFER_MARKS, + LOGGING_AND_PROFILING_DISABLED>::GetTable()); + } else { + scavenging_visitors_table_.CopyFrom( + ScavengingVisitor<TRANSFER_MARKS, + LOGGING_AND_PROFILING_ENABLED>::GetTable()); + } - // We use Release_Store to prevent reordering of this write before writes - // to the table. - Release_Store(&scavenging_visitors_table_mode_, - LOGGING_AND_PROFILING_ENABLED); + if (incremental_marking()->IsCompacting()) { + // When compacting forbid short-circuiting of cons-strings. + // Scavenging code relies on the fact that new space object + // can't be evacuated into evacuation candidate but + // short-circuiting violates this assumption. + scavenging_visitors_table_.Register( + StaticVisitorBase::kVisitShortcutCandidate, + scavenging_visitors_table_.GetVisitorById( + StaticVisitorBase::kVisitConsString)); + } } } @@ -1579,7 +1682,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { MapWord first_word = object->map_word(); ASSERT(!first_word.IsForwardingAddress()); Map* map = first_word.ToMap(); - DoScavengeObject(map, p, object); + map->GetHeap()->DoScavengeObject(map, p, object); } @@ -1605,7 +1708,9 @@ MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type, } -MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) { +MaybeObject* Heap::AllocateMap(InstanceType instance_type, + int instance_size, + ElementsKind elements_kind) { Object* result; { MaybeObject* maybe_result = AllocateRawMap(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -1627,7 +1732,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) { map->set_unused_property_fields(0); map->set_bit_field(0); map->set_bit_field2(1 << Map::kIsExtensible); - map->set_elements_kind(FAST_ELEMENTS); + map->set_elements_kind(elements_kind); // If the map object is aligned fill the padding area with Smi 0 objects. if (Map::kPadStart < Map::kSize) { @@ -1707,12 +1812,19 @@ bool Heap::CreateInitialMaps() { } set_empty_fixed_array(FixedArray::cast(obj)); - { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE); + { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE); if (!maybe_obj->ToObject(&obj)) return false; } - set_null_value(obj); + set_null_value(Oddball::cast(obj)); Oddball::cast(obj)->set_kind(Oddball::kNull); + { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE); + if (!maybe_obj->ToObject(&obj)) return false; + } + set_undefined_value(Oddball::cast(obj)); + Oddball::cast(obj)->set_kind(Oddball::kUndefined); + ASSERT(!InNewSpace(undefined_value())); + // Allocate the empty descriptor array. 
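SelectScavengingVisitorsTable above picks one of four statically initialized tables (marks handling crossed with logging/profiling) and copies it into the per-heap dispatch table. A compile-time sketch of that selection, using a plain std::array of function pointers instead of V8's VisitorDispatchTable (all names here are illustrative):

#include <array>
#include <cstdio>

// Simplified model of the visitor-table selection, not the real V8 API:
// four tables are built ahead of time and the heap copies the one matching
// the current incremental-marking / profiling state.
enum MarksHandling { IGNORE_MARKS, TRANSFER_MARKS };
enum Logging { LOGGING_DISABLED, LOGGING_ENABLED };

typedef void (*Visitor)(const char* what);

template <MarksHandling M, Logging L>
struct ScavengingVisitor {
  static void Visit(const char* what) {
    std::printf("visit %s (marks=%d logging=%d)\n",
                what, static_cast<int>(M), static_cast<int>(L));
  }
  static std::array<Visitor, 4> GetTable() {
    std::array<Visitor, 4> table;
    table.fill(&Visit);  // the real table registers one visitor per object kind
    return table;
  }
};

struct MiniHeap {
  std::array<Visitor, 4> scavenging_visitors_table_;

  void SelectScavengingVisitorsTable(bool incremental_marking, bool logging) {
    if (!incremental_marking) {
      scavenging_visitors_table_ = logging
          ? ScavengingVisitor<IGNORE_MARKS, LOGGING_ENABLED>::GetTable()
          : ScavengingVisitor<IGNORE_MARKS, LOGGING_DISABLED>::GetTable();
    } else {
      scavenging_visitors_table_ = logging
          ? ScavengingVisitor<TRANSFER_MARKS, LOGGING_ENABLED>::GetTable()
          : ScavengingVisitor<TRANSFER_MARKS, LOGGING_DISABLED>::GetTable();
    }
  }
};

int main() {
  MiniHeap heap;
  heap.SelectScavengingVisitorsTable(/*incremental_marking=*/true, /*logging=*/false);
  heap.scavenging_visitors_table_[0]("cons string");
  return 0;
}

Disabling cons-string short-circuiting while compacting, as the patch does for kVisitShortcutCandidate, amounts to overwriting one slot of the copied table after the selection.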
{ MaybeObject* maybe_obj = AllocateEmptyFixedArray(); if (!maybe_obj->ToObject(&obj)) return false; @@ -1798,6 +1910,12 @@ bool Heap::CreateInitialMaps() { } set_byte_array_map(Map::cast(obj)); + { MaybeObject* maybe_obj = + AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel); + if (!maybe_obj->ToObject(&obj)) return false; + } + set_free_space_map(Map::cast(obj)); + { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED); if (!maybe_obj->ToObject(&obj)) return false; } @@ -1998,7 +2116,7 @@ MaybeObject* Heap::CreateOddball(const char* to_string, Object* to_number, byte kind) { Object* result; - { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE); + { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; } return Oddball::cast(result)->Initialize(to_string, to_number, kind); @@ -2011,7 +2129,13 @@ bool Heap::CreateApiObjects() { { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); if (!maybe_obj->ToObject(&obj)) return false; } - set_neander_map(Map::cast(obj)); + // Don't use Smi-only elements optimizations for objects with the neander + // map. There are too many cases where element values are set directly with a + // bottleneck to trap the Smi-only -> fast elements transition, and there + // appears to be no benefit for optimize this case. + Map* new_neander_map = Map::cast(obj); + new_neander_map->set_elements_kind(FAST_ELEMENTS); + set_neander_map(new_neander_map); { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map()); if (!maybe_obj->ToObject(&obj)) return false; @@ -2056,6 +2180,12 @@ void Heap::CreateFixedStubs() { // To workaround the problem, make separate functions without inlining. Heap::CreateJSEntryStub(); Heap::CreateJSConstructEntryStub(); + + // Create stubs that should be there, so we don't unexpectedly have to + // create them if we need them during the creation of another stub. + // Stub creation mixes raw pointers and handles in an unsafe manner so + // we cannot create stubs while we are creating stubs. + CodeStub::GenerateStubsAheadOfTime(); } @@ -2066,20 +2196,18 @@ bool Heap::CreateInitialObjects() { { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED); if (!maybe_obj->ToObject(&obj)) return false; } - set_minus_zero_value(obj); + set_minus_zero_value(HeapNumber::cast(obj)); ASSERT(signbit(minus_zero_value()->Number()) != 0); { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED); if (!maybe_obj->ToObject(&obj)) return false; } - set_nan_value(obj); + set_nan_value(HeapNumber::cast(obj)); - { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE); + { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED); if (!maybe_obj->ToObject(&obj)) return false; } - set_undefined_value(obj); - Oddball::cast(obj)->set_kind(Oddball::kUndefined); - ASSERT(!InNewSpace(undefined_value())); + set_infinity_value(HeapNumber::cast(obj)); // Allocate initial symbol table. { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize); @@ -2088,19 +2216,17 @@ bool Heap::CreateInitialObjects() { // Don't use set_symbol_table() due to asserts. roots_[kSymbolTableRootIndex] = obj; - // Assign the print strings for oddballs after creating symboltable. - Object* symbol; - { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined"); - if (!maybe_symbol->ToObject(&symbol)) return false; + // Finish initializing oddballs after creating symboltable. 
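The reordered bootstrap above allocates the undefined and null oddballs before the symbol table exists and fills in their string-valued fields afterwards, which breaks the circular dependency between oddballs and symbols. A toy two-phase bootstrap in the same spirit (MiniHeap and its fields are invented for the example):

#include <cstdio>
#include <string>
#include <vector>

// Bootstrapping sketch with invented names: the oddballs must exist before
// the symbol table can be created, so they are allocated bare in phase one
// and their string-valued fields are filled in once symbols are available.
struct Oddball {
  int kind = 0;
  std::string to_string;  // left empty until phase two
};

struct MiniHeap {
  Oddball undefined_;
  Oddball null_;
  std::vector<std::string> symbol_table_;  // stands in for the real SymbolTable

  bool CreateInitialObjects() {
    // Phase one: raw allocation only, nothing may touch the symbol table yet.
    undefined_.kind = 1;
    null_.kind = 2;

    // Now the symbol table itself can be set up.
    symbol_table_.reserve(64);

    // Phase two: finish the oddballs using the string machinery.
    undefined_.to_string = "undefined";
    null_.to_string = "null";
    return true;
  }
};

int main() {
  MiniHeap heap;
  heap.CreateInitialObjects();
  std::printf("%s / %s\n", heap.undefined_.to_string.c_str(), heap.null_.to_string.c_str());
  return 0;
}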
+ { MaybeObject* maybe_obj = + undefined_value()->Initialize("undefined", + nan_value(), + Oddball::kUndefined); + if (!maybe_obj->ToObject(&obj)) return false; } - Oddball::cast(undefined_value())->set_to_string(String::cast(symbol)); - Oddball::cast(undefined_value())->set_to_number(nan_value()); - // Allocate the null_value + // Initialize the null_value. { MaybeObject* maybe_obj = - Oddball::cast(null_value())->Initialize("null", - Smi::FromInt(0), - Oddball::kNull); + null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull); if (!maybe_obj->ToObject(&obj)) return false; } @@ -2109,43 +2235,51 @@ bool Heap::CreateInitialObjects() { Oddball::kTrue); if (!maybe_obj->ToObject(&obj)) return false; } - set_true_value(obj); + set_true_value(Oddball::cast(obj)); { MaybeObject* maybe_obj = CreateOddball("false", Smi::FromInt(0), Oddball::kFalse); if (!maybe_obj->ToObject(&obj)) return false; } - set_false_value(obj); + set_false_value(Oddball::cast(obj)); { MaybeObject* maybe_obj = CreateOddball("hole", Smi::FromInt(-1), Oddball::kTheHole); if (!maybe_obj->ToObject(&obj)) return false; } - set_the_hole_value(obj); + set_the_hole_value(Oddball::cast(obj)); { MaybeObject* maybe_obj = CreateOddball("arguments_marker", - Smi::FromInt(-4), + Smi::FromInt(-2), Oddball::kArgumentMarker); if (!maybe_obj->ToObject(&obj)) return false; } - set_arguments_marker(obj); + set_arguments_marker(Oddball::cast(obj)); { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel", - Smi::FromInt(-2), + Smi::FromInt(-3), Oddball::kOther); if (!maybe_obj->ToObject(&obj)) return false; } set_no_interceptor_result_sentinel(obj); { MaybeObject* maybe_obj = CreateOddball("termination_exception", - Smi::FromInt(-3), + Smi::FromInt(-4), Oddball::kOther); if (!maybe_obj->ToObject(&obj)) return false; } set_termination_exception(obj); + { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker", + Smi::FromInt(-5), + Oddball::kOther); + if (!maybe_obj->ToObject(&obj)) return false; + } + set_frame_alignment_marker(Oddball::cast(obj)); + STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5); + // Allocate the empty string. { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED); if (!maybe_obj->ToObject(&obj)) return false; @@ -2422,6 +2556,15 @@ MaybeObject* Heap::NumberToString(Object* number, } +MaybeObject* Heap::Uint32ToString(uint32_t value, + bool check_number_string_cache) { + Object* number; + MaybeObject* maybe = NumberFromUint32(value); + if (!maybe->To<Object>(&number)) return maybe; + return NumberToString(number, check_number_string_cache); +} + + Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) { return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]); } @@ -2737,25 +2880,23 @@ MaybeObject* Heap::AllocateSubString(String* buffer, // Make an attempt to flatten the buffer to reduce access time. buffer = buffer->TryFlattenGetString(); - // TODO(1626): For now slicing external strings is not supported. However, - // a flat cons string can have an external string as first part in some cases. - // Therefore we have to single out this case as well. if (!FLAG_string_slices || - (buffer->IsConsString() && - (!buffer->IsFlat() || - !ConsString::cast(buffer)->first()->IsSeqString())) || - buffer->IsExternalString() || + !buffer->IsFlat() || length < SlicedString::kMinLength || pretenure == TENURED) { Object* result; - { MaybeObject* maybe_result = buffer->IsAsciiRepresentation() - ? 
AllocateRawAsciiString(length, pretenure) - : AllocateRawTwoByteString(length, pretenure); + // WriteToFlat takes care of the case when an indirect string has a + // different encoding from its underlying string. These encodings may + // differ because of externalization. + bool is_ascii = buffer->IsAsciiRepresentation(); + { MaybeObject* maybe_result = is_ascii + ? AllocateRawAsciiString(length, pretenure) + : AllocateRawTwoByteString(length, pretenure); if (!maybe_result->ToObject(&result)) return maybe_result; } String* string_result = String::cast(result); // Copy the characters into the new object. - if (buffer->IsAsciiRepresentation()) { + if (is_ascii) { ASSERT(string_result->IsAsciiRepresentation()); char* dest = SeqAsciiString::cast(string_result)->GetChars(); String::WriteToFlat(buffer, dest, start, end); @@ -2768,12 +2909,17 @@ MaybeObject* Heap::AllocateSubString(String* buffer, } ASSERT(buffer->IsFlat()); - ASSERT(!buffer->IsExternalString()); #if DEBUG buffer->StringVerify(); #endif Object* result; + // When slicing an indirect string we use its encoding for a newly created + // slice and don't check the encoding of the underlying string. This is safe + // even if the encodings are different because of externalization. If an + // indirect ASCII string is pointing to a two-byte string, the two-byte char + // codes of the underlying string must still fit into ASCII (because + // externalization must not change char codes). { Map* map = buffer->IsAsciiRepresentation() ? sliced_ascii_string_map() : sliced_string_map(); @@ -2799,13 +2945,14 @@ MaybeObject* Heap::AllocateSubString(String* buffer, sliced_string->set_parent(buffer); sliced_string->set_offset(start); } - ASSERT(sliced_string->parent()->IsSeqString()); + ASSERT(sliced_string->parent()->IsSeqString() || + sliced_string->parent()->IsExternalString()); return result; } MaybeObject* Heap::AllocateExternalStringFromAscii( - ExternalAsciiString::Resource* resource) { + const ExternalAsciiString::Resource* resource) { size_t length = resource->length(); if (length > static_cast<size_t>(String::kMaxLength)) { isolate()->context()->mark_out_of_memory(); @@ -2828,7 +2975,7 @@ MaybeObject* Heap::AllocateExternalStringFromAscii( MaybeObject* Heap::AllocateExternalStringFromTwoByte( - ExternalTwoByteString::Resource* resource) { + const ExternalTwoByteString::Resource* resource) { size_t length = resource->length(); if (length > static_cast<size_t>(String::kMaxLength)) { isolate()->context()->mark_out_of_memory(); @@ -2892,7 +3039,7 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) { Object* result; { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace()) ? old_data_space_->AllocateRaw(size) - : lo_space_->AllocateRaw(size); + : lo_space_->AllocateRaw(size, NOT_EXECUTABLE); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -2928,8 +3075,8 @@ void Heap::CreateFillerObjectAt(Address addr, int size) { } else if (size == 2 * kPointerSize) { filler->set_map(two_pointer_filler_map()); } else { - filler->set_map(byte_array_map()); - ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size)); + filler->set_map(free_space_map()); + FreeSpace::cast(filler)->set_size(size); } } @@ -2975,7 +3122,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc, // Large code objects and code objects which should stay at a fixed address // are allocated in large object space. 
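AllocateSubString, shown above, either copies characters into a fresh sequential string (short, non-flat, or tenured cases) or builds a SlicedString that records only a parent, an offset and a length. A simplified model of that decision, with shared_ptr standing in for GC references and an illustrative minimum slice length:

#include <cstdio>
#include <memory>
#include <string>

// Rough model of the copy-vs-slice decision; the constant and class names
// are stand-ins, not V8's.
static const int kMinSliceLength = 13;

struct SimpleString {
  virtual ~SimpleString() {}
  virtual char At(int index) const = 0;
  virtual int length() const = 0;
};

struct SeqString : SimpleString {
  std::string chars;
  explicit SeqString(std::string c) : chars(std::move(c)) {}
  char At(int index) const override { return chars[index]; }
  int length() const override { return static_cast<int>(chars.size()); }
};

struct SlicedString : SimpleString {
  std::shared_ptr<SimpleString> parent;
  int offset, len;
  SlicedString(std::shared_ptr<SimpleString> p, int o, int l)
      : parent(std::move(p)), offset(o), len(l) {}
  char At(int index) const override { return parent->At(offset + index); }
  int length() const override { return len; }
};

std::shared_ptr<SimpleString> SubString(std::shared_ptr<SimpleString> buffer,
                                        int start, int end) {
  int length = end - start;
  if (length < kMinSliceLength) {
    // Short result: copy the characters, as the patch does.
    std::string copy;
    for (int i = start; i < end; i++) copy.push_back(buffer->At(i));
    return std::make_shared<SeqString>(copy);
  }
  // Long enough: share the parent's characters through a slice.
  return std::make_shared<SlicedString>(buffer, start, length);
}

int main() {
  auto parent = std::make_shared<SeqString>("the quick brown fox jumps over");
  auto slice = SubString(parent, 4, 24);
  std::printf("%c..%c len=%d\n", slice->At(0), slice->At(slice->length() - 1), slice->length());
  return 0;
}

The model ignores one detail the patch adds: the parent of a slice may now also be an external string, which is why the assertion on the parent type was relaxed.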
if (obj_size > MaxObjectSizeInPagedSpace() || immovable) { - maybe_result = lo_space_->AllocateRawCode(obj_size); + maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE); } else { maybe_result = code_space_->AllocateRaw(obj_size); } @@ -3020,7 +3167,7 @@ MaybeObject* Heap::CopyCode(Code* code) { int obj_size = code->Size(); MaybeObject* maybe_result; if (obj_size > MaxObjectSizeInPagedSpace()) { - maybe_result = lo_space_->AllocateRawCode(obj_size); + maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE); } else { maybe_result = code_space_->AllocateRaw(obj_size); } @@ -3063,7 +3210,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { MaybeObject* maybe_result; if (new_obj_size > MaxObjectSizeInPagedSpace()) { - maybe_result = lo_space_->AllocateRawCode(new_obj_size); + maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE); } else { maybe_result = code_space_->AllocateRaw(new_obj_size); } @@ -3112,9 +3259,9 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) { } -MaybeObject* Heap::InitializeFunction(JSFunction* function, - SharedFunctionInfo* shared, - Object* prototype) { +void Heap::InitializeFunction(JSFunction* function, + SharedFunctionInfo* shared, + Object* prototype) { ASSERT(!prototype->IsMap()); function->initialize_properties(); function->initialize_elements(); @@ -3124,7 +3271,6 @@ MaybeObject* Heap::InitializeFunction(JSFunction* function, function->set_context(undefined_value()); function->set_literals(empty_fixed_array()); function->set_next_function_link(undefined_value()); - return function; } @@ -3134,8 +3280,18 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) { // different context. JSFunction* object_function = function->context()->global_context()->object_function(); + + // Each function prototype gets a copy of the object function map. + // This avoid unwanted sharing of maps between prototypes of different + // constructors. + Map* new_map; + ASSERT(object_function->has_initial_map()); + { MaybeObject* maybe_map = + object_function->initial_map()->CopyDropTransitions(); + if (!maybe_map->To<Map>(&new_map)) return maybe_map; + } Object* prototype; - { MaybeObject* maybe_prototype = AllocateJSObject(object_function); + { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map); if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype; } // When creating the prototype for the function we must set its @@ -3160,7 +3316,8 @@ MaybeObject* Heap::AllocateFunction(Map* function_map, { MaybeObject* maybe_result = Allocate(function_map, space); if (!maybe_result->ToObject(&result)) return maybe_result; } - return InitializeFunction(JSFunction::cast(result), shared, prototype); + InitializeFunction(JSFunction::cast(result), shared, prototype); + return result; } @@ -3330,6 +3487,9 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj, // We cannot always fill with one_pointer_filler_map because objects // created from API functions expect their internal fields to be initialized // with undefined_value. + // Pre-allocated fields need to be initialized with undefined_value as well + // so that object accesses before the constructor completes (e.g. in the + // debugger) will not cause a crash. 
if (map->constructor()->IsJSFunction() && JSFunction::cast(map->constructor())->shared()-> IsInobjectSlackTrackingInProgress()) { @@ -3339,7 +3499,7 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj, } else { filler = Heap::undefined_value(); } - obj->InitializeBody(map->instance_size(), filler); + obj->InitializeBody(map, Heap::undefined_value(), filler); } @@ -3377,7 +3537,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) { InitializeJSObjectFromMap(JSObject::cast(obj), FixedArray::cast(properties), map); - ASSERT(JSObject::cast(obj)->HasFastElements()); + ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() || + JSObject::cast(obj)->HasFastElements()); return obj; } @@ -3420,6 +3581,7 @@ MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) { if (!maybe_result->To<JSProxy>(&result)) return maybe_result; result->InitializeBody(map->instance_size(), Smi::FromInt(0)); result->set_handler(handler); + result->set_hash(undefined_value()); return result; } @@ -3443,6 +3605,7 @@ MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler, if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result; result->InitializeBody(map->instance_size(), Smi::FromInt(0)); result->set_handler(handler); + result->set_hash(undefined_value()); result->set_call_trap(call_trap); result->set_construct_trap(construct_trap); return result; @@ -3559,6 +3722,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) { object_size); } + ASSERT(JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind()); FixedArrayBase* elements = FixedArrayBase::cast(source->elements()); FixedArray* properties = FixedArray::cast(source->properties()); // Update elements if necessary. @@ -3591,13 +3755,13 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) { MaybeObject* Heap::ReinitializeJSReceiver( JSReceiver* object, InstanceType type, int size) { - ASSERT(type >= FIRST_JS_RECEIVER_TYPE); + ASSERT(type >= FIRST_JS_OBJECT_TYPE); // Allocate fresh map. // TODO(rossberg): Once we optimize proxies, cache these maps. Map* map; - MaybeObject* maybe_map_obj = AllocateMap(type, size); - if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj; + MaybeObject* maybe = AllocateMap(type, size); + if (!maybe->To<Map>(&map)) return maybe; // Check that the receiver has at least the size of the fresh object. int size_difference = object->map()->instance_size() - map->instance_size(); @@ -3608,30 +3772,35 @@ MaybeObject* Heap::ReinitializeJSReceiver( // Allocate the backing storage for the properties. int prop_size = map->unused_property_fields() - map->inobject_properties(); Object* properties; - { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED); - if (!maybe_properties->ToObject(&properties)) return maybe_properties; + maybe = AllocateFixedArray(prop_size, TENURED); + if (!maybe->ToObject(&properties)) return maybe; + + // Functions require some allocation, which might fail here. + SharedFunctionInfo* shared = NULL; + if (type == JS_FUNCTION_TYPE) { + String* name; + maybe = LookupAsciiSymbol("<freezing call trap>"); + if (!maybe->To<String>(&name)) return maybe; + maybe = AllocateSharedFunctionInfo(name); + if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe; } + // Because of possible retries of this function after failure, + // we must NOT fail after this point, where we have changed the type! + // Reset the map for the object. object->set_map(map); + JSObject* jsobj = JSObject::cast(object); // Reinitialize the object from the constructor map. 
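ReinitializeJSReceiver is reorganized above so that every allocation that can fail (the property backing store and, for functions, the name symbol and the SharedFunctionInfo) happens before the object's map is swapped, because after that point a failure would leave a half-initialized object of the new type. A small sketch of the same allocate-then-commit pattern, using invented types and std::nothrow to model allocation failure:

#include <cstdio>
#include <memory>
#include <new>
#include <string>

// Invented types; the point is the ordering: all fallible work first,
// the irreversible "type change" only after everything has succeeded.
struct Properties { int slots = 0; };
struct SharedInfo { std::string name; };

struct Receiver {
  std::string type = "JSProxy";
  std::unique_ptr<Properties> properties;
  std::unique_ptr<SharedInfo> shared;
};

bool ReinitializeAsFunction(Receiver* object) {
  // Fallible work up front (allocation failure modelled with std::nothrow).
  std::unique_ptr<Properties> props(new (std::nothrow) Properties());
  if (!props) return false;
  std::unique_ptr<SharedInfo> shared(new (std::nothrow) SharedInfo{"<freezing call trap>"});
  if (!shared) return false;

  // Point of no return: mutate the object only now that nothing can fail.
  object->type = "JSFunction";
  object->properties = std::move(props);
  object->shared = std::move(shared);
  return true;
}

int main() {
  Receiver r;
  bool ok = ReinitializeAsFunction(&r);
  std::printf("reinitialized: %d, type: %s\n", ok, r.type.c_str());
  return 0;
}

This matters here because allocation failures are retried: the caller may run the whole function again, so the observable type change has to be the last thing that happens.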
- InitializeJSObjectFromMap(JSObject::cast(object), - FixedArray::cast(properties), map); + InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map); // Functions require some minimal initialization. if (type == JS_FUNCTION_TYPE) { - String* name; - MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>"); - if (!maybe_name->To<String>(&name)) return maybe_name; - SharedFunctionInfo* shared; - MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name); - if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared; - JSFunction* func; - MaybeObject* maybe_func = - InitializeFunction(JSFunction::cast(object), shared, the_hole_value()); - if (!maybe_func->To<JSFunction>(&func)) return maybe_func; - func->set_context(isolate()->context()->global_context()); + map->set_function_with_prototype(true); + InitializeFunction(JSFunction::cast(object), shared, the_hole_value()); + JSFunction::cast(object)->set_context( + isolate()->context()->global_context()); } // Put in filler if the new object is smaller than the old. @@ -3814,7 +3983,7 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer, // Allocate string. Object* result; { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace()) - ? lo_space_->AllocateRaw(size) + ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE) : old_data_space_->AllocateRaw(size); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -3931,7 +4100,7 @@ MaybeObject* Heap::AllocateRawFixedArray(int length) { int size = FixedArray::SizeFor(length); return size <= kMaxObjectSizeInNewSpace ? new_space_.AllocateRaw(size) - : lo_space_->AllocateRawFixedArray(size); + : lo_space_->AllocateRaw(size, NOT_EXECUTABLE); } @@ -4262,6 +4431,21 @@ STRUCT_LIST(MAKE_CASE) } +bool Heap::IsHeapIterable() { + return (!old_pointer_space()->was_swept_conservatively() && + !old_data_space()->was_swept_conservatively()); +} + + +void Heap::EnsureHeapIsIterable() { + ASSERT(IsAllocationAllowed()); + if (!IsHeapIterable()) { + CollectAllGarbage(kMakeHeapIterableMask); + } + ASSERT(IsHeapIterable()); +} + + bool Heap::IdleNotification() { static const int kIdlesBeforeScavenge = 4; static const int kIdlesBeforeMarkSweep = 7; @@ -4292,7 +4476,7 @@ bool Heap::IdleNotification() { if (number_idle_notifications_ == kIdlesBeforeScavenge) { if (contexts_disposed_ > 0) { HistogramTimerScope scope(isolate_->counters()->gc_context()); - CollectAllGarbage(false); + CollectAllGarbage(kNoGCFlags); } else { CollectGarbage(NEW_SPACE); } @@ -4304,12 +4488,12 @@ bool Heap::IdleNotification() { // generated code for cached functions. isolate_->compilation_cache()->Clear(); - CollectAllGarbage(false); + CollectAllGarbage(kNoGCFlags); new_space_.Shrink(); last_idle_notification_gc_count_ = gc_count_; } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) { - CollectAllGarbage(true); + CollectAllGarbage(kNoGCFlags); new_space_.Shrink(); last_idle_notification_gc_count_ = gc_count_; number_idle_notifications_ = 0; @@ -4319,7 +4503,7 @@ bool Heap::IdleNotification() { contexts_disposed_ = 0; } else { HistogramTimerScope scope(isolate_->counters()->gc_context()); - CollectAllGarbage(false); + CollectAllGarbage(kNoGCFlags); last_idle_notification_gc_count_ = gc_count_; } // If this is the first idle notification, we reset the @@ -4339,8 +4523,11 @@ bool Heap::IdleNotification() { // Make sure that we have no pending context disposals and // conditionally uncommit from space. 
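The new IsHeapIterable/EnsureHeapIsIterable pair above captures the rule that conservatively swept spaces cannot be walked object by object, so a full collection is requested first. A minimal model of that guard, with the flag handling inside CollectAllGarbage reduced to a boolean:

#include <cstdio>

// Minimal model: spaces swept conservatively are not iterable; a "full GC"
// with the make-iterable request restores precise sweeping in this sketch.
struct Space {
  bool was_swept_conservatively = true;
};

struct MiniHeap {
  Space old_pointer_space;
  Space old_data_space;

  bool IsHeapIterable() const {
    return !old_pointer_space.was_swept_conservatively &&
           !old_data_space.was_swept_conservatively;
  }

  void CollectAllGarbage(bool make_heap_iterable) {
    if (make_heap_iterable) {
      old_pointer_space.was_swept_conservatively = false;
      old_data_space.was_swept_conservatively = false;
    }
  }

  void EnsureHeapIsIterable() {
    if (!IsHeapIterable()) CollectAllGarbage(/*make_heap_iterable=*/true);
  }
};

int main() {
  MiniHeap heap;
  heap.EnsureHeapIsIterable();
  std::printf("iterable: %d\n", heap.IsHeapIterable());
  return 0;
}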
- ASSERT(contexts_disposed_ == 0); + // Take into account that we might have decided to delay full collection + // because incremental marking is in progress. + ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped()); if (uncommit) UncommitFromSpace(); + return finished; } @@ -4374,11 +4561,11 @@ void Heap::ReportHeapStatistics(const char* title) { USE(title); PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title, gc_count_); - PrintF("mark-compact GC : %d\n", mc_count_); PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n", old_gen_promotion_limit_); PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n", old_gen_allocation_limit_); + PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_); PrintF("\n"); PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles()); @@ -4455,69 +4642,18 @@ bool Heap::InSpace(Address addr, AllocationSpace space) { #ifdef DEBUG -static void DummyScavengePointer(HeapObject** p) { -} - - -static void VerifyPointersUnderWatermark( - PagedSpace* space, - DirtyRegionCallback visit_dirty_region) { - PageIterator it(space, PageIterator::PAGES_IN_USE); - - while (it.has_next()) { - Page* page = it.next(); - Address start = page->ObjectAreaStart(); - Address end = page->AllocationWatermark(); - - HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks, - start, - end, - visit_dirty_region, - &DummyScavengePointer); - } -} - - -static void VerifyPointersUnderWatermark(LargeObjectSpace* space) { - LargeObjectIterator it(space); - for (HeapObject* object = it.next(); object != NULL; object = it.next()) { - if (object->IsFixedArray()) { - Address slot_address = object->address(); - Address end = object->address() + object->Size(); - - while (slot_address < end) { - HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address); - // When we are not in GC the Heap::InNewSpace() predicate - // checks that pointers which satisfy predicate point into - // the active semispace. 
- HEAP->InNewSpace(*slot); - slot_address += kPointerSize; - } - } - } -} - - void Heap::Verify() { ASSERT(HasBeenSetup()); + store_buffer()->Verify(); + VerifyPointersVisitor visitor; IterateRoots(&visitor, VISIT_ONLY_STRONG); new_space_.Verify(); - VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor; - old_pointer_space_->Verify(&dirty_regions_visitor); - map_space_->Verify(&dirty_regions_visitor); - - VerifyPointersUnderWatermark(old_pointer_space_, - &IteratePointersInDirtyRegion); - VerifyPointersUnderWatermark(map_space_, - &IteratePointersInDirtyMapsRegion); - VerifyPointersUnderWatermark(lo_space_); - - VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID); - VerifyPageWatermarkValidity(map_space_, ALL_INVALID); + old_pointer_space_->Verify(&visitor); + map_space_->Verify(&visitor); VerifyPointersVisitor no_dirty_regions_visitor; old_data_space_->Verify(&no_dirty_regions_visitor); @@ -4526,6 +4662,7 @@ void Heap::Verify() { lo_space_->Verify(); } + #endif // DEBUG @@ -4621,275 +4758,221 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) { #ifdef DEBUG void Heap::ZapFromSpace() { - ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure()); - for (Address a = new_space_.FromSpaceLow(); - a < new_space_.FromSpaceHigh(); - a += kPointerSize) { - Memory::Address_at(a) = kFromSpaceZapValue; + NewSpacePageIterator it(new_space_.FromSpaceStart(), + new_space_.FromSpaceEnd()); + while (it.has_next()) { + NewSpacePage* page = it.next(); + for (Address cursor = page->body(), limit = page->body_limit(); + cursor < limit; + cursor += kPointerSize) { + Memory::Address_at(cursor) = kFromSpaceZapValue; + } } } #endif // DEBUG -bool Heap::IteratePointersInDirtyRegion(Heap* heap, - Address start, - Address end, - ObjectSlotCallback copy_object_func) { +void Heap::IterateAndMarkPointersToFromSpace(Address start, + Address end, + ObjectSlotCallback callback) { Address slot_address = start; - bool pointers_to_new_space_found = false; + + // We are not collecting slots on new space objects during mutation + // thus we have to scan for pointers to evacuation candidates when we + // promote objects. But we should not record any slots in non-black + // objects. Grey object's slots would be rescanned. + // White object might not survive until the end of collection + // it would be a violation of the invariant to record it's slots. + bool record_slots = false; + if (incremental_marking()->IsCompacting()) { + MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start)); + record_slots = Marking::IsBlack(mark_bit); + } while (slot_address < end) { Object** slot = reinterpret_cast<Object**>(slot_address); - if (heap->InNewSpace(*slot)) { - ASSERT((*slot)->IsHeapObject()); - copy_object_func(reinterpret_cast<HeapObject**>(slot)); - if (heap->InNewSpace(*slot)) { - ASSERT((*slot)->IsHeapObject()); - pointers_to_new_space_found = true; + Object* object = *slot; + // If the store buffer becomes overfull we mark pages as being exempt from + // the store buffer. These pages are scanned to find pointers that point + // to the new space. In that case we may hit newly promoted objects and + // fix the pointers before the promotion queue gets to them. Thus the 'if'. 
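IterateAndMarkPointersToFromSpace now re-enters old-to-new pointers into the store buffer directly while promoted objects are scanned, instead of updating per-page dirty-region marks as the deleted code did. A stripped-down model of that loop, where addresses are small integers and anything below 100 counts as new space:

#include <cstdint>
#include <cstdio>
#include <vector>

// Stripped-down model: the callback stands in for evacuation/forwarding,
// and the vector of slot addresses stands in for the store buffer.
struct MiniHeap {
  static bool InNewSpace(intptr_t value) { return value < 100; }

  std::vector<intptr_t*> store_buffer_;  // remembered old-to-new slots

  void IteratePointersToFromSpace(intptr_t* start, intptr_t* end,
                                  intptr_t (*callback)(intptr_t)) {
    for (intptr_t* slot = start; slot < end; slot++) {
      if (!InNewSpace(*slot)) continue;
      *slot = callback(*slot);  // evacuate the pointee and update the slot
      if (InNewSpace(*slot)) {
        // Still young after evacuation: re-enter the slot into the buffer.
        store_buffer_.push_back(slot);
      }
    }
  }
};

int main() {
  intptr_t promoted_object[4] = {5, 250, 7, 300};  // two slots point into "new space"
  MiniHeap heap;
  heap.IteratePointersToFromSpace(
      promoted_object, promoted_object + 4,
      [](intptr_t old_value) { return old_value + 1; });  // fake forwarding
  std::printf("remembered slots: %d\n", static_cast<int>(heap.store_buffer_.size()));
  return 0;
}

The slot recording for evacuation candidates that the patch gates on the holder being black is omitted here; the sketch only shows the store-buffer side of the invariant.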
+ if (object->IsHeapObject()) { + if (Heap::InFromSpace(object)) { + callback(reinterpret_cast<HeapObject**>(slot), + HeapObject::cast(object)); + Object* new_object = *slot; + if (InNewSpace(new_object)) { + ASSERT(Heap::InToSpace(new_object)); + ASSERT(new_object->IsHeapObject()); + store_buffer_.EnterDirectlyIntoStoreBuffer( + reinterpret_cast<Address>(slot)); + } + ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object)); + } else if (record_slots && + MarkCompactCollector::IsOnEvacuationCandidate(object)) { + mark_compact_collector()->RecordSlot(slot, slot, object); } } slot_address += kPointerSize; } - return pointers_to_new_space_found; } -// Compute start address of the first map following given addr. -static inline Address MapStartAlign(Address addr) { - Address page = Page::FromAddress(addr)->ObjectAreaStart(); - return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize); -} +#ifdef DEBUG +typedef bool (*CheckStoreBufferFilter)(Object** addr); -// Compute end address of the first map preceding given addr. -static inline Address MapEndAlign(Address addr) { - Address page = Page::FromAllocationTop(addr)->ObjectAreaStart(); - return page + ((addr - page) / Map::kSize * Map::kSize); +bool IsAMapPointerAddress(Object** addr) { + uintptr_t a = reinterpret_cast<uintptr_t>(addr); + int mod = a % Map::kSize; + return mod >= Map::kPointerFieldsBeginOffset && + mod < Map::kPointerFieldsEndOffset; } -static bool IteratePointersInDirtyMaps(Address start, - Address end, - ObjectSlotCallback copy_object_func) { - ASSERT(MapStartAlign(start) == start); - ASSERT(MapEndAlign(end) == end); - - Address map_address = start; - bool pointers_to_new_space_found = false; - - Heap* heap = HEAP; - while (map_address < end) { - ASSERT(!heap->InNewSpace(Memory::Object_at(map_address))); - ASSERT(Memory::Object_at(map_address)->IsMap()); +bool EverythingsAPointer(Object** addr) { + return true; +} - Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset; - Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset; - if (Heap::IteratePointersInDirtyRegion(heap, - pointer_fields_start, - pointer_fields_end, - copy_object_func)) { - pointers_to_new_space_found = true; +static void CheckStoreBuffer(Heap* heap, + Object** current, + Object** limit, + Object**** store_buffer_position, + Object*** store_buffer_top, + CheckStoreBufferFilter filter, + Address special_garbage_start, + Address special_garbage_end) { + Map* free_space_map = heap->free_space_map(); + for ( ; current < limit; current++) { + Object* o = *current; + Address current_address = reinterpret_cast<Address>(current); + // Skip free space. + if (o == free_space_map) { + Address current_address = reinterpret_cast<Address>(current); + FreeSpace* free_space = + FreeSpace::cast(HeapObject::FromAddress(current_address)); + int skip = free_space->Size(); + ASSERT(current_address + skip <= reinterpret_cast<Address>(limit)); + ASSERT(skip > 0); + current_address += skip - kPointerSize; + current = reinterpret_cast<Object**>(current_address); + continue; + } + // Skip the current linear allocation space between top and limit which is + // unmarked with the free space map, but can contain junk. 
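CheckStoreBuffer, introduced above, walks every slot of a page, skips FreeSpace fillers and the current linear allocation area, and asserts that each remaining pointer into new space has an entry in the sorted store buffer. The following simplified analogue keeps only the core invariant check, using slot indices instead of real addresses:

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

// "Addresses" are indices into page_slots; values below 100 are "new space".
static bool InNewSpace(int value) { return value < 100; }

void CheckStoreBuffer(const std::vector<int>& page_slots,
                      std::vector<int> store_buffer) {
  // The real code calls SortUniq() on the buffer before scanning.
  std::sort(store_buffer.begin(), store_buffer.end());
  store_buffer.erase(std::unique(store_buffer.begin(), store_buffer.end()),
                     store_buffer.end());
  size_t position = 0;
  for (size_t slot = 0; slot < page_slots.size(); slot++) {
    if (!InNewSpace(page_slots[slot])) continue;
    while (position < store_buffer.size() &&
           store_buffer[position] < static_cast<int>(slot)) {
      position++;
    }
    // Every old-to-new slot must have a matching store-buffer entry.
    assert(position < store_buffer.size() &&
           store_buffer[position] == static_cast<int>(slot));
  }
}

int main() {
  std::vector<int> page;               // slots 1 and 3 hold young pointers
  page.push_back(500); page.push_back(7);
  page.push_back(600); page.push_back(42); page.push_back(700);
  std::vector<int> store_buffer;       // recorded slot indices, unsorted
  store_buffer.push_back(3); store_buffer.push_back(1); store_buffer.push_back(3);
  CheckStoreBuffer(page, store_buffer);
  std::printf("store buffer is complete\n");
  return 0;
}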
+ if (current_address == special_garbage_start && + special_garbage_end != special_garbage_start) { + current_address = special_garbage_end - kPointerSize; + current = reinterpret_cast<Object**>(current_address); + continue; + } + if (!(*filter)(current)) continue; + ASSERT(current_address < special_garbage_start || + current_address >= special_garbage_end); + ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue); + // We have to check that the pointer does not point into new space + // without trying to cast it to a heap object since the hash field of + // a string can contain values like 1 and 3 which are tagged null + // pointers. + if (!heap->InNewSpace(o)) continue; + while (**store_buffer_position < current && + *store_buffer_position < store_buffer_top) { + (*store_buffer_position)++; + } + if (**store_buffer_position != current || + *store_buffer_position == store_buffer_top) { + Object** obj_start = current; + while (!(*obj_start)->IsMap()) obj_start--; + UNREACHABLE(); } - - map_address += Map::kSize; } - - return pointers_to_new_space_found; } -bool Heap::IteratePointersInDirtyMapsRegion( - Heap* heap, - Address start, - Address end, - ObjectSlotCallback copy_object_func) { - Address map_aligned_start = MapStartAlign(start); - Address map_aligned_end = MapEndAlign(end); +// Check that the store buffer contains all intergenerational pointers by +// scanning a page and ensuring that all pointers to young space are in the +// store buffer. +void Heap::OldPointerSpaceCheckStoreBuffer() { + OldSpace* space = old_pointer_space(); + PageIterator pages(space); - bool contains_pointers_to_new_space = false; + store_buffer()->SortUniq(); - if (map_aligned_start != start) { - Address prev_map = map_aligned_start - Map::kSize; - ASSERT(Memory::Object_at(prev_map)->IsMap()); + while (pages.has_next()) { + Page* page = pages.next(); + Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart()); - Address pointer_fields_start = - Max(start, prev_map + Map::kPointerFieldsBeginOffset); + Address end = page->ObjectAreaEnd(); - Address pointer_fields_end = - Min(prev_map + Map::kPointerFieldsEndOffset, end); + Object*** store_buffer_position = store_buffer()->Start(); + Object*** store_buffer_top = store_buffer()->Top(); - contains_pointers_to_new_space = - IteratePointersInDirtyRegion(heap, - pointer_fields_start, - pointer_fields_end, - copy_object_func) - || contains_pointers_to_new_space; + Object** limit = reinterpret_cast<Object**>(end); + CheckStoreBuffer(this, + current, + limit, + &store_buffer_position, + store_buffer_top, + &EverythingsAPointer, + space->top(), + space->limit()); } - - contains_pointers_to_new_space = - IteratePointersInDirtyMaps(map_aligned_start, - map_aligned_end, - copy_object_func) - || contains_pointers_to_new_space; - - if (map_aligned_end != end) { - ASSERT(Memory::Object_at(map_aligned_end)->IsMap()); - - Address pointer_fields_start = - map_aligned_end + Map::kPointerFieldsBeginOffset; - - Address pointer_fields_end = - Min(end, map_aligned_end + Map::kPointerFieldsEndOffset); - - contains_pointers_to_new_space = - IteratePointersInDirtyRegion(heap, - pointer_fields_start, - pointer_fields_end, - copy_object_func) - || contains_pointers_to_new_space; - } - - return contains_pointers_to_new_space; } -void Heap::IterateAndMarkPointersToFromSpace(Address start, - Address end, - ObjectSlotCallback callback) { - Address slot_address = start; - Page* page = Page::FromAddress(start); +void Heap::MapSpaceCheckStoreBuffer() { + MapSpace* space = 
map_space(); + PageIterator pages(space); - uint32_t marks = page->GetRegionMarks(); + store_buffer()->SortUniq(); - while (slot_address < end) { - Object** slot = reinterpret_cast<Object**>(slot_address); - if (InFromSpace(*slot)) { - ASSERT((*slot)->IsHeapObject()); - callback(reinterpret_cast<HeapObject**>(slot)); - if (InNewSpace(*slot)) { - ASSERT((*slot)->IsHeapObject()); - marks |= page->GetRegionMaskForAddress(slot_address); - } - } - slot_address += kPointerSize; - } - - page->SetRegionMarks(marks); -} + while (pages.has_next()) { + Page* page = pages.next(); + Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart()); + Address end = page->ObjectAreaEnd(); -uint32_t Heap::IterateDirtyRegions( - uint32_t marks, - Address area_start, - Address area_end, - DirtyRegionCallback visit_dirty_region, - ObjectSlotCallback copy_object_func) { - uint32_t newmarks = 0; - uint32_t mask = 1; + Object*** store_buffer_position = store_buffer()->Start(); + Object*** store_buffer_top = store_buffer()->Top(); - if (area_start >= area_end) { - return newmarks; + Object** limit = reinterpret_cast<Object**>(end); + CheckStoreBuffer(this, + current, + limit, + &store_buffer_position, + store_buffer_top, + &IsAMapPointerAddress, + space->top(), + space->limit()); } - - Address region_start = area_start; - - // area_start does not necessarily coincide with start of the first region. - // Thus to calculate the beginning of the next region we have to align - // area_start by Page::kRegionSize. - Address second_region = - reinterpret_cast<Address>( - reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) & - ~Page::kRegionAlignmentMask); - - // Next region might be beyond area_end. - Address region_end = Min(second_region, area_end); - - if (marks & mask) { - if (visit_dirty_region(this, region_start, region_end, copy_object_func)) { - newmarks |= mask; - } - } - mask <<= 1; - - // Iterate subsequent regions which fully lay inside [area_start, area_end[. - region_start = region_end; - region_end = region_start + Page::kRegionSize; - - while (region_end <= area_end) { - if (marks & mask) { - if (visit_dirty_region(this, - region_start, - region_end, - copy_object_func)) { - newmarks |= mask; - } - } - - region_start = region_end; - region_end = region_start + Page::kRegionSize; - - mask <<= 1; - } - - if (region_start != area_end) { - // A small piece of area left uniterated because area_end does not coincide - // with region end. Check whether region covering last part of area is - // dirty. - if (marks & mask) { - if (visit_dirty_region(this, region_start, area_end, copy_object_func)) { - newmarks |= mask; - } - } - } - - return newmarks; } - -void Heap::IterateDirtyRegions( - PagedSpace* space, - DirtyRegionCallback visit_dirty_region, - ObjectSlotCallback copy_object_func, - ExpectedPageWatermarkState expected_page_watermark_state) { - - PageIterator it(space, PageIterator::PAGES_IN_USE); - - while (it.has_next()) { - Page* page = it.next(); - uint32_t marks = page->GetRegionMarks(); - - if (marks != Page::kAllRegionsCleanMarks) { - Address start = page->ObjectAreaStart(); - - // Do not try to visit pointers beyond page allocation watermark. - // Page can contain garbage pointers there. 
- Address end; - - if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) || - page->IsWatermarkValid()) { - end = page->AllocationWatermark(); - } else { - end = page->CachedAllocationWatermark(); - } - - ASSERT(space == old_pointer_space_ || - (space == map_space_ && - ((page->ObjectAreaStart() - end) % Map::kSize == 0))); - - page->SetRegionMarks(IterateDirtyRegions(marks, - start, - end, - visit_dirty_region, - copy_object_func)); +void Heap::LargeObjectSpaceCheckStoreBuffer() { + LargeObjectIterator it(lo_space()); + for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { + // We only have code, sequential strings, or fixed arrays in large + // object space, and only fixed arrays can possibly contain pointers to + // the young generation. + if (object->IsFixedArray()) { + Object*** store_buffer_position = store_buffer()->Start(); + Object*** store_buffer_top = store_buffer()->Top(); + Object** current = reinterpret_cast<Object**>(object->address()); + Object** limit = + reinterpret_cast<Object**>(object->address() + object->Size()); + CheckStoreBuffer(this, + current, + limit, + &store_buffer_position, + store_buffer_top, + &EverythingsAPointer, + NULL, + NULL); } - - // Mark page watermark as invalid to maintain watermark validity invariant. - // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details. - page->InvalidateWatermark(true); } } +#endif void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { @@ -4941,8 +5024,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) { // Iterate over the builtin code objects and code stubs in the // heap. Note that it is not necessary to iterate over code objects // on scavenge collections. - if (mode != VISIT_ALL_IN_SCAVENGE && - mode != VISIT_ALL_IN_SWEEP_NEWSPACE) { + if (mode != VISIT_ALL_IN_SCAVENGE) { isolate_->builtins()->IterateBuiltins(v); } v->Synchronize("builtins"); @@ -4986,11 +5068,20 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) { // and through the API, we should gracefully handle the case that the heap // size is not big enough to fit all the initial objects. bool Heap::ConfigureHeap(int max_semispace_size, - int max_old_gen_size, - int max_executable_size) { + intptr_t max_old_gen_size, + intptr_t max_executable_size) { if (HasBeenSetup()) return false; - if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size; + if (max_semispace_size > 0) { + if (max_semispace_size < Page::kPageSize) { + max_semispace_size = Page::kPageSize; + if (FLAG_trace_gc) { + PrintF("Max semispace size cannot be less than %dkbytes\n", + Page::kPageSize >> 10); + } + } + max_semispace_size_ = max_semispace_size; + } if (Snapshot::IsEnabled()) { // If we are using a snapshot we always reserve the default amount @@ -5000,6 +5091,10 @@ bool Heap::ConfigureHeap(int max_semispace_size, // than the default reserved semispace size. if (max_semispace_size_ > reserved_semispace_size_) { max_semispace_size_ = reserved_semispace_size_; + if (FLAG_trace_gc) { + PrintF("Max semispace size cannot be more than %dkbytes\n", + reserved_semispace_size_ >> 10); + } } } else { // If we are not using snapshots we reserve space for the actual @@ -5025,8 +5120,12 @@ bool Heap::ConfigureHeap(int max_semispace_size, initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_); external_allocation_limit_ = 10 * max_semispace_size_; - // The old generation is paged. 
- max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize); + // The old generation is paged and needs at least one page for each space. + int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; + max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count * + Page::kPageSize), + RoundUp(max_old_generation_size_, + Page::kPageSize)); configured_ = true; return true; @@ -5034,9 +5133,9 @@ bool Heap::ConfigureHeap(int max_semispace_size, bool Heap::ConfigureHeapDefault() { - return ConfigureHeap(FLAG_max_new_space_size / 2 * KB, - FLAG_max_old_space_size * MB, - FLAG_max_executable_size * MB); + return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB, + static_cast<intptr_t>(FLAG_max_old_space_size) * MB, + static_cast<intptr_t>(FLAG_max_executable_size) * MB); } @@ -5064,7 +5163,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) { *stats->os_error = OS::GetLastError(); isolate()->memory_allocator()->Available(); if (take_snapshot) { - HeapIterator iterator(HeapIterator::kFilterFreeListNodes); + HeapIterator iterator; for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { @@ -5280,31 +5379,21 @@ bool Heap::Setup(bool create_heap_objects) { gc_initializer_mutex->Lock(); static bool initialized_gc = false; if (!initialized_gc) { - initialized_gc = true; - InitializeScavengingVisitorsTables(); - NewSpaceScavenger::Initialize(); - MarkCompactCollector::Initialize(); + initialized_gc = true; + InitializeScavengingVisitorsTables(); + NewSpaceScavenger::Initialize(); + MarkCompactCollector::Initialize(); } gc_initializer_mutex->Unlock(); MarkMapPointersAsEncoded(false); - // Setup memory allocator and reserve a chunk of memory for new - // space. The chunk is double the size of the requested reserved - // new space size to ensure that we can find a pair of semispaces that - // are contiguous and aligned to their size. + // Setup memory allocator. if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize())) return false; - void* chunk = - isolate_->memory_allocator()->ReserveInitialChunk( - 4 * reserved_semispace_size_); - if (chunk == NULL) return false; - - // Align the pair of semispaces to their size, which must be a power - // of 2. - Address new_space_start = - RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_); - if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) { + + // Setup new space. + if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) { return false; } @@ -5315,7 +5404,7 @@ bool Heap::Setup(bool create_heap_objects) { OLD_POINTER_SPACE, NOT_EXECUTABLE); if (old_pointer_space_ == NULL) return false; - if (!old_pointer_space_->Setup(NULL, 0)) return false; + if (!old_pointer_space_->Setup()) return false; // Initialize old data space. old_data_space_ = @@ -5324,7 +5413,7 @@ bool Heap::Setup(bool create_heap_objects) { OLD_DATA_SPACE, NOT_EXECUTABLE); if (old_data_space_ == NULL) return false; - if (!old_data_space_->Setup(NULL, 0)) return false; + if (!old_data_space_->Setup()) return false; // Initialize the code space, set its maximum capacity to the old // generation size. It needs executable memory. @@ -5339,21 +5428,20 @@ bool Heap::Setup(bool create_heap_objects) { code_space_ = new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE); if (code_space_ == NULL) return false; - if (!code_space_->Setup(NULL, 0)) return false; + if (!code_space_->Setup()) return false; // Initialize map space. 
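The reworked ConfigureHeap above clamps the semispace size to at least one page and gives the old generation at least one page per paged space, rounding up to a page multiple. The arithmetic in isolation, with stand-in constants for Page::kPageSize and the number of paged spaces:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Stand-in constants, not V8's real values.
static const intptr_t kPageSize = 1 << 20;
static const intptr_t kPagedSpaceCount = 5;

intptr_t RoundUpToPage(intptr_t size) {
  return ((size + kPageSize - 1) / kPageSize) * kPageSize;
}

void ConfigureHeap(intptr_t* max_semispace_size, intptr_t* max_old_gen_size) {
  // The semispace must be at least one page.
  *max_semispace_size = std::max(*max_semispace_size, kPageSize);
  // The old generation needs one page per paged space, rounded to page size.
  *max_old_gen_size = std::max(kPagedSpaceCount * kPageSize,
                               RoundUpToPage(*max_old_gen_size));
}

int main() {
  intptr_t semi = 64 * 1024;             // too small, clamped up to one page
  intptr_t old_gen = 3 * kPageSize / 2;  // rounded up, then clamped to the minimum
  ConfigureHeap(&semi, &old_gen);
  std::printf("semispace=%lld old_gen=%lld\n",
              static_cast<long long>(semi), static_cast<long long>(old_gen));
  return 0;
}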
- map_space_ = new MapSpace(this, FLAG_use_big_map_space - ? max_old_generation_size_ - : MapSpace::kMaxMapPageIndex * Page::kPageSize, - FLAG_max_map_space_pages, - MAP_SPACE); + map_space_ = new MapSpace(this, + max_old_generation_size_, + FLAG_max_map_space_pages, + MAP_SPACE); if (map_space_ == NULL) return false; - if (!map_space_->Setup(NULL, 0)) return false; + if (!map_space_->Setup()) return false; // Initialize global property cell space. cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE); if (cell_space_ == NULL) return false; - if (!cell_space_->Setup(NULL, 0)) return false; + if (!cell_space_->Setup()) return false; // The large object code space may contain code or data. We set the memory // to be non-executable here for safety, but this means we need to enable it @@ -5361,7 +5449,6 @@ bool Heap::Setup(bool create_heap_objects) { lo_space_ = new LargeObjectSpace(this, LO_SPACE); if (lo_space_ == NULL) return false; if (!lo_space_->Setup()) return false; - if (create_heap_objects) { // Create initial maps. if (!CreateInitialMaps()) return false; @@ -5376,6 +5463,8 @@ bool Heap::Setup(bool create_heap_objects) { LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); LOG(isolate_, IntPtrTEvent("heap-available", Available())); + store_buffer()->Setup(); + return true; } @@ -5402,7 +5491,6 @@ void Heap::TearDown() { PrintF("\n\n"); PrintF("gc_count=%d ", gc_count_); PrintF("mark_sweep_count=%d ", ms_count_); - PrintF("mark_compact_count=%d ", mc_count_); PrintF("max_gc_pause=%d ", get_max_gc_pause()); PrintF("min_in_mutator=%d ", get_min_in_mutator()); PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", @@ -5452,6 +5540,9 @@ void Heap::TearDown() { lo_space_ = NULL; } + store_buffer()->TearDown(); + incremental_marking()->TearDown(); + isolate_->memory_allocator()->TearDown(); #ifdef DEBUG @@ -5465,7 +5556,7 @@ void Heap::Shrink() { // Try to shrink all paged spaces. PagedSpaces spaces; for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next()) - space->Shrink(); + space->ReleaseAllUnusedPages(); } @@ -5668,45 +5759,6 @@ class HeapObjectsFilter { }; -class FreeListNodesFilter : public HeapObjectsFilter { - public: - FreeListNodesFilter() { - MarkFreeListNodes(); - } - - bool SkipObject(HeapObject* object) { - if (object->IsMarked()) { - object->ClearMark(); - return true; - } else { - return false; - } - } - - private: - void MarkFreeListNodes() { - Heap* heap = HEAP; - heap->old_pointer_space()->MarkFreeListNodes(); - heap->old_data_space()->MarkFreeListNodes(); - MarkCodeSpaceFreeListNodes(heap); - heap->map_space()->MarkFreeListNodes(); - heap->cell_space()->MarkFreeListNodes(); - } - - void MarkCodeSpaceFreeListNodes(Heap* heap) { - // For code space, using FreeListNode::IsFreeListNode is OK. 
- HeapObjectIterator iter(heap->code_space()); - for (HeapObject* obj = iter.next_object(); - obj != NULL; - obj = iter.next_object()) { - if (FreeListNode::IsFreeListNode(obj)) obj->SetMark(); - } - } - - AssertNoAllocation no_alloc; -}; - - class UnreachableObjectsFilter : public HeapObjectsFilter { public: UnreachableObjectsFilter() { @@ -5714,8 +5766,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter { } bool SkipObject(HeapObject* object) { - if (object->IsMarked()) { - object->ClearMark(); + if (IntrusiveMarking::IsMarked(object)) { + IntrusiveMarking::ClearMark(object); return true; } else { return false; @@ -5731,8 +5783,8 @@ class UnreachableObjectsFilter : public HeapObjectsFilter { for (Object** p = start; p < end; p++) { if (!(*p)->IsHeapObject()) continue; HeapObject* obj = HeapObject::cast(*p); - if (obj->IsMarked()) { - obj->ClearMark(); + if (IntrusiveMarking::IsMarked(obj)) { + IntrusiveMarking::ClearMark(obj); list_.Add(obj); } } @@ -5754,7 +5806,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter { for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { - obj->SetMark(); + IntrusiveMarking::SetMark(obj); } UnmarkingVisitor visitor; HEAP->IterateRoots(&visitor, VISIT_ALL); @@ -5788,10 +5840,11 @@ HeapIterator::~HeapIterator() { void HeapIterator::Init() { // Start the iteration. space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator : - new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject); + new SpaceIterator(Isolate::Current()->heap()-> + GcSafeSizeOfOldObjectFunction()); switch (filtering_) { case kFilterFreeListNodes: - filter_ = new FreeListNodesFilter; + // TODO(gc): Not handled. break; case kFilterUnreachable: filter_ = new UnreachableObjectsFilter; @@ -5928,6 +5981,11 @@ void PathTracer::TracePathFrom(Object** root) { } +static bool SafeIsGlobalContext(HeapObject* obj) { + return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map(); +} + + void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) { if (!(*p)->IsHeapObject()) return; @@ -5946,7 +6004,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) { return; } - bool is_global_context = obj->IsGlobalContext(); + bool is_global_context = SafeIsGlobalContext(obj); // not visited yet Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map)); @@ -6054,7 +6112,7 @@ static intptr_t CountTotalHolesSize() { for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) { - holes_size += space->Waste() + space->AvailableFree(); + holes_size += space->Waste() + space->Available(); } return holes_size; } @@ -6065,17 +6123,10 @@ GCTracer::GCTracer(Heap* heap) start_size_(0), gc_count_(0), full_gc_count_(0), - is_compacting_(false), - marked_count_(0), allocated_since_last_gc_(0), spent_in_mutator_(0), promoted_objects_size_(0), heap_(heap) { - // These two fields reflect the state of the previous full collection. - // Set them before they are changed by the collector. 
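UnreachableObjectsFilter now relies on IntrusiveMarking: every object is marked up front, the marks are cleared transitively from the roots, and whatever is still marked is skipped by the heap iterator. The same idea over a plain object graph (Node and the filter here are illustrative, not V8 classes):

#include <cstdio>
#include <vector>

// Illustrative types: mark everything, clear the marks transitively from the
// roots, and treat whatever stays marked as unreachable.
struct Node {
  std::vector<Node*> children;
  bool marked = false;
};

class UnreachableObjectsFilter {
 public:
  UnreachableObjectsFilter(const std::vector<Node*>& all_objects,
                           const std::vector<Node*>& roots) {
    for (size_t i = 0; i < all_objects.size(); i++) all_objects[i]->marked = true;
    for (size_t i = 0; i < roots.size(); i++) Unmark(roots[i]);
  }

  bool SkipObject(Node* object) const { return object->marked; }

 private:
  void Unmark(Node* node) {
    if (!node->marked) return;
    node->marked = false;
    for (size_t i = 0; i < node->children.size(); i++) Unmark(node->children[i]);
  }
};

int main() {
  Node a, b, c;  // c is not reachable from the root set {a}
  a.children.push_back(&b);
  std::vector<Node*> all;
  all.push_back(&a); all.push_back(&b); all.push_back(&c);
  std::vector<Node*> roots;
  roots.push_back(&a);
  UnreachableObjectsFilter filter(all, roots);
  std::printf("skip b: %d, skip c: %d\n", filter.SkipObject(&b), filter.SkipObject(&c));
  return 0;
}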
- previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted(); - previous_marked_count_ = - heap_->mark_compact_collector_.previous_marked_count(); if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return; start_time_ = OS::TimeCurrentMillis(); start_size_ = heap_->SizeOfObjects(); @@ -6092,6 +6143,14 @@ GCTracer::GCTracer(Heap* heap) if (heap_->last_gc_end_timestamp_ > 0) { spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0); } + + steps_count_ = heap_->incremental_marking()->steps_count(); + steps_took_ = heap_->incremental_marking()->steps_took(); + longest_step_ = heap_->incremental_marking()->longest_step(); + steps_count_since_last_gc_ = + heap_->incremental_marking()->steps_count_since_last_gc(); + steps_took_since_last_gc_ = + heap_->incremental_marking()->steps_took_since_last_gc(); } @@ -6126,7 +6185,21 @@ GCTracer::~GCTracer() { SizeOfHeapObjects()); if (external_time > 0) PrintF("%d / ", external_time); - PrintF("%d ms.\n", time); + PrintF("%d ms", time); + if (steps_count_ > 0) { + if (collector_ == SCAVENGER) { + PrintF(" (+ %d ms in %d steps since last GC)", + static_cast<int>(steps_took_since_last_gc_), + steps_count_since_last_gc_); + } else { + PrintF(" (+ %d ms in %d steps since start of marking, " + "biggest step %f ms)", + static_cast<int>(steps_took_), + steps_count_, + longest_step_); + } + } + PrintF(".\n"); } else { PrintF("pause=%d ", time); PrintF("mutator=%d ", @@ -6138,8 +6211,7 @@ GCTracer::~GCTracer() { PrintF("s"); break; case MARK_COMPACTOR: - PrintF("%s", - heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms"); + PrintF("ms"); break; default: UNREACHABLE(); @@ -6161,6 +6233,14 @@ GCTracer::~GCTracer() { PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_); PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_); + if (collector_ == SCAVENGER) { + PrintF("stepscount=%d ", steps_count_since_last_gc_); + PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_)); + } else { + PrintF("stepscount=%d ", steps_count_); + PrintF("stepstook=%d ", static_cast<int>(steps_took_)); + } + PrintF("\n"); } @@ -6173,8 +6253,7 @@ const char* GCTracer::CollectorString() { case SCAVENGER: return "Scavenge"; case MARK_COMPACTOR: - return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact" - : "Mark-sweep"; + return "Mark-sweep"; } return "Unknown GC"; } @@ -6281,4 +6360,52 @@ void ExternalStringTable::TearDown() { } +void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) { + chunk->set_next_chunk(chunks_queued_for_free_); + chunks_queued_for_free_ = chunk; +} + + +void Heap::FreeQueuedChunks() { + if (chunks_queued_for_free_ == NULL) return; + MemoryChunk* next; + MemoryChunk* chunk; + for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { + next = chunk->next_chunk(); + chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); + + if (chunk->owner()->identity() == LO_SPACE) { + // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress. + // If FromAnyPointerAddress encounters a slot that belongs to a large + // chunk queued for deletion it will fail to find the chunk because + // it try to perform a search in the list of pages owned by of the large + // object space and queued chunks were detached from that list. + // To work around this we split large chunk into normal kPageSize aligned + // pieces and initialize owner field and flags of every piece. 
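QueueMemoryChunkForFree/FreeQueuedChunks, added above, defer releasing memory chunks until the store buffer has been compacted and filtered; large chunks are additionally split into page-sized fake headers so FromAnyPointerAddress keeps working on them in the meantime. A bare-bones version of just the deferred-free queue (the splitting and store-buffer filtering are deliberately left out):

#include <cstdio>

// Bare-bones deferred-free queue: chunks go onto an intrusive list while the
// GC still needs their headers and are released later in one batch.
struct MemoryChunk {
  MemoryChunk* next_chunk = nullptr;
};

struct MiniHeap {
  MemoryChunk* chunks_queued_for_free_ = nullptr;

  void QueueMemoryChunkForFree(MemoryChunk* chunk) {
    chunk->next_chunk = chunks_queued_for_free_;
    chunks_queued_for_free_ = chunk;
  }

  int FreeQueuedChunks() {
    int freed = 0;
    for (MemoryChunk* chunk = chunks_queued_for_free_; chunk != nullptr;) {
      MemoryChunk* next = chunk->next_chunk;
      delete chunk;  // the real code hands the chunk back to the memory allocator
      chunk = next;
      freed++;
    }
    chunks_queued_for_free_ = nullptr;
    return freed;
  }
};

int main() {
  MiniHeap heap;
  heap.QueueMemoryChunkForFree(new MemoryChunk());
  heap.QueueMemoryChunkForFree(new MemoryChunk());
  std::printf("freed %d chunks\n", heap.FreeQueuedChunks());
  return 0;
}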
+ // If FromAnyPointerAddress encounteres a slot that belongs to one of + // these smaller pieces it will treat it as a slot on a normal Page. + MemoryChunk* inner = MemoryChunk::FromAddress( + chunk->address() + Page::kPageSize); + MemoryChunk* inner_last = MemoryChunk::FromAddress( + chunk->address() + chunk->size() - 1); + while (inner <= inner_last) { + // Size of a large chunk is always a multiple of + // OS::AllocationAlignment() so there is always + // enough space for a fake MemoryChunk header. + inner->set_owner(lo_space()); + inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED); + inner = MemoryChunk::FromAddress( + inner->address() + Page::kPageSize); + } + } + } + isolate_->heap()->store_buffer()->Compact(); + isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED); + for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) { + next = chunk->next_chunk(); + isolate_->memory_allocator()->Free(chunk); + } + chunks_queued_for_free_ = NULL; +} + } } // namespace v8::internal diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index d81ff6cad..6fb2d18c2 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -32,11 +32,15 @@ #include "allocation.h" #include "globals.h" +#include "incremental-marking.h" #include "list.h" #include "mark-compact.h" +#include "objects-visiting.h" #include "spaces.h" #include "splay-tree-inl.h" +#include "store-buffer.h" #include "v8-counters.h" +#include "v8globals.h" namespace v8 { namespace internal { @@ -48,20 +52,20 @@ inline Heap* _inline_get_heap_(); // Defines all the roots in Heap. -#define STRONG_ROOT_LIST(V) \ - /* Put the byte array map early. We need it to be in place by the time */ \ - /* the deserializer hits the next page, since it wants to put a byte */ \ - /* array in the unused space at the end of the page. */ \ +#define STRONG_ROOT_LIST(V) \ V(Map, byte_array_map, ByteArrayMap) \ + V(Map, free_space_map, FreeSpaceMap) \ V(Map, one_pointer_filler_map, OnePointerFillerMap) \ V(Map, two_pointer_filler_map, TwoPointerFillerMap) \ /* Cluster the most popular ones in a few cache lines here at the top. 
*/ \ - V(Object, undefined_value, UndefinedValue) \ - V(Object, the_hole_value, TheHoleValue) \ - V(Object, null_value, NullValue) \ - V(Object, true_value, TrueValue) \ - V(Object, false_value, FalseValue) \ - V(Object, arguments_marker, ArgumentsMarker) \ + V(Smi, store_buffer_top, StoreBufferTop) \ + V(Oddball, undefined_value, UndefinedValue) \ + V(Oddball, the_hole_value, TheHoleValue) \ + V(Oddball, null_value, NullValue) \ + V(Oddball, true_value, TrueValue) \ + V(Oddball, false_value, FalseValue) \ + V(Oddball, arguments_marker, ArgumentsMarker) \ + V(Oddball, frame_alignment_marker, FrameAlignmentMarker) \ V(Map, heap_number_map, HeapNumberMap) \ V(Map, global_context_map, GlobalContextMap) \ V(Map, fixed_array_map, FixedArrayMap) \ @@ -122,8 +126,9 @@ inline Heap* _inline_get_heap_(); V(Map, shared_function_info_map, SharedFunctionInfoMap) \ V(Map, message_object_map, JSMessageObjectMap) \ V(Map, foreign_map, ForeignMap) \ - V(Object, nan_value, NanValue) \ - V(Object, minus_zero_value, MinusZeroValue) \ + V(HeapNumber, nan_value, NanValue) \ + V(HeapNumber, infinity_value, InfinityValue) \ + V(HeapNumber, minus_zero_value, MinusZeroValue) \ V(Map, neander_map, NeanderMap) \ V(JSObject, message_listeners, MessageListeners) \ V(Foreign, prototype_accessors, PrototypeAccessors) \ @@ -226,7 +231,9 @@ inline Heap* _inline_get_heap_(); V(closure_symbol, "(closure)") \ V(use_strict, "use strict") \ V(dot_symbol, ".") \ - V(anonymous_function_symbol, "(anonymous function)") + V(anonymous_function_symbol, "(anonymous function)") \ + V(infinity_symbol, "Infinity") \ + V(minus_infinity_symbol, "-Infinity") // Forward declarations. class GCTracer; @@ -238,10 +245,26 @@ class WeakObjectRetainer; typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap, Object** pointer); -typedef bool (*DirtyRegionCallback)(Heap* heap, - Address start, - Address end, - ObjectSlotCallback copy_object_func); +class StoreBufferRebuilder { + public: + explicit StoreBufferRebuilder(StoreBuffer* store_buffer) + : store_buffer_(store_buffer) { + } + + void Callback(MemoryChunk* page, StoreBufferEvent event); + + private: + StoreBuffer* store_buffer_; + + // We record in this variable how full the store buffer was when we started + // iterating over the current page, finding pointers to new space. If the + // store buffer overflows again we can exempt the page from the store buffer + // by rewinding to this point instead of having to search the store buffer. + Object*** start_of_current_page_; + // The current page we are scanning in the store buffer iterator. + MemoryChunk* current_page_; +}; + // The all static Heap captures the interface to the global object heap. @@ -259,22 +282,37 @@ class PromotionQueue { PromotionQueue() : front_(NULL), rear_(NULL) { } void Initialize(Address start_address) { + // Assumes that a NewSpacePage exactly fits a number of promotion queue + // entries (where each is a pair of intptr_t). This allows us to simplify + // the test fpr when to switch pages. 
+ ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) + == 0); + ASSERT(NewSpacePage::IsAtEnd(start_address)); front_ = rear_ = reinterpret_cast<intptr_t*>(start_address); } - bool is_empty() { return front_ <= rear_; } + bool is_empty() { return front_ == rear_; } inline void insert(HeapObject* target, int size); void remove(HeapObject** target, int* size) { + ASSERT(!is_empty()); + if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) { + NewSpacePage* front_page = + NewSpacePage::FromAddress(reinterpret_cast<Address>(front_)); + ASSERT(!front_page->prev_page()->is_anchor()); + front_ = + reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit()); + } *target = reinterpret_cast<HeapObject*>(*(--front_)); *size = static_cast<int>(*(--front_)); // Assert no underflow. - ASSERT(front_ >= rear_); + SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_), + reinterpret_cast<Address>(front_)); } private: - // The front of the queue is higher in memory than the rear. + // The front of the queue is higher in the memory page chain than the rear. intptr_t* front_; intptr_t* rear_; @@ -282,6 +320,11 @@ class PromotionQueue { }; +typedef void (*ScavengingCallback)(Map* map, + HeapObject** slot, + HeapObject* object); + + // External strings table is a place where all external strings are // registered. We need to keep track of such strings to properly // finalize them. @@ -327,8 +370,8 @@ class Heap { // Configure heap size before setup. Return false if the heap has been // setup already. bool ConfigureHeap(int max_semispace_size, - int max_old_gen_size, - int max_executable_size); + intptr_t max_old_gen_size, + intptr_t max_executable_size); bool ConfigureHeapDefault(); // Initializes the global object heap. If create_heap_objects is true, @@ -456,6 +499,7 @@ class Heap { // size, but keeping the original prototype. The receiver must have at least // the size of the new object. The object is reinitialized and behaves as an // object that has been freshly allocated. + // Returns failure if an error occured, otherwise object. MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object, InstanceType type, int size); @@ -484,8 +528,10 @@ class Heap { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this function does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type, - int instance_size); + MUST_USE_RESULT MaybeObject* AllocateMap( + InstanceType instance_type, + int instance_size, + ElementsKind elements_kind = FAST_ELEMENTS); // Allocates a partial map for bootstrapping. MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type, @@ -796,9 +842,9 @@ class Heap { // failed. // Please note this does not perform a garbage collection. MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii( - ExternalAsciiString::Resource* resource); + const ExternalAsciiString::Resource* resource); MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte( - ExternalTwoByteString::Resource* resource); + const ExternalTwoByteString::Resource* resource); // Finalizes an external string by deleting the associated external // data and clearing the resource pointer. @@ -885,13 +931,24 @@ class Heap { // collect more garbage. inline bool CollectGarbage(AllocationSpace space); - // Performs a full garbage collection. Force compaction if the - // parameter is true. 
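[Editor's sketch] The promotion queue above stores each entry as a pair of intptr_t words (size, object pointer) and pops them from front_ as shown in remove(). The standalone toy below illustrates that layout with plain memory; insert() is only declared in this header, so treating it as the mirror image of remove() is an assumption, and the NewSpacePage-wrapping logic is deliberately left out.

// Toy analogue of the promotion queue's two-word entry layout.
// All names here (ToyPromotionQueue, etc.) are illustrative, not V8's.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct ToyPromotionQueue {
  intptr_t* front_;  // Oldest entries live at the high end of the buffer.
  intptr_t* rear_;   // New entries are written below front_.

  void Initialize(intptr_t* end_of_buffer) { front_ = rear_ = end_of_buffer; }
  bool is_empty() const { return front_ == rear_; }

  // Hypothetical mirror image of remove(): writes (object, size) downward.
  void insert(void* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
  }

  // Same reads as the V8 remove() above, minus the page-boundary handling.
  void remove(void** target, int* size) {
    assert(!is_empty());
    *target = reinterpret_cast<void*>(*(--front_));
    *size = static_cast<int>(*(--front_));
  }
};

int main() {
  intptr_t buffer[64];
  ToyPromotionQueue q;
  q.Initialize(buffer + 64);      // The queue grows downward from the end.
  int x = 1, y = 2;
  q.insert(&x, sizeof(x));
  q.insert(&y, sizeof(y));
  void* obj; int size;
  q.remove(&obj, &size);          // FIFO: the first object inserted comes out.
  printf("removed %p, size %d, was &x? %d\n", obj, size, (int)(obj == &x));
  return 0;
}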
- void CollectAllGarbage(bool force_compaction); + static const int kNoGCFlags = 0; + static const int kMakeHeapIterableMask = 1; + + // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is + // non-zero, then the slower precise sweeper is used, which leaves the heap + // in a state where we can iterate over the heap visiting all objects. + void CollectAllGarbage(int flags); // Last hope GC, should try to squeeze as much as possible. void CollectAllAvailableGarbage(); + // Check whether the heap is currently iterable. + bool IsHeapIterable(); + + // Ensure that we have swept all spaces in such a way that we can iterate + // over all objects. May cause a GC. + void EnsureHeapIsIterable(); + // Notify the heap that a context has been disposed. int NotifyContextDisposed() { return ++contexts_disposed_; } @@ -899,6 +956,20 @@ class Heap { // ensure correct callback for weak global handles. void PerformScavenge(); + inline void increment_scan_on_scavenge_pages() { + scan_on_scavenge_pages_++; + if (FLAG_gc_verbose) { + PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); + } + } + + inline void decrement_scan_on_scavenge_pages() { + scan_on_scavenge_pages_--; + if (FLAG_gc_verbose) { + PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_); + } + } + PromotionQueue* promotion_queue() { return &promotion_queue_; } #ifdef DEBUG @@ -925,6 +996,8 @@ class Heap { // Heap root getters. We have versions with and without type::cast() here. // You can't use type::cast during GC because the assert fails. + // TODO(1490): Try removing the unchecked accessors, now that GC marking does + // not corrupt the stack. #define ROOT_ACCESSOR(type, name, camel_name) \ type* name() { \ return type::cast(roots_[k##camel_name##RootIndex]); \ @@ -958,6 +1031,9 @@ class Heap { } Object* global_contexts_list() { return global_contexts_list_; } + // Number of mark-sweeps. + int ms_count() { return ms_count_; } + // Iterates over all roots in the heap. void IterateRoots(ObjectVisitor* v, VisitMode mode); // Iterates over all strong roots in the heap. @@ -965,60 +1041,16 @@ class Heap { // Iterates over all the other roots in the heap. void IterateWeakRoots(ObjectVisitor* v, VisitMode mode); - enum ExpectedPageWatermarkState { - WATERMARK_SHOULD_BE_VALID, - WATERMARK_CAN_BE_INVALID - }; - - // For each dirty region on a page in use from an old space call - // visit_dirty_region callback. - // If either visit_dirty_region or callback can cause an allocation - // in old space and changes in allocation watermark then - // can_preallocate_during_iteration should be set to true. - // All pages will be marked as having invalid watermark upon - // iteration completion. - void IterateDirtyRegions( - PagedSpace* space, - DirtyRegionCallback visit_dirty_region, - ObjectSlotCallback callback, - ExpectedPageWatermarkState expected_page_watermark_state); - - // Interpret marks as a bitvector of dirty marks for regions of size - // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering - // memory interval from start to top. For each dirty region call a - // visit_dirty_region callback. Return updated bitvector of dirty marks. - uint32_t IterateDirtyRegions(uint32_t marks, - Address start, - Address end, - DirtyRegionCallback visit_dirty_region, - ObjectSlotCallback callback); - // Iterate pointers to from semispace of new space found in memory interval // from start to end. - // Update dirty marks for page containing start address. 
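[Editor's sketch] A hedged usage fragment for the new flags-based CollectAllGarbage: it uses only the constants and methods declared above, but it is V8-internal code and is not runnable outside the VM; the two helper function names are hypothetical.

// Assumes this header (heap.h) is included and a Heap* is in scope.
void PrepareHeapForIteration(v8::internal::Heap* heap) {
  // Force the precise sweeper so every live object can be visited afterwards
  // (see the comment on CollectAllGarbage above). EnsureHeapIsIterable()
  // bundles the same check-and-collect.
  heap->CollectAllGarbage(v8::internal::Heap::kMakeHeapIterableMask);
}

void OrdinaryFullCollection(v8::internal::Heap* heap) {
  // No iterability requirement: the cheaper lazy sweep is fine.
  heap->CollectAllGarbage(v8::internal::Heap::kNoGCFlags);
}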
void IterateAndMarkPointersToFromSpace(Address start, Address end, ObjectSlotCallback callback); - // Iterate pointers to new space found in memory interval from start to end. - // Return true if pointers to new space was found. - static bool IteratePointersInDirtyRegion(Heap* heap, - Address start, - Address end, - ObjectSlotCallback callback); - - - // Iterate pointers to new space found in memory interval from start to end. - // This interval is considered to belong to the map space. - // Return true if pointers to new space was found. - static bool IteratePointersInDirtyMapsRegion(Heap* heap, - Address start, - Address end, - ObjectSlotCallback callback); - - // Returns whether the object resides in new space. inline bool InNewSpace(Object* object); + inline bool InNewSpace(Address addr); + inline bool InNewSpacePage(Address addr); inline bool InFromSpace(Object* object); inline bool InToSpace(Object* object); @@ -1057,12 +1089,20 @@ class Heap { roots_[kEmptyScriptRootIndex] = script; } + void public_set_store_buffer_top(Address* top) { + roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top); + } + // Update the next script id. inline void SetLastScriptId(Object* last_script_id); // Generated code can embed this address to get access to the roots. Object** roots_address() { return roots_; } + Address* store_buffer_top_address() { + return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]); + } + // Get address of global contexts list for serialization support. Object** global_contexts_list_address() { return &global_contexts_list_; @@ -1075,6 +1115,10 @@ class Heap { // Verify the heap is in its normal state before or after a GC. void Verify(); + void OldPointerSpaceCheckStoreBuffer(); + void MapSpaceCheckStoreBuffer(); + void LargeObjectSpaceCheckStoreBuffer(); + // Report heap statistics. void ReportHeapStatistics(const char* title); void ReportCodeStatistics(const char* title); @@ -1170,22 +1214,53 @@ class Heap { MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length, PretenureFlag pretenure); + inline intptr_t PromotedTotalSize() { + return PromotedSpaceSize() + PromotedExternalMemorySize(); + } + // True if we have reached the allocation limit in the old generation that // should force the next GC (caused normally) to be a full one. - bool OldGenerationPromotionLimitReached() { - return (PromotedSpaceSize() + PromotedExternalMemorySize()) - > old_gen_promotion_limit_; + inline bool OldGenerationPromotionLimitReached() { + return PromotedTotalSize() > old_gen_promotion_limit_; } - intptr_t OldGenerationSpaceAvailable() { - return old_gen_allocation_limit_ - - (PromotedSpaceSize() + PromotedExternalMemorySize()); + inline intptr_t OldGenerationSpaceAvailable() { + return old_gen_allocation_limit_ - PromotedTotalSize(); } - // True if we have reached the allocation limit in the old generation that - // should artificially cause a GC right now. - bool OldGenerationAllocationLimitReached() { - return OldGenerationSpaceAvailable() < 0; + static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize; + static const intptr_t kMinimumAllocationLimit = + 8 * (Page::kPageSize > MB ? Page::kPageSize : MB); + + // When we sweep lazily we initially guess that there is no garbage on the + // heap and set the limits for the next GC accordingly. As we sweep we find + // out that some of the pages contained garbage and we have to adjust + // downwards the size of the heap. 
This means the limits that control the + // timing of the next GC also need to be adjusted downwards. + void LowerOldGenLimits(intptr_t adjustment) { + size_of_old_gen_at_last_old_space_gc_ -= adjustment; + old_gen_promotion_limit_ = + OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_); + old_gen_allocation_limit_ = + OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_); + } + + intptr_t OldGenPromotionLimit(intptr_t old_gen_size) { + const int divisor = FLAG_stress_compaction ? 10 : 3; + intptr_t limit = + Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit); + limit += new_space_.Capacity(); + limit *= old_gen_limit_factor_; + return limit; + } + + intptr_t OldGenAllocationLimit(intptr_t old_gen_size) { + const int divisor = FLAG_stress_compaction ? 8 : 2; + intptr_t limit = + Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit); + limit += new_space_.Capacity(); + limit *= old_gen_limit_factor_; + return limit; } // Can be called when the embedding application is idle. @@ -1213,6 +1288,8 @@ class Heap { MUST_USE_RESULT MaybeObject* NumberToString( Object* number, bool check_number_string_cache = true); + MUST_USE_RESULT MaybeObject* Uint32ToString( + uint32_t value, bool check_number_string_cache = true); Map* MapForExternalArrayType(ExternalArrayType array_type); RootListIndex RootIndexForExternalArrayType( @@ -1224,18 +1301,10 @@ class Heap { // by pointer size. static inline void CopyBlock(Address dst, Address src, int byte_size); - inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst, - Address src, - int byte_size); - // Optimized version of memmove for blocks with pointer size aligned sizes and // pointer size aligned addresses. static inline void MoveBlock(Address dst, Address src, int byte_size); - inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst, - Address src, - int byte_size); - // Check new space expansion criteria and expand semispaces if it was hit. void CheckNewSpaceExpansionCriteria(); @@ -1244,9 +1313,31 @@ class Heap { survived_since_last_expansion_ += survived; } + inline bool NextGCIsLikelyToBeFull() { + if (FLAG_gc_global) return true; + + intptr_t total_promoted = PromotedTotalSize(); + + intptr_t adjusted_promotion_limit = + old_gen_promotion_limit_ - new_space_.Capacity(); + + if (total_promoted >= adjusted_promotion_limit) return true; + + intptr_t adjusted_allocation_limit = + old_gen_allocation_limit_ - new_space_.Capacity() / 5; + + if (PromotedSpaceSize() >= adjusted_allocation_limit) return true; + + return false; + } + + void UpdateNewSpaceReferencesInExternalStringTable( ExternalStringTableUpdaterCallback updater_func); + void UpdateReferencesInExternalStringTable( + ExternalStringTableUpdaterCallback updater_func); + void ProcessWeakReferences(WeakObjectRetainer* retainer); // Helper function that governs the promotion policy from new space to @@ -1263,6 +1354,9 @@ class Heap { GCTracer* tracer() { return tracer_; } + // Returns the size of objects residing in non new spaces. 
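[Editor's sketch] A worked example of the two limit formulas above, with assumed inputs: 96 MB of old-generation data after the last mark-sweep, an 8 MB new space, --stress-compaction off, old_gen_limit_factor_ == 1, and Page::kPageSize taken as 1 MB for the minimums. Standalone code that just re-runs the arithmetic with plain integers.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t MB = 1024 * 1024;
  const int64_t kPageSize = 1 * MB;                       // assumption for the example
  const int64_t kMinimumPromotionLimit = 5 * kPageSize;
  const int64_t kMinimumAllocationLimit = 8 * std::max(kPageSize, MB);

  int64_t old_gen_size = 96 * MB;      // size_of_old_gen_at_last_old_space_gc_
  int64_t new_space_capacity = 8 * MB;
  int64_t factor = 1;                  // old_gen_limit_factor_

  // Divisors 3 and 2 respectively when --stress-compaction is off.
  int64_t promotion_limit =
      (std::max(old_gen_size + old_gen_size / 3, kMinimumPromotionLimit) +
       new_space_capacity) * factor;
  int64_t allocation_limit =
      (std::max(old_gen_size + old_gen_size / 2, kMinimumAllocationLimit) +
       new_space_capacity) * factor;

  printf("promotion limit:  %lld MB\n", (long long)(promotion_limit / MB));   // 136
  printf("allocation limit: %lld MB\n", (long long)(allocation_limit / MB));  // 152

  // NextGCIsLikelyToBeFull() then answers true once promoted data crosses
  // promotion_limit - new_space_capacity (128 MB here), or old-space size
  // crosses allocation_limit - new_space_capacity / 5 (about 150 MB here).
  return 0;
}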
+ intptr_t PromotedSpaceSize(); + double total_regexp_code_generated() { return total_regexp_code_generated_; } void IncreaseTotalRegexpCodeGenerated(int size) { total_regexp_code_generated_ += size; @@ -1281,6 +1375,18 @@ class Heap { return &mark_compact_collector_; } + StoreBuffer* store_buffer() { + return &store_buffer_; + } + + Marking* marking() { + return &marking_; + } + + IncrementalMarking* incremental_marking() { + return &incremental_marking_; + } + ExternalStringTable* external_string_table() { return &external_string_table_; } @@ -1291,16 +1397,28 @@ class Heap { } inline Isolate* isolate(); - bool is_safe_to_read_maps() { return is_safe_to_read_maps_; } - void CallGlobalGCPrologueCallback() { + inline void CallGlobalGCPrologueCallback() { if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_(); } - void CallGlobalGCEpilogueCallback() { + inline void CallGlobalGCEpilogueCallback() { if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_(); } + inline bool OldGenerationAllocationLimitReached(); + + inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) { + scavenging_visitors_table_.GetVisitor(map)(map, slot, obj); + } + + void QueueMemoryChunkForFree(MemoryChunk* chunk); + void FreeQueuedChunks(); + + // Completely clear the Instanceof cache (to stop it keeping objects alive + // around a GC). + inline void CompletelyClearInstanceofCache(); + private: Heap(); @@ -1308,12 +1426,12 @@ class Heap { // more expedient to get at the isolate directly from within Heap methods. Isolate* isolate_; + intptr_t code_range_size_; int reserved_semispace_size_; int max_semispace_size_; int initial_semispace_size_; intptr_t max_old_generation_size_; intptr_t max_executable_size_; - intptr_t code_range_size_; // For keeping track of how much data has survived // scavenge since last new space expansion. @@ -1328,6 +1446,8 @@ class Heap { // For keeping track of context disposals. int contexts_disposed_; + int scan_on_scavenge_pages_; + #if defined(V8_TARGET_ARCH_X64) static const int kMaxObjectSizeInNewSpace = 1024*KB; #else @@ -1344,13 +1464,9 @@ class Heap { HeapState gc_state_; int gc_post_processing_depth_; - // Returns the size of object residing in non new spaces. - intptr_t PromotedSpaceSize(); - // Returns the amount of external memory registered since last global gc. int PromotedExternalMemorySize(); - int mc_count_; // how many mark-compact collections happened int ms_count_; // how many mark-sweep collections happened unsigned int gc_count_; // how many gc happened @@ -1389,6 +1505,13 @@ class Heap { // every allocation in large object space. intptr_t old_gen_allocation_limit_; + // Sometimes the heuristics dictate that those limits are increased. This + // variable records that fact. + int old_gen_limit_factor_; + + // Used to adjust the limits that control the timing of the next GC. + intptr_t size_of_old_gen_at_last_old_space_gc_; + // Limit on the amount of externally allocated memory allowed // between global GCs. If reached a global GC is forced. intptr_t external_allocation_limit_; @@ -1408,6 +1531,8 @@ class Heap { Object* global_contexts_list_; + StoreBufferRebuilder store_buffer_rebuilder_; + struct StringTypeTable { InstanceType type; int size; @@ -1465,13 +1590,11 @@ class Heap { // Support for computing object sizes during GC. 
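[Editor's sketch] DoScavengeObject() above dispatches a ScavengingCallback through a table keyed by the object's map. The standalone analogue below shows the shape of that dispatch; V8's VisitorDispatchTable keys off a visitor id stored in the map, whereas the toy uses a small enum as the key, which is an assumption made purely for illustration.

#include <cstdio>

enum ToyVisitorId { kVisitFixedArray, kVisitString, kNumberOfToyVisitors };

struct ToyObject { ToyVisitorId id; };

typedef void (*ToyScavengingCallback)(ToyObject* object);

static void ScavengeFixedArray(ToyObject*) { puts("evacuate fixed array"); }
static void ScavengeString(ToyObject*)     { puts("evacuate string"); }

// One callback per object kind, selected up front
// (compare SelectScavengingVisitorsTable above).
static ToyScavengingCallback toy_table[kNumberOfToyVisitors] = {
  ScavengeFixedArray,
  ScavengeString,
};

static void DoScavengeObject(ToyObject* object) {
  toy_table[object->id](object);  // same shape as GetVisitor(map)(map, slot, obj)
}

int main() {
  ToyObject a = { kVisitFixedArray };
  ToyObject s = { kVisitString };
  DoScavengeObject(&a);
  DoScavengeObject(&s);
  return 0;
}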
HeapObjectCallback gc_safe_size_of_old_object_; static int GcSafeSizeOfOldObject(HeapObject* object); - static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object); // Update the GC state. Called from the mark-compact collector. void MarkMapPointersAsEncoded(bool encoded) { - gc_safe_size_of_old_object_ = encoded - ? &GcSafeSizeOfOldObjectWithEncodedMap - : &GcSafeSizeOfOldObject; + ASSERT(!encoded); + gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject; } // Checks whether a global GC is necessary @@ -1483,11 +1606,10 @@ class Heap { bool PerformGarbageCollection(GarbageCollector collector, GCTracer* tracer); - static const intptr_t kMinimumPromotionLimit = 2 * MB; - static const intptr_t kMinimumAllocationLimit = 8 * MB; inline void UpdateOldSpaceLimits(); + // Allocate an uninitialized object in map space. The behavior is identical // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't // have to test the allocation space argument and (b) can reduce code size @@ -1522,8 +1644,6 @@ class Heap { // Allocate empty fixed double array. MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray(); - void SwitchScavengingVisitorsTableIfProfilingWasEnabled(); - // Performs a minor collection in new generation. void Scavenge(); @@ -1532,16 +1652,15 @@ class Heap { Object** pointer); Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front); + static void ScavengeStoreBufferCallback(Heap* heap, + MemoryChunk* page, + StoreBufferEvent event); // Performs a major collection in the whole heap. void MarkCompact(GCTracer* tracer); // Code to be run before and after mark-compact. - void MarkCompactPrologue(bool is_compacting); - - // Completely clear the Instanceof cache (to stop it keeping objects alive - // around a GC). - inline void CompletelyClearInstanceofCache(); + void MarkCompactPrologue(); // Record statistics before and after garbage collection. void ReportStatisticsBeforeGC(); @@ -1551,12 +1670,11 @@ class Heap { static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); // Initializes a function with a shared part and prototype. - // Returns the function. // Note: this code was factored out of AllocateFunction such that // other parts of the VM could use it. Specifically, a function that creates // instances of type JS_FUNCTION_TYPE benefit from the use of this function. // Please note this does not perform a garbage collection. - MUST_USE_RESULT inline MaybeObject* InitializeFunction( + inline void InitializeFunction( JSFunction* function, SharedFunctionInfo* shared, Object* prototype); @@ -1621,6 +1739,8 @@ class Heap { return high_survival_rate_period_length_ > 0; } + void SelectScavengingVisitorsTable(); + static const int kInitialSymbolTableSize = 2048; static const int kInitialEvalCacheSize = 64; @@ -1640,10 +1760,11 @@ class Heap { MarkCompactCollector mark_compact_collector_; - // This field contains the meaning of the WATERMARK_INVALIDATED flag. - // Instead of clearing this flag from all pages we just flip - // its meaning at the beginning of a scavenge. 
- intptr_t page_watermark_invalidated_mark_; + StoreBuffer store_buffer_; + + Marking marking_; + + IncrementalMarking incremental_marking_; int number_idle_notifications_; unsigned int last_idle_notification_gc_count_; @@ -1658,7 +1779,9 @@ class Heap { ExternalStringTable external_string_table_; - bool is_safe_to_read_maps_; + VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; + + MemoryChunk* chunks_queued_for_free_; friend class Factory; friend class GCTracer; @@ -1757,29 +1880,6 @@ class VerifyPointersVisitor: public ObjectVisitor { } } }; - - -// Visitor class to verify interior pointers in spaces that use region marks -// to keep track of intergenerational references. -// As VerifyPointersVisitor but also checks that dirty marks are set -// for regions covering intergenerational references. -class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor { - public: - void VisitPointers(Object** start, Object** end) { - for (Object** current = start; current < end; current++) { - if ((*current)->IsHeapObject()) { - HeapObject* object = HeapObject::cast(*current); - ASSERT(HEAP->Contains(object)); - ASSERT(object->map()->IsMap()); - if (HEAP->InNewSpace(object)) { - ASSERT(HEAP->InToSpace(object)); - Address addr = reinterpret_cast<Address>(current); - ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr)); - } - } - } - } -}; #endif @@ -2112,16 +2212,6 @@ class GCTracer BASE_EMBEDDED { // Sets the full GC count. void set_full_gc_count(int count) { full_gc_count_ = count; } - // Sets the flag that this is a compacting full GC. - void set_is_compacting() { is_compacting_ = true; } - bool is_compacting() const { return is_compacting_; } - - // Increment and decrement the count of marked objects. - void increment_marked_count() { ++marked_count_; } - void decrement_marked_count() { --marked_count_; } - - int marked_count() { return marked_count_; } - void increment_promoted_objects_size(int object_size) { promoted_objects_size_ += object_size; } @@ -2146,23 +2236,6 @@ class GCTracer BASE_EMBEDDED { // A count (including this one) of the number of full garbage collections. int full_gc_count_; - // True if the current GC is a compacting full collection, false - // otherwise. - bool is_compacting_; - - // True if the *previous* full GC cwas a compacting collection (will be - // false if there has not been a previous full GC). - bool previous_has_compacted_; - - // On a full GC, a count of the number of marked objects. Incremented - // when an object is marked and decremented when an object's mark bit is - // cleared. Will be zero on a scavenge collection. - int marked_count_; - - // The count from the end of the previous full GC. Will be zero if there - // was no previous full GC. - int previous_marked_count_; - // Amounts of time spent in different scopes during GC. double scopes_[Scope::kNumberOfScopes]; @@ -2181,6 +2254,13 @@ class GCTracer BASE_EMBEDDED { // Size of objects promoted during the current collection. intptr_t promoted_objects_size_; + // Incremental marking steps counters. + int steps_count_; + double steps_took_; + double longest_step_; + int steps_count_since_last_gc_; + double steps_took_since_last_gc_; + Heap* heap_; }; @@ -2292,6 +2372,46 @@ class WeakObjectRetainer { }; +// Intrusive object marking uses least significant bit of +// heap object's map word to mark objects. +// Normally all map words have least significant bit set +// because they contain tagged map pointer. +// If the bit is not set object is marked. 
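[Editor's sketch] The IntrusiveMarking class that follows relies on a tagged map pointer always having bit 0 set, so clearing that bit can temporarily encode "marked" in the map word itself while still letting the original map be recovered. The standalone demo below walks through the same bit manipulation with plain uintptr_t values standing in for real map words.

#include <cassert>
#include <cstdint>

const uintptr_t kNotMarkedBit = 0x1;

bool IsMarked(uintptr_t map_word)       { return (map_word & kNotMarkedBit) == 0; }
uintptr_t SetMark(uintptr_t map_word)   { return map_word & ~kNotMarkedBit; }
uintptr_t ClearMark(uintptr_t map_word) { return map_word | kNotMarkedBit; }
uintptr_t OriginalMapWord(uintptr_t map_word) { return map_word | kNotMarkedBit; }

int main() {
  uintptr_t map_word = 0x12345671;       // any tagged pointer: bit 0 is set
  assert(!IsMarked(map_word));

  uintptr_t marked = SetMark(map_word);  // bit 0 cleared means "marked"
  assert(IsMarked(marked));
  assert(OriginalMapWord(marked) == map_word);  // the map is still recoverable

  assert(!IsMarked(ClearMark(marked)));  // must be undone before running JS
  return 0;
}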
+// All objects should be unmarked before resuming +// JavaScript execution. +class IntrusiveMarking { + public: + static bool IsMarked(HeapObject* object) { + return (object->map_word().ToRawValue() & kNotMarkedBit) == 0; + } + + static void ClearMark(HeapObject* object) { + uintptr_t map_word = object->map_word().ToRawValue(); + object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit)); + ASSERT(!IsMarked(object)); + } + + static void SetMark(HeapObject* object) { + uintptr_t map_word = object->map_word().ToRawValue(); + object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit)); + ASSERT(IsMarked(object)); + } + + static Map* MapOfMarkedObject(HeapObject* object) { + uintptr_t map_word = object->map_word().ToRawValue(); + return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap(); + } + + static int SizeOfMarkedObject(HeapObject* object) { + return object->SizeFromMap(MapOfMarkedObject(object)); + } + + private: + static const uintptr_t kNotMarkedBit = 0x1; + STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0); +}; + + #if defined(DEBUG) || defined(LIVE_OBJECT_LIST) // Helper class for tracing paths to a search target Object from all roots. // The TracePathFrom() method can be used to trace paths from a specific @@ -2350,7 +2470,6 @@ class PathTracer : public ObjectVisitor { }; #endif // DEBUG || LIVE_OBJECT_LIST - } } // namespace v8::internal #undef HEAP diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc index 5630ce391..fd0c3bb0d 100644 --- a/deps/v8/src/hydrogen-instructions.cc +++ b/deps/v8/src/hydrogen-instructions.cc @@ -707,6 +707,14 @@ void HUnaryControlInstruction::PrintDataTo(StringStream* stream) { } +void HIsNilAndBranch::PrintDataTo(StringStream* stream) { + value()->PrintNameTo(stream); + stream->Add(kind() == kStrictEquality ? " === " : " == "); + stream->Add(nil() == kNullValue ? 
"null" : "undefined"); + HControlInstruction::PrintDataTo(stream); +} + + void HReturn::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); } @@ -777,15 +785,22 @@ void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" == "); stream->Add(type_literal_->GetFlatContent().ToAsciiVector()); + HControlInstruction::PrintDataTo(stream); +} + + +void HTypeof::PrintDataTo(StringStream* stream) { + value()->PrintNameTo(stream); } void HChange::PrintDataTo(StringStream* stream) { HUnaryOperation::PrintDataTo(stream); - stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic()); + stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic()); if (CanTruncateToInt32()) stream->Add(" truncating-int32"); if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?"); + if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined"); } @@ -857,6 +872,23 @@ void HCheckFunction::PrintDataTo(StringStream* stream) { } +const char* HCheckInstanceType::GetCheckName() { + switch (check_) { + case IS_SPEC_OBJECT: return "object"; + case IS_JS_ARRAY: return "array"; + case IS_STRING: return "string"; + case IS_SYMBOL: return "symbol"; + } + UNREACHABLE(); + return ""; +} + +void HCheckInstanceType::PrintDataTo(StringStream* stream) { + stream->Add("%s ", GetCheckName()); + HUnaryOperation::PrintDataTo(stream); +} + + void HCallStub::PrintDataTo(StringStream* stream) { stream->Add("%s ", CodeStub::MajorName(major_key_, false)); @@ -1311,6 +1343,14 @@ void HCompareIDAndBranch::PrintDataTo(StringStream* stream) { } +void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) { + left()->PrintNameTo(stream); + stream->Add(" "); + right()->PrintNameTo(stream); + HControlInstruction::PrintDataTo(stream); +} + + void HGoto::PrintDataTo(StringStream* stream) { stream->Add("B%d", SuccessorAt(0)->block_id()); } @@ -1425,7 +1465,7 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) { } -bool HLoadKeyedFastElement::RequiresHoleCheck() const { +bool HLoadKeyedFastElement::RequiresHoleCheck() { for (HUseIterator it(uses()); !it.Done(); it.Advance()) { HValue* use = it.value(); if (!use->IsChange()) return true; @@ -1442,11 +1482,6 @@ void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) { } -bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const { - return true; -} - - void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); stream->Add("["); @@ -1488,6 +1523,7 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo( stream->Add("pixel"); break; case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -1582,6 +1618,7 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo( case EXTERNAL_PIXEL_ELEMENTS: stream->Add("pixel"); break; + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: @@ -1598,7 +1635,18 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo( void HLoadGlobalCell::PrintDataTo(StringStream* stream) { stream->Add("[%p]", *cell()); - if (check_hole_value()) stream->Add(" (deleteable/read-only)"); + if (!details_.IsDontDelete()) stream->Add(" (deleteable)"); + if (details_.IsReadOnly()) stream->Add(" (read-only)"); +} + + +bool HLoadGlobalCell::RequiresHoleCheck() { + if (details_.IsDontDelete() && !details_.IsReadOnly()) return false; + for (HUseIterator it(uses()); !it.Done(); it.Advance()) { + HValue* use = it.value(); + if (!use->IsChange()) 
return true; + } + return false; } @@ -1610,6 +1658,8 @@ void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) { void HStoreGlobalCell::PrintDataTo(StringStream* stream) { stream->Add("[%p] = ", *cell()); value()->PrintNameTo(stream); + if (!details_.IsDontDelete()) stream->Add(" (deleteable)"); + if (details_.IsReadOnly()) stream->Add(" (read-only)"); } diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h index 1bc28ba82..6b43f53da 100644 --- a/deps/v8/src/hydrogen-instructions.h +++ b/deps/v8/src/hydrogen-instructions.h @@ -118,7 +118,7 @@ class LChunkBuilder; V(InstanceOfKnownGlobal) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ - V(IsNullAndBranch) \ + V(IsNilAndBranch) \ V(IsObjectAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ @@ -625,7 +625,7 @@ class HValue: public ZoneObject { void ComputeInitialRange(); // Representation helpers. - virtual Representation RequiredInputRepresentation(int index) const = 0; + virtual Representation RequiredInputRepresentation(int index) = 0; virtual Representation InferredRepresentation() { return representation(); @@ -841,7 +841,7 @@ class HTemplateControlInstruction: public HControlInstruction { class HBlockEntry: public HTemplateInstruction<0> { public: - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -854,7 +854,7 @@ class HBlockEntry: public HTemplateInstruction<0> { // HSoftDeoptimize does not end a basic block as opposed to HDeoptimize. class HSoftDeoptimize: public HTemplateInstruction<0> { public: - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -866,7 +866,7 @@ class HDeoptimize: public HControlInstruction { public: explicit HDeoptimize(int environment_length) : values_(environment_length) { } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -908,10 +908,10 @@ class HDeoptimize: public HControlInstruction { class HGoto: public HTemplateControlInstruction<1, 0> { public: explicit HGoto(HBasicBlock* target) { - SetSuccessorAt(0, target); - } + SetSuccessorAt(0, target); + } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -951,7 +951,7 @@ class HBranch: public HUnaryControlInstruction { : HUnaryControlInstruction(value, NULL, NULL) { } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -983,7 +983,7 @@ class HCompareMap: public HUnaryControlInstruction { Handle<Map> map() const { return map_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1000,7 +1000,7 @@ class HReturn: public HTemplateControlInstruction<0, 1> { SetOperandAt(0, value); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1014,7 +1014,7 @@ class HReturn: public HTemplateControlInstruction<0, 1> { class HAbnormalExit: public HTemplateControlInstruction<0, 
0> { public: - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -1049,7 +1049,7 @@ class HThrow: public HTemplateInstruction<2> { SetAllSideEffects(); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1064,7 +1064,7 @@ class HUseConst: public HUnaryOperation { public: explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -1083,7 +1083,7 @@ class HForceRepresentation: public HTemplateInstruction<1> { virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return representation(); // Same as the output representation. } @@ -1094,27 +1094,27 @@ class HForceRepresentation: public HTemplateInstruction<1> { class HChange: public HUnaryOperation { public: HChange(HValue* value, - Representation from, Representation to, bool is_truncating, bool deoptimize_on_undefined) - : HUnaryOperation(value), - from_(from), - deoptimize_on_undefined_(deoptimize_on_undefined) { - ASSERT(!from.IsNone() && !to.IsNone()); - ASSERT(!from.Equals(to)); + : HUnaryOperation(value) { + ASSERT(!value->representation().IsNone() && !to.IsNone()); + ASSERT(!value->representation().Equals(to)); set_representation(to); SetFlag(kUseGVN); + if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined); if (is_truncating) SetFlag(kTruncatingToInt32); } virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); - Representation from() const { return from_; } - Representation to() const { return representation(); } - bool deoptimize_on_undefined() const { return deoptimize_on_undefined_; } - virtual Representation RequiredInputRepresentation(int index) const { - return from_; + Representation from() { return value()->representation(); } + Representation to() { return representation(); } + bool deoptimize_on_undefined() const { + return CheckFlag(kDeoptimizeOnUndefined); + } + virtual Representation RequiredInputRepresentation(int index) { + return from(); } virtual Range* InferRange(); @@ -1124,16 +1124,7 @@ class HChange: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(Change) protected: - virtual bool DataEquals(HValue* other) { - if (!other->IsChange()) return false; - HChange* change = HChange::cast(other); - return to().Equals(change->to()) - && deoptimize_on_undefined() == change->deoptimize_on_undefined(); - } - - private: - Representation from_; - bool deoptimize_on_undefined_; + virtual bool DataEquals(HValue* other) { return true; } }; @@ -1145,7 +1136,7 @@ class HClampToUint8: public HUnaryOperation { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -1164,7 +1155,7 @@ class HToInt32: public HUnaryOperation { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -1223,7 +1214,7 @@ class HSimulate: public HInstruction { virtual int OperandCount() 
{ return values_.length(); } virtual HValue* OperandAt(int index) { return values_[index]; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -1268,7 +1259,7 @@ class HStackCheck: public HTemplateInstruction<1> { HValue* context() { return OperandAt(0); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1306,7 +1297,7 @@ class HEnterInlined: public HTemplateInstruction<0> { FunctionLiteral* function() const { return function_; } CallKind call_kind() const { return call_kind_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -1323,7 +1314,7 @@ class HLeaveInlined: public HTemplateInstruction<0> { public: HLeaveInlined() {} - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -1337,7 +1328,7 @@ class HPushArgument: public HUnaryOperation { set_representation(Representation::Tagged()); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1354,7 +1345,7 @@ class HThisFunction: public HTemplateInstruction<0> { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -1372,7 +1363,7 @@ class HContext: public HTemplateInstruction<0> { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -1392,7 +1383,7 @@ class HOuterContext: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(OuterContext); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1410,7 +1401,7 @@ class HGlobalObject: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(GlobalObject) - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1429,7 +1420,7 @@ class HGlobalReceiver: public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver) - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1465,7 +1456,7 @@ class HUnaryCall: public HCall<1> { SetOperandAt(0, value); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1485,7 +1476,7 @@ class HBinaryCall: public HCall<2> { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1500,7 +1491,7 @@ class HInvokeFunction: public HBinaryCall { : HBinaryCall(context, function, argument_count) { } - virtual Representation 
RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1525,7 +1516,7 @@ class HCallConstantFunction: public HCall<0> { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -1542,7 +1533,7 @@ class HCallKeyed: public HBinaryCall { : HBinaryCall(context, key, argument_count) { } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1566,7 +1557,7 @@ class HCallNamed: public HUnaryCall { DECLARE_CONCRETE_INSTRUCTION(CallNamed) - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1583,7 +1574,7 @@ class HCallFunction: public HUnaryCall { HValue* context() { return value(); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1602,7 +1593,7 @@ class HCallGlobal: public HUnaryCall { HValue* context() { return value(); } Handle<String> name() const { return name_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1622,7 +1613,7 @@ class HCallKnownGlobal: public HCall<0> { Handle<JSFunction> target() const { return target_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -1639,7 +1630,7 @@ class HCallNew: public HBinaryCall { : HBinaryCall(context, constructor, argument_count) { } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1666,7 +1657,7 @@ class HCallRuntime: public HCall<1> { const Runtime::Function* function() const { return c_function_; } Handle<String> name() const { return name_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1692,7 +1683,7 @@ class HJSArrayLength: public HTemplateInstruction<2> { SetFlag(kDependsOnMaps); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1716,7 +1707,7 @@ class HFixedArrayBaseLength: public HUnaryOperation { SetFlag(kDependsOnArrayLengths); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1735,7 +1726,7 @@ class HElementsKind: public HUnaryOperation { SetFlag(kDependsOnMaps); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1754,7 +1745,7 @@ class HBitNot: public HUnaryOperation { SetFlag(kTruncatingToInt32); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation 
RequiredInputRepresentation(int index) { return Representation::Integer32(); } virtual HType CalculateInferredType(); @@ -1804,7 +1795,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> { virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { if (index == 0) { return Representation::Tagged(); } else { @@ -1861,7 +1852,7 @@ class HLoadElements: public HUnaryOperation { SetFlag(kDependsOnMaps); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1884,7 +1875,7 @@ class HLoadExternalArrayPointer: public HUnaryOperation { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -1908,7 +1899,7 @@ class HCheckMap: public HTemplateInstruction<2> { SetFlag(kDependsOnMaps); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } virtual void PrintDataTo(StringStream* stream); @@ -1938,7 +1929,7 @@ class HCheckFunction: public HUnaryOperation { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } virtual void PrintDataTo(StringStream* stream); @@ -1978,7 +1969,9 @@ class HCheckInstanceType: public HUnaryOperation { return new HCheckInstanceType(value, IS_SYMBOL); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual void PrintDataTo(StringStream* stream); + + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2008,6 +2001,8 @@ class HCheckInstanceType: public HUnaryOperation { LAST_INTERVAL_CHECK = IS_JS_ARRAY }; + const char* GetCheckName(); + HCheckInstanceType(HValue* value, Check check) : HUnaryOperation(value), check_(check) { set_representation(Representation::Tagged()); @@ -2025,7 +2020,7 @@ class HCheckNonSmi: public HUnaryOperation { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2071,7 +2066,7 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> { DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps) - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -2102,7 +2097,7 @@ class HCheckSmi: public HUnaryOperation { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } virtual HType CalculateInferredType(); @@ -2151,7 +2146,7 @@ class HPhi: public HValue { } virtual Range* InferRange(); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return representation(); } virtual HType CalculateInferredType(); @@ -2243,7 +2238,7 @@ class HArgumentsObject: public HTemplateInstruction<0> { SetFlag(kIsArguments); } - virtual Representation 
RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -2259,7 +2254,20 @@ class HConstant: public HTemplateInstruction<0> { bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); } - virtual Representation RequiredInputRepresentation(int index) const { + bool ImmortalImmovable() const { + Heap* heap = HEAP; + if (*handle_ == heap->undefined_value()) return true; + if (*handle_ == heap->null_value()) return true; + if (*handle_ == heap->true_value()) return true; + if (*handle_ == heap->false_value()) return true; + if (*handle_ == heap->the_hole_value()) return true; + if (*handle_ == heap->minus_zero_value()) return true; + if (*handle_ == heap->nan_value()) return true; + if (*handle_ == heap->empty_string()) return true; + return false; + } + + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -2367,7 +2375,7 @@ class HApplyArguments: public HTemplateInstruction<4> { SetAllSideEffects(); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { // The length is untagged, all other inputs are tagged. return (index == 2) ? Representation::Integer32() @@ -2394,7 +2402,7 @@ class HArgumentsElements: public HTemplateInstruction<0> { DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements) - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -2410,7 +2418,7 @@ class HArgumentsLength: public HUnaryOperation { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2433,7 +2441,7 @@ class HAccessArgumentsAt: public HTemplateInstruction<3> { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { // The arguments elements is considered tagged. return index == 0 ? Representation::Tagged() @@ -2459,7 +2467,7 @@ class HBoundsCheck: public HTemplateInstruction<2> { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Integer32(); } @@ -2484,7 +2492,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation { SetAllSideEffects(); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return index == 0 ? Representation::Tagged() : representation(); @@ -2522,7 +2530,7 @@ class HArithmeticBinaryOperation: public HBinaryOperation { } virtual HType CalculateInferredType(); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return index == 0 ? 
Representation::Tagged() : representation(); @@ -2549,7 +2557,7 @@ class HCompareGeneric: public HBinaryOperation { SetAllSideEffects(); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2587,7 +2595,7 @@ class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> { return input_representation_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return input_representation_; } virtual void PrintDataTo(StringStream* stream); @@ -2610,7 +2618,9 @@ class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> { HValue* left() { return OperandAt(0); } HValue* right() { return OperandAt(1); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual void PrintDataTo(StringStream* stream); + + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2629,7 +2639,7 @@ class HCompareConstantEqAndBranch: public HUnaryControlInstruction { HValue* left() { return value(); } int right() const { return right_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Integer32(); } @@ -2641,21 +2651,25 @@ class HCompareConstantEqAndBranch: public HUnaryControlInstruction { }; -class HIsNullAndBranch: public HUnaryControlInstruction { +class HIsNilAndBranch: public HUnaryControlInstruction { public: - HIsNullAndBranch(HValue* value, bool is_strict) - : HUnaryControlInstruction(value, NULL, NULL), is_strict_(is_strict) { } + HIsNilAndBranch(HValue* value, EqualityKind kind, NilValue nil) + : HUnaryControlInstruction(value, NULL, NULL), kind_(kind), nil_(nil) { } + + EqualityKind kind() const { return kind_; } + NilValue nil() const { return nil_; } - bool is_strict() const { return is_strict_; } + virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } - DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch) + DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch) private: - bool is_strict_; + EqualityKind kind_; + NilValue nil_; }; @@ -2664,7 +2678,7 @@ class HIsObjectAndBranch: public HUnaryControlInstruction { explicit HIsObjectAndBranch(HValue* value) : HUnaryControlInstruction(value, NULL, NULL) { } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2679,7 +2693,7 @@ class HIsSmiAndBranch: public HUnaryControlInstruction { DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch) - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2693,7 +2707,7 @@ class HIsUndetectableAndBranch: public HUnaryControlInstruction { explicit HIsUndetectableAndBranch(HValue* value) : HUnaryControlInstruction(value, NULL, NULL) { } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2703,7 +2717,7 @@ class HIsUndetectableAndBranch: public HUnaryControlInstruction { class HIsConstructCallAndBranch: public 
HTemplateControlInstruction<2, 0> { public: - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -2725,7 +2739,7 @@ class HHasInstanceTypeAndBranch: public HUnaryControlInstruction { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2742,7 +2756,7 @@ class HHasCachedArrayIndexAndBranch: public HUnaryControlInstruction { explicit HHasCachedArrayIndexAndBranch(HValue* value) : HUnaryControlInstruction(value, NULL, NULL) { } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2757,7 +2771,7 @@ class HGetCachedArrayIndex: public HUnaryOperation { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2776,7 +2790,7 @@ class HClassOfTestAndBranch: public HUnaryControlInstruction { DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch) - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2800,7 +2814,7 @@ class HTypeofIsAndBranch: public HUnaryControlInstruction { DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch) - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2817,7 +2831,7 @@ class HInstanceOf: public HBinaryOperation { SetAllSideEffects(); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2845,7 +2859,7 @@ class HInstanceOfKnownGlobal: public HTemplateInstruction<2> { HValue* left() { return OperandAt(1); } Handle<JSFunction> function() { return function_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -2870,7 +2884,7 @@ class HPower: public HTemplateInstruction<2> { HValue* left() { return OperandAt(0); } HValue* right() { return OperandAt(1); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return index == 0 ? 
Representation::Double() : Representation::None(); @@ -3099,7 +3113,7 @@ class HOsrEntry: public HTemplateInstruction<0> { int ast_id() const { return ast_id_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -3120,7 +3134,7 @@ class HParameter: public HTemplateInstruction<0> { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -3152,7 +3166,7 @@ class HCallStub: public HUnaryCall { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3168,7 +3182,7 @@ class HUnknownOSRValue: public HTemplateInstruction<0> { public: HUnknownOSRValue() { set_representation(Representation::Tagged()); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -3178,15 +3192,15 @@ class HUnknownOSRValue: public HTemplateInstruction<0> { class HLoadGlobalCell: public HTemplateInstruction<0> { public: - HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value) - : cell_(cell), check_hole_value_(check_hole_value) { + HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details) + : cell_(cell), details_(details) { set_representation(Representation::Tagged()); SetFlag(kUseGVN); SetFlag(kDependsOnGlobalVars); } Handle<JSGlobalPropertyCell> cell() const { return cell_; } - bool check_hole_value() const { return check_hole_value_; } + bool RequiresHoleCheck(); virtual void PrintDataTo(StringStream* stream); @@ -3195,7 +3209,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> { return reinterpret_cast<intptr_t>(*cell_); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } @@ -3209,7 +3223,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> { private: Handle<JSGlobalPropertyCell> cell_; - bool check_hole_value_; + PropertyDetails details_; }; @@ -3234,7 +3248,7 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3250,17 +3264,19 @@ class HStoreGlobalCell: public HUnaryOperation { public: HStoreGlobalCell(HValue* value, Handle<JSGlobalPropertyCell> cell, - bool check_hole_value) + PropertyDetails details) : HUnaryOperation(value), cell_(cell), - check_hole_value_(check_hole_value) { + details_(details) { SetFlag(kChangesGlobalVars); } Handle<JSGlobalPropertyCell> cell() const { return cell_; } - bool check_hole_value() const { return check_hole_value_; } + bool RequiresHoleCheck() { + return !details_.IsDontDelete() || details_.IsReadOnly(); + } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } virtual void PrintDataTo(StringStream* stream); @@ -3269,7 +3285,7 @@ class HStoreGlobalCell: public HUnaryOperation { private: 
Handle<JSGlobalPropertyCell> cell_; - bool check_hole_value_; + PropertyDetails details_; }; @@ -3297,7 +3313,7 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3320,7 +3336,7 @@ class HLoadContextSlot: public HUnaryOperation { int slot_index() const { return slot_index_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3342,7 +3358,7 @@ class HLoadContextSlot: public HUnaryOperation { static inline bool StoringValueNeedsWriteBarrier(HValue* value) { return !value->type().IsBoolean() && !value->type().IsSmi() - && !(value->IsConstant() && HConstant::cast(value)->InOldSpace()); + && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable()); } @@ -3363,7 +3379,7 @@ class HStoreContextSlot: public HTemplateInstruction<2> { return StoringValueNeedsWriteBarrier(value()); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3396,7 +3412,7 @@ class HLoadNamedField: public HUnaryOperation { bool is_in_object() const { return is_in_object_; } int offset() const { return offset_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } virtual void PrintDataTo(StringStream* stream); @@ -3428,7 +3444,7 @@ class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> { Handle<String> name() { return name_; } bool need_generic() { return need_generic_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3463,7 +3479,7 @@ class HLoadNamedGeneric: public HTemplateInstruction<2> { HValue* object() { return OperandAt(1); } Handle<Object> name() const { return name_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3487,7 +3503,7 @@ class HLoadFunctionPrototype: public HUnaryOperation { HValue* function() { return OperandAt(0); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3511,7 +3527,7 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> { HValue* object() { return OperandAt(0); } HValue* key() { return OperandAt(1); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { // The key is supposed to be Integer32. return index == 0 ? 
Representation::Tagged() @@ -3520,7 +3536,7 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> { virtual void PrintDataTo(StringStream* stream); - bool RequiresHoleCheck() const; + bool RequiresHoleCheck(); DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement) @@ -3542,7 +3558,7 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> { HValue* elements() { return OperandAt(0); } HValue* key() { return OperandAt(1); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { // The key is supposed to be Integer32. return index == 0 ? Representation::Tagged() @@ -3551,8 +3567,6 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> { virtual void PrintDataTo(StringStream* stream); - bool RequiresHoleCheck() const; - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement) protected: @@ -3582,7 +3596,7 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { // The key is supposed to be Integer32, but the base pointer // for the element load is a naked pointer. return index == 0 @@ -3625,7 +3639,7 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3654,7 +3668,7 @@ class HStoreNamedField: public HTemplateInstruction<2> { DECLARE_CONCRETE_INSTRUCTION(StoreNamedField) - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } virtual void PrintDataTo(StringStream* stream); @@ -3703,7 +3717,7 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3717,14 +3731,16 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> { class HStoreKeyedFastElement: public HTemplateInstruction<3> { public: - HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) { + HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val, + ElementsKind elements_kind = FAST_ELEMENTS) + : elements_kind_(elements_kind) { SetOperandAt(0, obj); SetOperandAt(1, key); SetOperandAt(2, val); SetFlag(kChangesArrayElements); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { // The key is supposed to be Integer32. return index == 1 ? 
Representation::Integer32() @@ -3734,14 +3750,28 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> { HValue* object() { return OperandAt(0); } HValue* key() { return OperandAt(1); } HValue* value() { return OperandAt(2); } + bool value_is_smi() { + return elements_kind_ == FAST_SMI_ONLY_ELEMENTS; + } bool NeedsWriteBarrier() { - return StoringValueNeedsWriteBarrier(value()); + if (value_is_smi()) { + return false; + } else { + return StoringValueNeedsWriteBarrier(value()); + } + } + + bool ValueNeedsSmiCheck() { + return value_is_smi(); } virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement) + + private: + ElementsKind elements_kind_; }; @@ -3756,7 +3786,7 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> { SetFlag(kChangesDoubleArrayElements); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { if (index == 1) { return Representation::Integer32(); } else if (index == 2) { @@ -3795,7 +3825,7 @@ class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> { virtual void PrintDataTo(StringStream* stream); - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { if (index == 0) { return Representation::External(); } else { @@ -3843,7 +3873,7 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> { HValue* context() { return OperandAt(3); } bool strict_mode() { return strict_mode_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3865,7 +3895,7 @@ class HStringAdd: public HBinaryOperation { SetFlag(kDependsOnMaps); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -3891,7 +3921,7 @@ class HStringCharCodeAt: public HTemplateInstruction<3> { SetFlag(kDependsOnMaps); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { // The index is supposed to be Integer32. return index == 2 ? Representation::Integer32() @@ -3922,7 +3952,7 @@ class HStringCharFromCode: public HTemplateInstruction<2> { SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return index == 0 ? 
Representation::Tagged() : Representation::Integer32(); @@ -3945,7 +3975,7 @@ class HStringLength: public HUnaryOperation { SetFlag(kDependsOnMaps); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -4001,7 +4031,7 @@ class HArrayLiteral: public HMaterializedLiteral<1> { bool IsCopyOnWrite() const; - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -4035,7 +4065,7 @@ class HObjectLiteral: public HMaterializedLiteral<1> { bool fast_elements() const { return fast_elements_; } bool has_function() const { return has_function_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -4064,7 +4094,7 @@ class HRegExpLiteral: public HMaterializedLiteral<1> { Handle<String> pattern() { return pattern_; } Handle<String> flags() { return flags_; } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -4088,7 +4118,7 @@ class HFunctionLiteral: public HTemplateInstruction<1> { HValue* context() { return OperandAt(0); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -4114,7 +4144,9 @@ class HTypeof: public HTemplateInstruction<2> { HValue* context() { return OperandAt(0); } HValue* value() { return OperandAt(1); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual void PrintDataTo(StringStream* stream); + + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -4132,7 +4164,7 @@ class HToFastProperties: public HUnaryOperation { set_representation(Representation::Tagged()); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -4146,7 +4178,7 @@ class HValueOf: public HUnaryOperation { set_representation(Representation::Tagged()); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -4162,7 +4194,7 @@ class HDeleteProperty: public HBinaryOperation { SetAllSideEffects(); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } @@ -4189,7 +4221,7 @@ class HIn: public HTemplateInstruction<3> { HValue* key() { return OperandAt(1); } HValue* object() { return OperandAt(2); } - virtual Representation RequiredInputRepresentation(int index) const { + virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc index cca168a96..2d471cc29 100644 --- a/deps/v8/src/hydrogen.cc +++ b/deps/v8/src/hydrogen.cc @@ -422,7 +422,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED { }; -void HGraph::Verify() const { +void HGraph::Verify(bool do_full_verify) const { for (int i = 0; i < blocks_.length(); i++) { HBasicBlock* block = blocks_.at(i); @@ -473,25 +473,27 @@ void 
HGraph::Verify() const { // Check special property of first block to have no predecessors. ASSERT(blocks_.at(0)->predecessors()->is_empty()); - // Check that the graph is fully connected. - ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL); - ASSERT(analyzer.visited_count() == blocks_.length()); + if (do_full_verify) { + // Check that the graph is fully connected. + ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL); + ASSERT(analyzer.visited_count() == blocks_.length()); - // Check that entry block dominator is NULL. - ASSERT(entry_block_->dominator() == NULL); + // Check that entry block dominator is NULL. + ASSERT(entry_block_->dominator() == NULL); - // Check dominators. - for (int i = 0; i < blocks_.length(); ++i) { - HBasicBlock* block = blocks_.at(i); - if (block->dominator() == NULL) { - // Only start block may have no dominator assigned to. - ASSERT(i == 0); - } else { - // Assert that block is unreachable if dominator must not be visited. - ReachabilityAnalyzer dominator_analyzer(entry_block_, - blocks_.length(), - block->dominator()); - ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id())); + // Check dominators. + for (int i = 0; i < blocks_.length(); ++i) { + HBasicBlock* block = blocks_.at(i); + if (block->dominator() == NULL) { + // Only start block may have no dominator assigned to. + ASSERT(i == 0); + } else { + // Assert that block is unreachable if dominator must not be visited. + ReachabilityAnalyzer dominator_analyzer(entry_block_, + blocks_.length(), + block->dominator()); + ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id())); + } } } } @@ -850,7 +852,7 @@ void HGraph::EliminateUnreachablePhis() { } -bool HGraph::CheckPhis() { +bool HGraph::CheckArgumentsPhiUses() { int block_count = blocks_.length(); for (int i = 0; i < block_count; ++i) { for (int j = 0; j < blocks_[i]->phis()->length(); ++j) { @@ -863,13 +865,11 @@ bool HGraph::CheckPhis() { } -bool HGraph::CollectPhis() { +bool HGraph::CheckConstPhiUses() { int block_count = blocks_.length(); - phi_list_ = new ZoneList<HPhi*>(block_count); for (int i = 0; i < block_count; ++i) { for (int j = 0; j < blocks_[i]->phis()->length(); ++j) { HPhi* phi = blocks_[i]->phis()->at(j); - phi_list_->Add(phi); // Check for the hole value (from an uninitialized const). for (int k = 0; k < phi->OperandCount(); k++) { if (phi->OperandAt(k) == GetConstantHole()) return false; @@ -880,6 +880,18 @@ bool HGraph::CollectPhis() { } +void HGraph::CollectPhis() { + int block_count = blocks_.length(); + phi_list_ = new ZoneList<HPhi*>(block_count); + for (int i = 0; i < block_count; ++i) { + for (int j = 0; j < blocks_[i]->phis()->length(); ++j) { + HPhi* phi = blocks_[i]->phis()->at(j); + phi_list_->Add(phi); + } + } +} + + void HGraph::InferTypes(ZoneList<HValue*>* worklist) { BitVector in_worklist(GetMaximumValueID()); for (int i = 0; i < worklist->length(); ++i) { @@ -1848,7 +1860,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value, } if (new_value == NULL) { - new_value = new(zone()) HChange(value, value->representation(), to, + new_value = new(zone()) HChange(value, to, is_truncating, deoptimize_on_undefined); } @@ -2299,7 +2311,7 @@ HGraph* HGraphBuilder::CreateGraph() { // Handle implicit declaration of the function name in named function // expressions before other declarations. 
if (scope->is_function_scope() && scope->function() != NULL) { - HandleDeclaration(scope->function(), Variable::CONST, NULL); + HandleDeclaration(scope->function(), CONST, NULL); } VisitDeclarations(scope->declarations()); AddSimulate(AstNode::kDeclarationsId); @@ -2320,17 +2332,24 @@ HGraph* HGraphBuilder::CreateGraph() { graph()->OrderBlocks(); graph()->AssignDominators(); + +#ifdef DEBUG + // Do a full verify after building the graph and computing dominators. + graph()->Verify(true); +#endif + graph()->PropagateDeoptimizingMark(); - graph()->EliminateRedundantPhis(); - if (!graph()->CheckPhis()) { - Bailout("Unsupported phi use of arguments object"); + if (!graph()->CheckConstPhiUses()) { + Bailout("Unsupported phi use of const variable"); return NULL; } - if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis(); - if (!graph()->CollectPhis()) { - Bailout("Unsupported phi use of uninitialized constant"); + graph()->EliminateRedundantPhis(); + if (!graph()->CheckArgumentsPhiUses()) { + Bailout("Unsupported phi use of arguments"); return NULL; } + if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis(); + graph()->CollectPhis(); HInferRepresentation rep(graph()); rep.Analyze(); @@ -3122,11 +3141,21 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) { ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); Variable* variable = expr->var(); - if (variable->mode() == Variable::LET) { + if (variable->mode() == LET) { return Bailout("reference to let variable"); } switch (variable->location()) { case Variable::UNALLOCATED: { + // Handle known global constants like 'undefined' specially to avoid a + // load from a global cell for them. + Handle<Object> constant_value = + isolate()->factory()->GlobalConstantFor(variable->name()); + if (!constant_value.is_null()) { + HConstant* instr = + new(zone()) HConstant(constant_value, Representation::Tagged()); + return ast_context()->ReturnInstruction(instr, expr->id()); + } + LookupResult lookup; GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, false); @@ -3139,8 +3168,8 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) { if (type == kUseCell) { Handle<GlobalObject> global(info()->global_object()); Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup)); - bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly(); - HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole); + HLoadGlobalCell* instr = + new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails()); return ast_context()->ReturnInstruction(instr, expr->id()); } else { HValue* context = environment()->LookupContext(); @@ -3159,7 +3188,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) { case Variable::PARAMETER: case Variable::LOCAL: { HValue* value = environment()->Lookup(variable); - if (variable->mode() == Variable::CONST && + if (variable->mode() == CONST && value == graph()->GetConstantHole()) { return Bailout("reference to uninitialized const variable"); } @@ -3167,7 +3196,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) { } case Variable::CONTEXT: { - if (variable->mode() == Variable::CONST) { + if (variable->mode() == CONST) { return Bailout("reference to const context slot"); } HValue* context = BuildContextChainWalk(variable); @@ -3317,7 +3346,43 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { HValue* key = AddInstruction( new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)), Representation::Integer32())); + HInstruction* 
elements_kind = + AddInstruction(new(zone()) HElementsKind(literal)); + HBasicBlock* store_fast = graph()->CreateBasicBlock(); + // Two empty blocks to satisfy edge split form. + HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock(); + HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock(); + HBasicBlock* store_generic = graph()->CreateBasicBlock(); + HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock(); + HBasicBlock* join = graph()->CreateBasicBlock(); + + HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value); + smicheck->SetSuccessorAt(0, store_fast_edgesplit1); + smicheck->SetSuccessorAt(1, check_smi_only_elements); + current_block()->Finish(smicheck); + store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast)); + + set_current_block(check_smi_only_elements); + HCompareConstantEqAndBranch* smi_elements_check = + new(zone()) HCompareConstantEqAndBranch(elements_kind, + FAST_SMI_ONLY_ELEMENTS, + Token::EQ_STRICT); + smi_elements_check->SetSuccessorAt(0, store_generic); + smi_elements_check->SetSuccessorAt(1, store_fast_edgesplit2); + current_block()->Finish(smi_elements_check); + store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast)); + + set_current_block(store_fast); AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value)); + store_fast->Goto(join); + + set_current_block(store_generic); + AddInstruction(BuildStoreKeyedGeneric(literal, key, value)); + store_generic->Goto(join); + + join->SetJoinId(expr->id()); + set_current_block(join); + AddSimulate(expr->GetIdForElement(i)); } return ast_context()->ReturnValue(Pop()); @@ -3561,10 +3626,10 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var, LookupResult lookup; GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true); if (type == kUseCell) { - bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly(); Handle<GlobalObject> global(info()->global_object()); Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup)); - HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole); + HInstruction* instr = + new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails()); instr->set_position(position); AddInstruction(instr); if (instr->HasSideEffects()) AddSimulate(ast_id); @@ -3598,7 +3663,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { if (proxy != NULL) { Variable* var = proxy->var(); - if (var->mode() == Variable::CONST || var->mode() == Variable::LET) { + if (var->mode() == CONST || var->mode() == LET) { return Bailout("unsupported let or const compound assignment"); } @@ -3743,7 +3808,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) { HandlePropertyAssignment(expr); } else if (proxy != NULL) { Variable* var = proxy->var(); - if (var->mode() == Variable::CONST) { + if (var->mode() == CONST) { if (expr->op() != Token::INIT_CONST) { return Bailout("non-initializer assignment to const"); } @@ -3754,7 +3819,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) { // variables (e.g. initialization inside a loop). 
HValue* old_value = environment()->Lookup(var); AddInstruction(new HUseConst(old_value)); - } else if (var->mode() == Variable::LET) { + } else if (var->mode() == LET) { return Bailout("unsupported assignment to let"); } @@ -3782,7 +3847,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) { } case Variable::CONTEXT: { - ASSERT(var->mode() != Variable::CONST); + ASSERT(var->mode() != CONST); // Bail out if we try to mutate a parameter value in a function using // the arguments object. We do not (yet) correctly handle the // arguments property of the function. @@ -3928,6 +3993,7 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: break; + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: @@ -3944,6 +4010,30 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess( } +HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements, + HValue* checked_key, + HValue* val, + ElementsKind elements_kind, + bool is_store) { + if (is_store) { + ASSERT(val != NULL); + if (elements_kind == FAST_DOUBLE_ELEMENTS) { + return new(zone()) HStoreKeyedFastDoubleElement( + elements, checked_key, val); + } else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS. + return new(zone()) HStoreKeyedFastElement( + elements, checked_key, val, elements_kind); + } + } + // It's an element load (!is_store). + if (elements_kind == FAST_DOUBLE_ELEMENTS) { + return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key); + } else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS. + return new(zone()) HLoadKeyedFastElement(elements, checked_key); + } +} + + HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object, HValue* key, HValue* val, @@ -3951,17 +4041,20 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object, bool is_store) { ASSERT(expr->IsMonomorphic()); Handle<Map> map = expr->GetMonomorphicReceiverType(); - if (!map->has_fast_elements() && - !map->has_fast_double_elements() && + AddInstruction(new(zone()) HCheckNonSmi(object)); + HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map)); + bool fast_smi_only_elements = map->has_fast_smi_only_elements(); + bool fast_elements = map->has_fast_elements(); + bool fast_double_elements = map->has_fast_double_elements(); + if (!fast_smi_only_elements && + !fast_elements && + !fast_double_elements && !map->has_external_array_elements()) { return is_store ? 
BuildStoreKeyedGeneric(object, key, val) : BuildLoadKeyedGeneric(object, key); } - AddInstruction(new(zone()) HCheckNonSmi(object)); - HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map)); HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object)); - bool fast_double_elements = map->has_fast_double_elements(); - if (is_store && map->has_fast_elements()) { + if (is_store && (fast_elements || fast_smi_only_elements)) { AddInstruction(new(zone()) HCheckMap( elements, isolate()->factory()->fixed_array_map())); } @@ -3976,28 +4069,15 @@ HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object, return BuildExternalArrayElementAccess(external_elements, checked_key, val, map->elements_kind(), is_store); } - ASSERT(map->has_fast_elements() || fast_double_elements); + ASSERT(fast_smi_only_elements || fast_elements || fast_double_elements); if (map->instance_type() == JS_ARRAY_TYPE) { length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck)); } else { length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements)); } checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length)); - if (is_store) { - if (fast_double_elements) { - return new(zone()) HStoreKeyedFastDoubleElement(elements, - checked_key, - val); - } else { - return new(zone()) HStoreKeyedFastElement(elements, checked_key, val); - } - } else { - if (fast_double_elements) { - return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key); - } else { - return new(zone()) HLoadKeyedFastElement(elements, checked_key); - } - } + return BuildFastElementAccess(elements, checked_key, val, + map->elements_kind(), is_store); } @@ -4039,14 +4119,20 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, HLoadExternalArrayPointer* external_elements = NULL; HInstruction* checked_key = NULL; - // FAST_ELEMENTS is assumed to be the first case. - STATIC_ASSERT(FAST_ELEMENTS == 0); + // Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, + // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external + // arrays. + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); + STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); + STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); + STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); - for (ElementsKind elements_kind = FAST_ELEMENTS; + for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND; elements_kind <= LAST_ELEMENTS_KIND; elements_kind = ElementsKind(elements_kind + 1)) { - // After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we - // need to add some code that's executed for all external array cases. + // After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS, + // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code + // that's executed for all external array cases. 
STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND == LAST_ELEMENTS_KIND); if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND @@ -4068,15 +4154,25 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, set_current_block(if_true); HInstruction* access; - if (elements_kind == FAST_ELEMENTS || + if (elements_kind == FAST_SMI_ONLY_ELEMENTS || + elements_kind == FAST_ELEMENTS || elements_kind == FAST_DOUBLE_ELEMENTS) { - bool fast_double_elements = - elements_kind == FAST_DOUBLE_ELEMENTS; - if (is_store && elements_kind == FAST_ELEMENTS) { + if (is_store && elements_kind == FAST_SMI_ONLY_ELEMENTS) { + AddInstruction(new(zone()) HCheckSmi(val)); + } + if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) { AddInstruction(new(zone()) HCheckMap( elements, isolate()->factory()->fixed_array_map(), elements_kind_branch)); } + // TODO(jkummerow): The need for these two blocks could be avoided + // in one of two ways: + // (1) Introduce ElementsKinds for JSArrays that are distinct from + // those for fast objects. + // (2) Put the common instructions into a third "join" block. This + // requires additional AST IDs that we can deopt to from inside + // that join block. They must be added to the Property class (when + // it's a keyed property) and registered in the full codegen. HBasicBlock* if_jsarray = graph()->CreateBasicBlock(); HBasicBlock* if_fastobject = graph()->CreateBasicBlock(); HHasInstanceTypeAndBranch* typecheck = @@ -4086,29 +4182,15 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, current_block()->Finish(typecheck); set_current_block(if_jsarray); - HInstruction* length = new(zone()) HJSArrayLength(object, typecheck); - AddInstruction(length); + HInstruction* length; + length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck)); checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length)); - if (is_store) { - if (fast_double_elements) { - access = AddInstruction( - new(zone()) HStoreKeyedFastDoubleElement(elements, - checked_key, - val)); - } else { - access = AddInstruction( - new(zone()) HStoreKeyedFastElement(elements, checked_key, val)); - } - } else { - if (fast_double_elements) { - access = AddInstruction( - new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key)); - } else { - access = AddInstruction( - new(zone()) HLoadKeyedFastElement(elements, checked_key)); - } + access = AddInstruction(BuildFastElementAccess( + elements, checked_key, val, elements_kind, is_store)); + if (!is_store) { Push(access); } + *has_side_effects |= access->HasSideEffects(); if (position != -1) { access->set_position(position); @@ -4118,25 +4200,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, set_current_block(if_fastobject); length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements)); checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length)); - if (is_store) { - if (fast_double_elements) { - access = AddInstruction( - new(zone()) HStoreKeyedFastDoubleElement(elements, - checked_key, - val)); - } else { - access = AddInstruction( - new(zone()) HStoreKeyedFastElement(elements, checked_key, val)); - } - } else { - if (fast_double_elements) { - access = AddInstruction( - new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key)); - } else { - access = AddInstruction( - new(zone()) HLoadKeyedFastElement(elements, checked_key)); - } - } + access = AddInstruction(BuildFastElementAccess( + elements, checked_key, val, elements_kind, is_store)); } else if (elements_kind == DICTIONARY_ELEMENTS) { 
if (is_store) { access = AddInstruction(BuildStoreKeyedGeneric(object, key, val)); @@ -4474,20 +4539,25 @@ bool HGraphBuilder::TryInline(Call* expr) { return false; } - // No context change required. CompilationInfo* outer_info = info(); +#if !defined(V8_TARGET_ARCH_IA32) + // Target must be able to use caller's context. if (target->context() != outer_info->closure()->context() || outer_info->scope()->contains_with() || outer_info->scope()->num_heap_slots() > 0) { TraceInline(target, caller, "target requires context change"); return false; } +#endif + // Don't inline deeper than kMaxInliningLevels calls. HEnvironment* env = environment(); int current_level = 1; while (env->outer() != NULL) { - if (current_level == Compiler::kMaxInliningLevels) { + if (current_level == (FLAG_limit_inlining + ? Compiler::kMaxInliningLevels + : 2 * Compiler::kMaxInliningLevels)) { TraceInline(target, caller, "inline depth limit reached"); return false; } @@ -4593,7 +4663,8 @@ bool HGraphBuilder::TryInline(Call* expr) { ASSERT(target_shared->has_deoptimization_support()); TypeFeedbackOracle target_oracle( Handle<Code>(target_shared->code()), - Handle<Context>(target->context()->global_context())); + Handle<Context>(target->context()->global_context()), + isolate()); FunctionState target_state(this, &target_info, &target_oracle); HConstant* undefined = graph()->GetConstantUndefined(); @@ -4602,6 +4673,17 @@ bool HGraphBuilder::TryInline(Call* expr) { function, undefined, call_kind); +#ifdef V8_TARGET_ARCH_IA32 + // IA32 only, overwrite the caller's context in the deoptimization + // environment with the correct one. + // + // TODO(kmillikin): implement the same inlining on other platforms so we + // can remove the unsightly ifdefs in this function. + HConstant* context = new HConstant(Handle<Context>(target->context()), + Representation::Tagged()); + AddInstruction(context); + inner_env->BindContext(context); +#endif HBasicBlock* body_entry = CreateBasicBlock(inner_env); current_block()->Goto(body_entry); body_entry->SetJoinId(expr->ReturnId()); @@ -4922,8 +5004,8 @@ void HGraphBuilder::VisitCall(Call* expr) { } } else { + expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION); VariableProxy* proxy = expr->expression()->AsVariableProxy(); - // FIXME. bool global_call = proxy != NULL && proxy->var()->IsUnallocated(); if (global_call) { @@ -4975,6 +5057,46 @@ void HGraphBuilder::VisitCall(Call* expr) { Drop(argument_count); } + } else if (expr->IsMonomorphic()) { + // The function is on the stack in the unoptimized code during + // evaluation of the arguments. + CHECK_ALIVE(VisitForValue(expr->expression())); + HValue* function = Top(); + HValue* context = environment()->LookupContext(); + HGlobalObject* global = new(zone()) HGlobalObject(context); + HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global); + AddInstruction(global); + PushAndAdd(receiver); + CHECK_ALIVE(VisitExpressions(expr->arguments())); + AddInstruction(new(zone()) HCheckFunction(function, expr->target())); + if (TryInline(expr)) { + // The function is lingering in the deoptimization environment. + // Handle it by case analysis on the AST context. 
+ if (ast_context()->IsEffect()) { + Drop(1); + } else if (ast_context()->IsValue()) { + HValue* result = Pop(); + Drop(1); + Push(result); + } else if (ast_context()->IsTest()) { + TestContext* context = TestContext::cast(ast_context()); + if (context->if_true()->HasPredecessor()) { + context->if_true()->last_environment()->Drop(1); + } + if (context->if_false()->HasPredecessor()) { + context->if_true()->last_environment()->Drop(1); + } + } else { + UNREACHABLE(); + } + return; + } else { + call = PreProcessCall(new(zone()) HInvokeFunction(context, + function, + argument_count)); + Drop(1); // The function. + } + } else { CHECK_ALIVE(VisitArgument(expr->expression())); HValue* context = environment()->LookupContext(); @@ -5281,7 +5403,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) { if (proxy != NULL) { Variable* var = proxy->var(); - if (var->mode() == Variable::CONST) { + if (var->mode() == CONST) { return Bailout("unsupported count operation with const"); } // Argument of the count operation is a variable, not a property. @@ -5668,26 +5790,36 @@ Representation HGraphBuilder::ToRepresentation(TypeInfo info) { } -void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr, - Expression* expr, +void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr, + Expression* sub_expr, Handle<String> check) { - CHECK_ALIVE(VisitForTypeOf(expr)); - HValue* expr_value = Pop(); - HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check); - instr->set_position(compare_expr->position()); - return ast_context()->ReturnControl(instr, compare_expr->id()); + CHECK_ALIVE(VisitForTypeOf(sub_expr)); + HValue* value = Pop(); + HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check); + instr->set_position(expr->position()); + return ast_context()->ReturnControl(instr, expr->id()); } -void HGraphBuilder::HandleLiteralCompareUndefined( - CompareOperation* compare_expr, Expression* expr) { - CHECK_ALIVE(VisitForValue(expr)); - HValue* lhs = Pop(); - HValue* rhs = graph()->GetConstantUndefined(); - HCompareObjectEqAndBranch* instr = - new(zone()) HCompareObjectEqAndBranch(lhs, rhs); - instr->set_position(compare_expr->position()); - return ast_context()->ReturnControl(instr, compare_expr->id()); +bool HGraphBuilder::TryLiteralCompare(CompareOperation* expr) { + Expression *sub_expr; + Handle<String> check; + if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) { + HandleLiteralCompareTypeof(expr, sub_expr, check); + return true; + } + + if (expr->IsLiteralCompareUndefined(&sub_expr)) { + HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue); + return true; + } + + if (expr->IsLiteralCompareNull(&sub_expr)) { + HandleLiteralCompareNil(expr, sub_expr, kNullValue); + return true; + } + + return false; } @@ -5709,17 +5841,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { } // Check for special cases that compare against literals. - Expression *sub_expr; - Handle<String> check; - if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) { - HandleLiteralCompareTypeof(expr, sub_expr, check); - return; - } - - if (expr->IsLiteralCompareUndefined(&sub_expr)) { - HandleLiteralCompareUndefined(expr, sub_expr); - return; - } + if (TryLiteralCompare(expr)) return; TypeInfo type_info = oracle()->CompareType(expr); // Check if this expression was ever executed according to type feedback. 
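
The hunks above fold the separate null/undefined compare handlers into a single nil-compare path selected by TryLiteralCompare. The standalone C++ sketch below uses invented stand-in types (not the V8 AST or HIR classes) and only mirrors the dispatch shape: typeof checks keep their own handler, while comparisons against undefined and null share one handler parameterized by EqualityKind and NilValue.

// Standalone sketch with invented types (not the V8 AST classes); it only
// mirrors the dispatch shape of TryLiteralCompare / HandleLiteralCompareNil.
#include <cassert>
#include <string>

enum class NilValue { kNullValue, kUndefinedValue };
enum class EqualityKind { kNonStrictEquality, kStrictEquality };

// Hypothetical stand-in for CompareOperation: which literal form (if any)
// the comparison matches, and whether the operator was strict.
struct Compare {
  bool is_typeof_check = false;
  bool against_undefined = false;
  bool against_null = false;
  bool strict = false;
};

static std::string HandleLiteralCompareNil(EqualityKind kind, NilValue nil) {
  std::string s = (nil == NilValue::kNullValue) ? "null" : "undefined";
  return (kind == EqualityKind::kStrictEquality) ? s + " (strict)" : s;
}

// Returns true when one of the special literal forms was recognized,
// in the spirit of HGraphBuilder::TryLiteralCompare in the patch above.
static bool TryLiteralCompare(const Compare& expr, std::string* handled) {
  EqualityKind kind = expr.strict ? EqualityKind::kStrictEquality
                                  : EqualityKind::kNonStrictEquality;
  if (expr.is_typeof_check) { *handled = "typeof"; return true; }
  if (expr.against_undefined) {
    *handled = HandleLiteralCompareNil(kind, NilValue::kUndefinedValue);
    return true;
  }
  if (expr.against_null) {
    *handled = HandleLiteralCompareNil(kind, NilValue::kNullValue);
    return true;
  }
  return false;  // Fall through to the generic compare path.
}

int main() {
  std::string how;
  Compare eq_null;
  eq_null.against_null = true;
  assert(TryLiteralCompare(eq_null, &how) && how == "null");
  Compare generic;
  assert(!TryLiteralCompare(generic, &how));
  return 0;
}
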
@@ -5824,14 +5946,18 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { } -void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) { +void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr, + Expression* sub_expr, + NilValue nil) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); - CHECK_ALIVE(VisitForValue(expr->expression())); + CHECK_ALIVE(VisitForValue(sub_expr)); HValue* value = Pop(); - HIsNullAndBranch* instr = - new(zone()) HIsNullAndBranch(value, expr->is_strict()); + EqualityKind kind = + expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality; + HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil); + instr->set_position(expr->position()); return ast_context()->ReturnControl(instr, expr->id()); } @@ -5851,9 +5977,9 @@ void HGraphBuilder::VisitDeclaration(Declaration* decl) { void HGraphBuilder::HandleDeclaration(VariableProxy* proxy, - Variable::Mode mode, + VariableMode mode, FunctionLiteral* function) { - if (mode == Variable::LET) return Bailout("unsupported let declaration"); + if (mode == LET) return Bailout("unsupported let declaration"); Variable* var = proxy->var(); switch (var->location()) { case Variable::UNALLOCATED: @@ -5861,9 +5987,9 @@ void HGraphBuilder::HandleDeclaration(VariableProxy* proxy, case Variable::PARAMETER: case Variable::LOCAL: case Variable::CONTEXT: - if (mode == Variable::CONST || function != NULL) { + if (mode == CONST || function != NULL) { HValue* value = NULL; - if (mode == Variable::CONST) { + if (mode == CONST) { value = graph()->GetConstantHole(); } else { VisitForValue(function); @@ -5914,9 +6040,7 @@ void HGraphBuilder::GenerateIsFunction(CallRuntime* call) { CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasInstanceTypeAndBranch* result = - new(zone()) HHasInstanceTypeAndBranch(value, - JS_FUNCTION_TYPE, - JS_FUNCTION_PROXY_TYPE); + new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE); return ast_context()->ReturnControl(result, call->id()); } @@ -6816,7 +6940,7 @@ void HPhase::End() const { } #ifdef DEBUG - if (graph_ != NULL) graph_->Verify(); + if (graph_ != NULL) graph_->Verify(false); // No full verify. if (allocator_ != NULL) allocator_->Verify(); #endif } diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h index 03fbc7322..b66042c2c 100644 --- a/deps/v8/src/hydrogen.h +++ b/deps/v8/src/hydrogen.h @@ -243,11 +243,13 @@ class HGraph: public ZoneObject { // Returns false if there are phi-uses of the arguments-object // which are not supported by the optimizing compiler. - bool CheckPhis(); + bool CheckArgumentsPhiUses(); - // Returns false if there are phi-uses of hole values comming - // from uninitialized consts. - bool CollectPhis(); + // Returns false if there are phi-uses of an uninitialized const + // which are not supported by the optimizing compiler. 
+ bool CheckConstPhiUses(); + + void CollectPhis(); Handle<Code> Compile(CompilationInfo* info); @@ -283,7 +285,7 @@ class HGraph: public ZoneObject { } #ifdef DEBUG - void Verify() const; + void Verify(bool do_full_verify) const; #endif private: @@ -780,7 +782,7 @@ class HGraphBuilder: public AstVisitor { #undef INLINE_FUNCTION_GENERATOR_DECLARATION void HandleDeclaration(VariableProxy* proxy, - Variable::Mode mode, + VariableMode mode, FunctionLiteral* function); void VisitDelete(UnaryOperation* expr); @@ -910,11 +912,13 @@ class HGraphBuilder: public AstVisitor { HValue* receiver, SmallMapList* types, Handle<String> name); - void HandleLiteralCompareTypeof(CompareOperation* compare_expr, - Expression* expr, + bool TryLiteralCompare(CompareOperation* expr); + void HandleLiteralCompareTypeof(CompareOperation* expr, + Expression* sub_expr, Handle<String> check); - void HandleLiteralCompareUndefined(CompareOperation* compare_expr, - Expression* expr); + void HandleLiteralCompareNil(CompareOperation* expr, + Expression* sub_expr, + NilValue nil); HStringCharCodeAt* BuildStringCharCodeAt(HValue* context, HValue* string, @@ -938,6 +942,11 @@ class HGraphBuilder: public AstVisitor { HValue* val, ElementsKind elements_kind, bool is_store); + HInstruction* BuildFastElementAccess(HValue* elements, + HValue* checked_key, + HValue* val, + ElementsKind elements_kind, + bool is_store); HInstruction* BuildMonomorphicElementAccess(HValue* object, HValue* key, diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h index 0ca2d6b4a..446aa3e2d 100644 --- a/deps/v8/src/ia32/assembler-ia32-inl.h +++ b/deps/v8/src/ia32/assembler-ia32-inl.h @@ -89,8 +89,13 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); Assembler::set_target_address_at(pc_, target); + ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); + if (host() != NULL && IsCodeTarget(rmode_)) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } @@ -116,6 +121,10 @@ void RelocInfo::set_target_object(Object* target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Memory::Object_at(pc_) = target; CPU::FlushICache(pc_, sizeof(Address)); + if (host() != NULL && target->IsHeapObject()) { + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), &Memory::Object_at(pc_), HeapObject::cast(target)); + } } @@ -147,6 +156,12 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) { Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Memory::Address_at(pc_) = address; CPU::FlushICache(pc_, sizeof(Address)); + if (host() != NULL) { + // TODO(1550) We are passing NULL as a slot because cell can never be on + // evacuation candidate. 
+ host()->GetHeap()->incremental_marking()->RecordWrite( + host(), NULL, cell); + } } @@ -161,6 +176,11 @@ void RelocInfo::set_call_address(Address target) { ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); Assembler::set_target_address_at(pc_ + 1, target); + if (host() != NULL) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } @@ -194,7 +214,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() { void RelocInfo::Visit(ObjectVisitor* visitor) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - visitor->VisitPointer(target_object_address()); + visitor->VisitEmbeddedPointer(this); CPU::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); @@ -222,7 +242,7 @@ template<typename StaticVisitor> void RelocInfo::Visit(Heap* heap) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - StaticVisitor::VisitPointer(heap, target_object_address()); + StaticVisitor::VisitEmbeddedPointer(heap, this); CPU::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(heap, this); diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index 999647487..66a98841a 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -55,6 +55,8 @@ uint64_t CpuFeatures::supported_ = 0; uint64_t CpuFeatures::found_by_runtime_probing_ = 0; +// The Probe method needs executable memory, so it uses Heap::CreateCode. +// Allocation failure is silent and leads to safe default. void CpuFeatures::Probe() { ASSERT(!initialized_); ASSERT(supported_ == 0); @@ -86,23 +88,23 @@ void CpuFeatures::Probe() { __ pushfd(); __ push(ecx); __ push(ebx); - __ mov(ebp, Operand(esp)); + __ mov(ebp, esp); // If we can modify bit 21 of the EFLAGS register, then CPUID is supported. __ pushfd(); __ pop(eax); - __ mov(edx, Operand(eax)); + __ mov(edx, eax); __ xor_(eax, 0x200000); // Flip bit 21. __ push(eax); __ popfd(); __ pushfd(); __ pop(eax); - __ xor_(eax, Operand(edx)); // Different if CPUID is supported. + __ xor_(eax, edx); // Different if CPUID is supported. __ j(not_zero, &cpuid); // CPUID not supported. Clear the supported features in edx:eax. - __ xor_(eax, Operand(eax)); - __ xor_(edx, Operand(edx)); + __ xor_(eax, eax); + __ xor_(edx, edx); __ jmp(&done); // Invoke CPUID with 1 in eax to get feature information in @@ -118,13 +120,13 @@ void CpuFeatures::Probe() { // Move the result from ecx:edx to edx:eax and make sure to mark the // CPUID feature as supported. - __ mov(eax, Operand(edx)); + __ mov(eax, edx); __ or_(eax, 1 << CPUID); - __ mov(edx, Operand(ecx)); + __ mov(edx, ecx); // Done. __ bind(&done); - __ mov(esp, Operand(ebp)); + __ mov(esp, ebp); __ pop(ebx); __ pop(ecx); __ popfd(); @@ -286,6 +288,18 @@ bool Operand::is_reg(Register reg) const { && ((buf_[0] & 0x07) == reg.code()); // register codes match. } + +bool Operand::is_reg_only() const { + return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only. +} + + +Register Operand::reg() const { + ASSERT(is_reg_only()); + return Register::from_code(buf_[0] & 0x07); +} + + // ----------------------------------------------------------------------------- // Implementation of Assembler. 
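
The RelocInfo setters above now pair each patched pointer with an incremental-marking notification. The sketch below uses invented types (it is not the V8 heap API) and only models the invariant the post-write barrier preserves: once the marker has already visited a host object, a pointer written into it afterwards must be reported, or the newly referenced object could stay unmarked and be swept.

// Self-contained model with invented types (not v8::internal classes); it only
// illustrates why RecordWrite / RecordWriteIntoCode are called after patching.
#include <cstdio>
#include <unordered_set>

struct HeapObject { int id; };

class IncrementalMarkingModel {
 public:
  void Start() { active_ = true; }
  void MarkVisited(HeapObject* obj) { marked_.insert(obj); }

  // Post-write barrier: called after `host` was patched to point at `target`.
  void RecordWrite(HeapObject* host, HeapObject* target) {
    if (!active_ || target == nullptr) return;
    if (marked_.count(host) != 0 && marked_.count(target) == 0) {
      marked_.insert(target);  // Re-mark so the target survives this cycle.
      std::printf("barrier: object %d kept alive via host %d\n",
                  target->id, host->id);
    }
  }

  bool IsMarked(HeapObject* obj) const { return marked_.count(obj) != 0; }

 private:
  bool active_ = false;
  std::unordered_set<HeapObject*> marked_;
};

int main() {
  IncrementalMarkingModel marking;
  HeapObject code{1}, embedded{2};
  marking.Start();
  marking.MarkVisited(&code);             // Marker already scanned the host.
  marking.RecordWrite(&code, &embedded);  // Analogue of set_target_object().
  std::printf("embedded marked: %d\n", marking.IsMarked(&embedded) ? 1 : 0);
  return 0;
}
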
@@ -701,6 +715,13 @@ void Assembler::add(Register dst, const Operand& src) { } +void Assembler::add(const Operand& dst, Register src) { + EnsureSpace ensure_space(this); + EMIT(0x01); + emit_operand(src, dst); +} + + void Assembler::add(const Operand& dst, const Immediate& x) { ASSERT(reloc_info_writer.last_pc() != NULL); EnsureSpace ensure_space(this); @@ -741,25 +762,29 @@ void Assembler::and_(const Operand& dst, Register src) { void Assembler::cmpb(const Operand& op, int8_t imm8) { EnsureSpace ensure_space(this); - EMIT(0x80); - emit_operand(edi, op); // edi == 7 + if (op.is_reg(eax)) { + EMIT(0x3C); + } else { + EMIT(0x80); + emit_operand(edi, op); // edi == 7 + } EMIT(imm8); } -void Assembler::cmpb(const Operand& dst, Register src) { - ASSERT(src.is_byte_register()); +void Assembler::cmpb(const Operand& op, Register reg) { + ASSERT(reg.is_byte_register()); EnsureSpace ensure_space(this); EMIT(0x38); - emit_operand(src, dst); + emit_operand(reg, op); } -void Assembler::cmpb(Register dst, const Operand& src) { - ASSERT(dst.is_byte_register()); +void Assembler::cmpb(Register reg, const Operand& op) { + ASSERT(reg.is_byte_register()); EnsureSpace ensure_space(this); EMIT(0x3A); - emit_operand(dst, src); + emit_operand(reg, op); } @@ -1069,18 +1094,6 @@ void Assembler::shr_cl(Register dst) { } -void Assembler::subb(const Operand& op, int8_t imm8) { - EnsureSpace ensure_space(this); - if (op.is_reg(eax)) { - EMIT(0x2c); - } else { - EMIT(0x80); - emit_operand(ebp, op); // ebp == 5 - } - EMIT(imm8); -} - - void Assembler::sub(const Operand& dst, const Immediate& x) { EnsureSpace ensure_space(this); emit_arith(5, dst, x); @@ -1094,14 +1107,6 @@ void Assembler::sub(Register dst, const Operand& src) { } -void Assembler::subb(Register dst, const Operand& src) { - ASSERT(dst.code() < 4); - EnsureSpace ensure_space(this); - EMIT(0x2A); - emit_operand(dst, src); -} - - void Assembler::sub(const Operand& dst, Register src) { EnsureSpace ensure_space(this); EMIT(0x29); @@ -1158,6 +1163,10 @@ void Assembler::test(const Operand& op, const Immediate& imm) { void Assembler::test_b(const Operand& op, uint8_t imm8) { + if (op.is_reg_only() && op.reg().code() >= 4) { + test(op, Immediate(imm8)); + return; + } EnsureSpace ensure_space(this); EMIT(0xF6); emit_operand(eax, op); @@ -1178,10 +1187,10 @@ void Assembler::xor_(Register dst, const Operand& src) { } -void Assembler::xor_(const Operand& src, Register dst) { +void Assembler::xor_(const Operand& dst, Register src) { EnsureSpace ensure_space(this); EMIT(0x31); - emit_operand(dst, src); + emit_operand(src, dst); } @@ -2471,7 +2480,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { return; } } - RelocInfo rinfo(pc_, rmode, data); + RelocInfo rinfo(pc_, rmode, data, NULL); reloc_info_writer.Write(&rinfo); } diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index 4698e3ed1..4dfde5f62 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -75,6 +75,8 @@ struct Register { static inline Register FromAllocationIndex(int index); static Register from_code(int code) { + ASSERT(code >= 0); + ASSERT(code < kNumRegisters); Register r = { code }; return r; } @@ -300,9 +302,6 @@ enum ScaleFactor { class Operand BASE_EMBEDDED { public: - // reg - INLINE(explicit Operand(Register reg)); - // XMM reg INLINE(explicit Operand(XMMRegister xmm_reg)); @@ -347,12 +346,16 @@ class Operand BASE_EMBEDDED { // Returns true if this Operand is a wrapper for the specified register. 
bool is_reg(Register reg) const; + // Returns true if this Operand is a wrapper for one register. + bool is_reg_only() const; + + // Asserts that this Operand is a wrapper for one register and returns the + // register. + Register reg() const; + private: - byte buf_[6]; - // The number of bytes in buf_. - unsigned int len_; - // Only valid if len_ > 4. - RelocInfo::Mode rmode_; + // reg + INLINE(explicit Operand(Register reg)); // Set the ModRM byte without an encoded 'reg' register. The // register is encoded later as part of the emit_operand operation. @@ -362,7 +365,15 @@ class Operand BASE_EMBEDDED { inline void set_disp8(int8_t disp); inline void set_dispr(int32_t disp, RelocInfo::Mode rmode); + byte buf_[6]; + // The number of bytes in buf_. + unsigned int len_; + // Only valid if len_ > 4. + RelocInfo::Mode rmode_; + friend class Assembler; + friend class MacroAssembler; + friend class LCodeGen; }; @@ -671,7 +682,9 @@ class Assembler : public AssemblerBase { void leave(); // Moves + void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); } void mov_b(Register dst, const Operand& src); + void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); } void mov_b(const Operand& dst, int8_t imm8); void mov_b(const Operand& dst, Register src); @@ -687,17 +700,24 @@ class Assembler : public AssemblerBase { void mov(const Operand& dst, Handle<Object> handle); void mov(const Operand& dst, Register src); + void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); } void movsx_b(Register dst, const Operand& src); + void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); } void movsx_w(Register dst, const Operand& src); + void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); } void movzx_b(Register dst, const Operand& src); + void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); } void movzx_w(Register dst, const Operand& src); // Conditional moves void cmov(Condition cc, Register dst, int32_t imm32); void cmov(Condition cc, Register dst, Handle<Object> handle); + void cmov(Condition cc, Register dst, Register src) { + cmov(cc, dst, Operand(src)); + } void cmov(Condition cc, Register dst, const Operand& src); // Flag management. 
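
The header changes above add Register-taking convenience overloads next to the existing Operand-taking instructions. The sketch below, with an invented minimal Assembler and Operand (not the real V8 classes), shows the forwarding idea: the register form simply wraps its argument in an Operand, so the addressing-mode encoding stays in one place.

// Minimal sketch with invented classes (not the V8 Assembler/Operand);
// it only demonstrates the forwarding pattern used by the new overloads.
#include <cstdio>

struct Register { int code; };

class Operand {
 public:
  explicit Operand(Register r) : reg_only_(true), reg_code_(r.code) {}
  Operand(Register base, int displacement)
      : reg_only_(false), reg_code_(base.code), disp_(displacement) {}
  bool is_reg_only() const { return reg_only_; }
  int reg_code() const { return reg_code_; }
  int disp() const { return disp_; }
 private:
  bool reg_only_;
  int reg_code_;
  int disp_ = 0;
};

class AssemblerSketch {
 public:
  // Canonical form: the only place that inspects the addressing mode.
  void add(Register dst, const Operand& src) {
    if (src.is_reg_only()) {
      std::printf("add r%d, r%d\n", dst.code, src.reg_code());
    } else {
      std::printf("add r%d, [r%d+%d]\n", dst.code, src.reg_code(), src.disp());
    }
  }
  // Convenience overload in the style of the header changes above.
  void add(Register dst, Register src) { add(dst, Operand(src)); }
};

int main() {
  AssemblerSketch masm;
  Register eax{0}, ebx{3};
  masm.add(eax, ebx);              // Register-register form.
  masm.add(eax, Operand(ebx, 8));  // Register-memory form.
  return 0;
}
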
@@ -715,24 +735,31 @@ class Assembler : public AssemblerBase { void adc(Register dst, int32_t imm32); void adc(Register dst, const Operand& src); + void add(Register dst, Register src) { add(dst, Operand(src)); } void add(Register dst, const Operand& src); + void add(const Operand& dst, Register src); + void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); } void add(const Operand& dst, const Immediate& x); void and_(Register dst, int32_t imm32); void and_(Register dst, const Immediate& x); + void and_(Register dst, Register src) { and_(dst, Operand(src)); } void and_(Register dst, const Operand& src); - void and_(const Operand& src, Register dst); + void and_(const Operand& dst, Register src); void and_(const Operand& dst, const Immediate& x); + void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); } void cmpb(const Operand& op, int8_t imm8); - void cmpb(Register src, const Operand& dst); - void cmpb(const Operand& dst, Register src); + void cmpb(Register reg, const Operand& op); + void cmpb(const Operand& op, Register reg); void cmpb_al(const Operand& op); void cmpw_ax(const Operand& op); void cmpw(const Operand& op, Immediate imm16); void cmp(Register reg, int32_t imm32); void cmp(Register reg, Handle<Object> handle); + void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); } void cmp(Register reg, const Operand& op); + void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); } void cmp(const Operand& op, const Immediate& imm); void cmp(const Operand& op, Handle<Object> handle); @@ -748,6 +775,7 @@ class Assembler : public AssemblerBase { // Signed multiply instructions. void imul(Register src); // edx:eax = eax * src. + void imul(Register dst, Register src) { imul(dst, Operand(src)); } void imul(Register dst, const Operand& src); // dst = dst * src. void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32. 
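
A related detail from earlier in this diff is the guard added to test_b for registers without a low-byte form. The standalone sketch below (invented emitter, not the V8 Assembler) restates that rule: ia32 register codes 0..3 map to al/cl/dl/bl, while codes 4..7 would select ah/ch/dh/bh in a byte-sized encoding, so those registers are widened to a 32-bit test instead.

// Standalone sketch (invented emitter, not the V8 Assembler); it restates the
// byte-register constraint behind the test_b fallback in this diff.
#include <cstdio>

struct Register { int code; };  // 0..7, ia32 general-purpose registers.

static bool HasLowByteForm(Register reg) { return reg.code < 4; }

static void TestByteImmediate(Register reg, unsigned char imm8) {
  if (!HasLowByteForm(reg)) {
    // Same spirit as the fallback in Assembler::test_b: emit a 32-bit test
    // rather than a byte encoding that would address the wrong register.
    std::printf("test r%d, 0x%x   ; 32-bit fallback\n",
                reg.code, static_cast<unsigned>(imm8));
    return;
  }
  std::printf("test_b r%d, 0x%x ; byte form (al/cl/dl/bl)\n",
              reg.code, static_cast<unsigned>(imm8));
}

int main() {
  Register eax{0}, esi{6};
  TestByteImmediate(eax, 0x01);  // Uses the byte encoding.
  TestByteImmediate(esi, 0x01);  // No low-byte form; falls back to 32-bit test.
  return 0;
}
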
@@ -764,8 +792,10 @@ class Assembler : public AssemblerBase { void not_(Register dst); void or_(Register dst, int32_t imm32); + void or_(Register dst, Register src) { or_(dst, Operand(src)); } void or_(Register dst, const Operand& src); void or_(const Operand& dst, Register src); + void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); } void or_(const Operand& dst, const Immediate& x); void rcl(Register dst, uint8_t imm8); @@ -776,35 +806,42 @@ class Assembler : public AssemblerBase { void sbb(Register dst, const Operand& src); + void shld(Register dst, Register src) { shld(dst, Operand(src)); } void shld(Register dst, const Operand& src); void shl(Register dst, uint8_t imm8); void shl_cl(Register dst); + void shrd(Register dst, Register src) { shrd(dst, Operand(src)); } void shrd(Register dst, const Operand& src); void shr(Register dst, uint8_t imm8); void shr_cl(Register dst); - void subb(const Operand& dst, int8_t imm8); - void subb(Register dst, const Operand& src); + void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); } void sub(const Operand& dst, const Immediate& x); + void sub(Register dst, Register src) { sub(dst, Operand(src)); } void sub(Register dst, const Operand& src); void sub(const Operand& dst, Register src); void test(Register reg, const Immediate& imm); + void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); } void test(Register reg, const Operand& op); void test_b(Register reg, const Operand& op); void test(const Operand& op, const Immediate& imm); + void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); } void test_b(const Operand& op, uint8_t imm8); void xor_(Register dst, int32_t imm32); + void xor_(Register dst, Register src) { xor_(dst, Operand(src)); } void xor_(Register dst, const Operand& src); - void xor_(const Operand& src, Register dst); + void xor_(const Operand& dst, Register src); + void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); } void xor_(const Operand& dst, const Immediate& x); // Bit operations. 
void bt(const Operand& dst, Register src); + void bts(Register dst, Register src) { bts(Operand(dst), src); } void bts(const Operand& dst, Register src); // Miscellaneous @@ -835,6 +872,7 @@ class Assembler : public AssemblerBase { void call(Label* L); void call(byte* entry, RelocInfo::Mode rmode); int CallSize(const Operand& adr); + void call(Register reg) { call(Operand(reg)); } void call(const Operand& adr); int CallSize(Handle<Code> code, RelocInfo::Mode mode); void call(Handle<Code> code, @@ -845,6 +883,7 @@ class Assembler : public AssemblerBase { // unconditional jump to L void jmp(Label* L, Label::Distance distance = Label::kFar); void jmp(byte* entry, RelocInfo::Mode rmode); + void jmp(Register reg) { jmp(Operand(reg)); } void jmp(const Operand& adr); void jmp(Handle<Code> code, RelocInfo::Mode rmode); @@ -929,6 +968,7 @@ class Assembler : public AssemblerBase { void cvttss2si(Register dst, const Operand& src); void cvttsd2si(Register dst, const Operand& src); + void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); } void cvtsi2sd(XMMRegister dst, const Operand& src); void cvtss2sd(XMMRegister dst, XMMRegister src); void cvtsd2ss(XMMRegister dst, XMMRegister src); @@ -969,12 +1009,14 @@ class Assembler : public AssemblerBase { void movdbl(XMMRegister dst, const Operand& src); void movdbl(const Operand& dst, XMMRegister src); + void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); } void movd(XMMRegister dst, const Operand& src); - void movd(const Operand& src, XMMRegister dst); + void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); } + void movd(const Operand& dst, XMMRegister src); void movsd(XMMRegister dst, XMMRegister src); void movss(XMMRegister dst, const Operand& src); - void movss(const Operand& src, XMMRegister dst); + void movss(const Operand& dst, XMMRegister src); void movss(XMMRegister dst, XMMRegister src); void pand(XMMRegister dst, XMMRegister src); @@ -987,11 +1029,17 @@ class Assembler : public AssemblerBase { void psrlq(XMMRegister reg, int8_t shift); void psrlq(XMMRegister dst, XMMRegister src); void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle); + void pextrd(Register dst, XMMRegister src, int8_t offset) { + pextrd(Operand(dst), src, offset); + } void pextrd(const Operand& dst, XMMRegister src, int8_t offset); + void pinsrd(XMMRegister dst, Register src, int8_t offset) { + pinsrd(dst, Operand(src), offset); + } void pinsrd(XMMRegister dst, const Operand& src, int8_t offset); // Parallel XMM operations. - void movntdqa(XMMRegister src, const Operand& dst); + void movntdqa(XMMRegister dst, const Operand& src); void movntdq(const Operand& dst, XMMRegister src); // Prefetch src position into cache level. // Level 1, 2 or 3 specifies CPU cache level. 
Level 0 specifies a @@ -1045,6 +1093,9 @@ class Assembler : public AssemblerBase { static const int kMaximalBufferSize = 512*MB; static const int kMinimalBufferSize = 4*KB; + byte byte_at(int pos) { return buffer_[pos]; } + void set_byte_at(int pos, byte value) { buffer_[pos] = value; } + protected: bool emit_debug_code() const { return emit_debug_code_; } @@ -1057,9 +1108,8 @@ class Assembler : public AssemblerBase { byte* addr_at(int pos) { return buffer_ + pos; } + private: - byte byte_at(int pos) { return buffer_[pos]; } - void set_byte_at(int pos, byte value) { buffer_[pos] = value; } uint32_t long_at(int pos) { return *reinterpret_cast<uint32_t*>(addr_at(pos)); } diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index 310ea3d12..53ade3a6c 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, // JumpToExternalReference expects eax to contain the number of arguments // including the receiver and the extra arguments. - __ add(Operand(eax), Immediate(num_extra_args + 1)); + __ add(eax, Immediate(num_extra_args + 1)); __ JumpToExternalReference(ExternalReference(id, masm->isolate())); } @@ -80,25 +80,34 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // -- edi: constructor function // ----------------------------------- - Label non_function_call; + Label slow, non_function_call; // Check that function is not a smi. __ JumpIfSmi(edi, &non_function_call); // Check that function is a JSFunction. __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); - __ j(not_equal, &non_function_call); + __ j(not_equal, &slow); // Jump to the function-specific construct stub. __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset)); __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize)); - __ jmp(Operand(ebx)); + __ jmp(ebx); // edi: called object // eax: number of arguments + // ecx: object map + Label do_call; + __ bind(&slow); + __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE); + __ j(not_equal, &non_function_call); + __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); + __ jmp(&do_call); + __ bind(&non_function_call); + __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); + __ bind(&do_call); // Set expected number of arguments to zero (not changing eax). __ Set(ebx, Immediate(0)); - __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); Handle<Code> arguments_adaptor = masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); __ SetCallKind(ecx, CALL_AS_METHOD); @@ -113,264 +122,271 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, ASSERT(!is_api_function || !count_constructions); // Enter a construct frame. - __ EnterConstructFrame(); + { + FrameScope scope(masm, StackFrame::CONSTRUCT); - // Store a smi-tagged arguments count on the stack. - __ SmiTag(eax); - __ push(eax); + // Store a smi-tagged arguments count on the stack. + __ SmiTag(eax); + __ push(eax); - // Push the function to invoke on the stack. - __ push(edi); + // Push the function to invoke on the stack. + __ push(edi); - // Try to allocate the object without transitioning into C code. If any of the - // preconditions is not met, the code bails out to the runtime call. - Label rt_call, allocated; - if (FLAG_inline_new) { - Label undo_allocation; + // Try to allocate the object without transitioning into C code. 
If any of + // the preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; #ifdef ENABLE_DEBUGGER_SUPPORT - ExternalReference debug_step_in_fp = - ExternalReference::debug_step_in_fp_address(masm->isolate()); - __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0)); - __ j(not_equal, &rt_call); + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(masm->isolate()); + __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0)); + __ j(not_equal, &rt_call); #endif - // Verified that the constructor is a JSFunction. - // Load the initial map and verify that it is in fact a map. - // edi: constructor - __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); - // Will both indicate a NULL and a Smi - __ JumpIfSmi(eax, &rt_call); - // edi: constructor - // eax: initial map (if proven valid below) - __ CmpObjectType(eax, MAP_TYPE, ebx); - __ j(not_equal, &rt_call); - - // Check that the constructor is not constructing a JSFunction (see comments - // in Runtime_NewObject in runtime.cc). In which case the initial map's - // instance type would be JS_FUNCTION_TYPE. - // edi: constructor - // eax: initial map - __ CmpInstanceType(eax, JS_FUNCTION_TYPE); - __ j(equal, &rt_call); - - if (count_constructions) { - Label allocate; - // Decrease generous allocation count. - __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); - __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset)); - __ j(not_zero, &allocate); + // Verified that the constructor is a JSFunction. + // Load the initial map and verify that it is in fact a map. + // edi: constructor + __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi + __ JumpIfSmi(eax, &rt_call); + // edi: constructor + // eax: initial map (if proven valid below) + __ CmpObjectType(eax, MAP_TYPE, ebx); + __ j(not_equal, &rt_call); + + // Check that the constructor is not constructing a JSFunction (see + // comments in Runtime_NewObject in runtime.cc). In which case the + // initial map's instance type would be JS_FUNCTION_TYPE. + // edi: constructor + // eax: initial map + __ CmpInstanceType(eax, JS_FUNCTION_TYPE); + __ j(equal, &rt_call); - __ push(eax); - __ push(edi); + if (count_constructions) { + Label allocate; + // Decrease generous allocation count. + __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ dec_b(FieldOperand(ecx, + SharedFunctionInfo::kConstructionCountOffset)); + __ j(not_zero, &allocate); - __ push(edi); // constructor - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + __ push(eax); + __ push(edi); - __ pop(edi); - __ pop(eax); + __ push(edi); // constructor + // The call will replace the stub, so the countdown is only done once. + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); - __ bind(&allocate); - } + __ pop(edi); + __ pop(eax); - // Now allocate the JSObject on the heap. - // edi: constructor - // eax: initial map - __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset)); - __ shl(edi, kPointerSizeLog2); - __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS); - // Allocated the JSObject, now initialize the fields. 
- // eax: initial map - // ebx: JSObject - // edi: start of next object - __ mov(Operand(ebx, JSObject::kMapOffset), eax); - Factory* factory = masm->isolate()->factory(); - __ mov(ecx, factory->empty_fixed_array()); - __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx); - __ mov(Operand(ebx, JSObject::kElementsOffset), ecx); - // Set extra fields in the newly allocated object. - // eax: initial map - // ebx: JSObject - // edi: start of next object - { Label loop, entry; - // To allow for truncation. + __ bind(&allocate); + } + + // Now allocate the JSObject on the heap. + // edi: constructor + // eax: initial map + __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset)); + __ shl(edi, kPointerSizeLog2); + __ AllocateInNewSpace( + edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS); + // Allocated the JSObject, now initialize the fields. + // eax: initial map + // ebx: JSObject + // edi: start of next object + __ mov(Operand(ebx, JSObject::kMapOffset), eax); + Factory* factory = masm->isolate()->factory(); + __ mov(ecx, factory->empty_fixed_array()); + __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx); + __ mov(Operand(ebx, JSObject::kElementsOffset), ecx); + // Set extra fields in the newly allocated object. + // eax: initial map + // ebx: JSObject + // edi: start of next object + __ lea(ecx, Operand(ebx, JSObject::kHeaderSize)); + __ mov(edx, factory->undefined_value()); if (count_constructions) { + __ movzx_b(esi, + FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset)); + __ lea(esi, + Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize)); + // esi: offset of first field after pre-allocated fields + if (FLAG_debug_code) { + __ cmp(esi, edi); + __ Assert(less_equal, + "Unexpected number of pre-allocated property fields."); + } + __ InitializeFieldsWithFiller(ecx, esi, edx); __ mov(edx, factory->one_pointer_filler_map()); - } else { + } + __ InitializeFieldsWithFiller(ecx, edi, edx); + + // Add the object tag to make the JSObject real, so that we can continue + // and jump into the continuation code at any time from now on. Any + // failures need to undo the allocation, so that the heap is in a + // consistent state and verifiable. + // eax: initial map + // ebx: JSObject + // edi: start of next object + __ or_(ebx, Immediate(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. + // Allocate and initialize a FixedArray if it is. + // eax: initial map + // ebx: JSObject + // edi: start of next object + // Calculate the total number of properties described by the map. + __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset)); + __ movzx_b(ecx, + FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset)); + __ add(edx, ecx); + // Calculate unused properties past the end of the in-object properties. + __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset)); + __ sub(edx, ecx); + // Done if no extra properties are to be allocated. + __ j(zero, &allocated); + __ Assert(positive, "Property allocation count failed."); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // ebx: JSObject + // edi: start of next object (will be start of FixedArray) + // edx: number of elements in properties array + __ AllocateInNewSpace(FixedArray::kHeaderSize, + times_pointer_size, + edx, + edi, + ecx, + no_reg, + &undo_allocation, + RESULT_CONTAINS_TOP); + + // Initialize the FixedArray. 
+ // ebx: JSObject + // edi: FixedArray + // edx: number of elements + // ecx: start of next object + __ mov(eax, factory->fixed_array_map()); + __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map + __ SmiTag(edx); + __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length + + // Initialize the fields to undefined. + // ebx: JSObject + // edi: FixedArray + // ecx: start of next object + { Label loop, entry; __ mov(edx, factory->undefined_value()); + __ lea(eax, Operand(edi, FixedArray::kHeaderSize)); + __ jmp(&entry); + __ bind(&loop); + __ mov(Operand(eax, 0), edx); + __ add(eax, Immediate(kPointerSize)); + __ bind(&entry); + __ cmp(eax, ecx); + __ j(below, &loop); } - __ lea(ecx, Operand(ebx, JSObject::kHeaderSize)); - __ jmp(&entry); - __ bind(&loop); - __ mov(Operand(ecx, 0), edx); - __ add(Operand(ecx), Immediate(kPointerSize)); - __ bind(&entry); - __ cmp(ecx, Operand(edi)); - __ j(less, &loop); - } - - // Add the object tag to make the JSObject real, so that we can continue and - // jump into the continuation code at any time from now on. Any failures - // need to undo the allocation, so that the heap is in a consistent state - // and verifiable. - // eax: initial map - // ebx: JSObject - // edi: start of next object - __ or_(Operand(ebx), Immediate(kHeapObjectTag)); - - // Check if a non-empty properties array is needed. - // Allocate and initialize a FixedArray if it is. - // eax: initial map - // ebx: JSObject - // edi: start of next object - // Calculate the total number of properties described by the map. - __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset)); - __ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset)); - __ add(edx, Operand(ecx)); - // Calculate unused properties past the end of the in-object properties. - __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset)); - __ sub(edx, Operand(ecx)); - // Done if no extra properties are to be allocated. - __ j(zero, &allocated); - __ Assert(positive, "Property allocation count failed."); - - // Scale the number of elements by pointer size and add the header for - // FixedArrays to the start of the next object calculation from above. - // ebx: JSObject - // edi: start of next object (will be start of FixedArray) - // edx: number of elements in properties array - __ AllocateInNewSpace(FixedArray::kHeaderSize, - times_pointer_size, - edx, - edi, - ecx, - no_reg, - &undo_allocation, - RESULT_CONTAINS_TOP); - - // Initialize the FixedArray. - // ebx: JSObject - // edi: FixedArray - // edx: number of elements - // ecx: start of next object - __ mov(eax, factory->fixed_array_map()); - __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map - __ SmiTag(edx); - __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length - - // Initialize the fields to undefined. 
- // ebx: JSObject - // edi: FixedArray - // ecx: start of next object - { Label loop, entry; - __ mov(edx, factory->undefined_value()); - __ lea(eax, Operand(edi, FixedArray::kHeaderSize)); - __ jmp(&entry); - __ bind(&loop); - __ mov(Operand(eax, 0), edx); - __ add(Operand(eax), Immediate(kPointerSize)); - __ bind(&entry); - __ cmp(eax, Operand(ecx)); - __ j(below, &loop); - } - // Store the initialized FixedArray into the properties field of - // the JSObject - // ebx: JSObject - // edi: FixedArray - __ or_(Operand(edi), Immediate(kHeapObjectTag)); // add the heap tag - __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi); + // Store the initialized FixedArray into the properties field of + // the JSObject + // ebx: JSObject + // edi: FixedArray + __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag + __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi); - // Continue with JSObject being successfully allocated - // ebx: JSObject - __ jmp(&allocated); + // Continue with JSObject being successfully allocated + // ebx: JSObject + __ jmp(&allocated); - // Undo the setting of the new top so that the heap is verifiable. For - // example, the map's unused properties potentially do not match the - // allocated objects unused properties. - // ebx: JSObject (previous new top) - __ bind(&undo_allocation); - __ UndoAllocationInNewSpace(ebx); - } + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // ebx: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(ebx); + } - // Allocate the new receiver object using the runtime call. - __ bind(&rt_call); - // Must restore edi (constructor) before calling runtime. - __ mov(edi, Operand(esp, 0)); - // edi: function (constructor) - __ push(edi); - __ CallRuntime(Runtime::kNewObject, 1); - __ mov(ebx, Operand(eax)); // store result in ebx + // Allocate the new receiver object using the runtime call. + __ bind(&rt_call); + // Must restore edi (constructor) before calling runtime. + __ mov(edi, Operand(esp, 0)); + // edi: function (constructor) + __ push(edi); + __ CallRuntime(Runtime::kNewObject, 1); + __ mov(ebx, eax); // store result in ebx - // New object allocated. - // ebx: newly allocated object - __ bind(&allocated); - // Retrieve the function from the stack. - __ pop(edi); + // New object allocated. + // ebx: newly allocated object + __ bind(&allocated); + // Retrieve the function from the stack. + __ pop(edi); - // Retrieve smi-tagged arguments count from the stack. - __ mov(eax, Operand(esp, 0)); - __ SmiUntag(eax); + // Retrieve smi-tagged arguments count from the stack. + __ mov(eax, Operand(esp, 0)); + __ SmiUntag(eax); - // Push the allocated receiver to the stack. We need two copies - // because we may have to return the original one and the calling - // conventions dictate that the called function pops the receiver. - __ push(ebx); - __ push(ebx); + // Push the allocated receiver to the stack. We need two copies + // because we may have to return the original one and the calling + // conventions dictate that the called function pops the receiver. + __ push(ebx); + __ push(ebx); - // Setup pointer to last argument. - __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset)); + // Setup pointer to last argument. + __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset)); - // Copy arguments and receiver to the expression stack. 
- Label loop, entry; - __ mov(ecx, Operand(eax)); - __ jmp(&entry); - __ bind(&loop); - __ push(Operand(ebx, ecx, times_4, 0)); - __ bind(&entry); - __ dec(ecx); - __ j(greater_equal, &loop); + // Copy arguments and receiver to the expression stack. + Label loop, entry; + __ mov(ecx, eax); + __ jmp(&entry); + __ bind(&loop); + __ push(Operand(ebx, ecx, times_4, 0)); + __ bind(&entry); + __ dec(ecx); + __ j(greater_equal, &loop); + + // Call the function. + if (is_api_function) { + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + Handle<Code> code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + ParameterCount expected(0); + __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + } else { + ParameterCount actual(eax); + __ InvokeFunction(edi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } - // Call the function. - if (is_api_function) { - __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); - Handle<Code> code = - masm->isolate()->builtins()->HandleApiCallConstruct(); - ParameterCount expected(0); - __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET, - CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); - } else { - ParameterCount actual(eax); - __ InvokeFunction(edi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } + // Restore context from the frame. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - // Restore context from the frame. - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, exit; - // If the result is an object (in the ECMA sense), we should get rid - // of the receiver and use the result; see ECMA-262 section 13.2.2-7 - // on page 74. - Label use_receiver, exit; + // If the result is a smi, it is *not* an object in the ECMA sense. + __ JumpIfSmi(eax, &use_receiver); - // If the result is a smi, it is *not* an object in the ECMA sense. - __ JumpIfSmi(eax, &use_receiver); + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(above_equal, &exit); - // If the type of the result (stored in its map) is less than - // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. - __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); - __ j(above_equal, &exit); + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ mov(eax, Operand(esp, 0)); - // Throw away the result of the constructor invocation and use the - // on-stack receiver as the result. - __ bind(&use_receiver); - __ mov(eax, Operand(esp, 0)); + // Restore the arguments count and leave the construct frame. + __ bind(&exit); + __ mov(ebx, Operand(esp, kPointerSize)); // Get arguments count. - // Restore the arguments count and leave the construct frame. - __ bind(&exit); - __ mov(ebx, Operand(esp, kPointerSize)); // get arguments count - __ LeaveConstructFrame(); + // Leave construct frame. + } // Remove caller arguments from the stack and return. 
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); @@ -399,57 +415,58 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, bool is_construct) { - // Clear the context before we push it when entering the JS frame. + // Clear the context before we push it when entering the internal frame. __ Set(esi, Immediate(0)); - // Enter an internal frame. - __ EnterInternalFrame(); - - // Load the previous frame pointer (ebx) to access C arguments - __ mov(ebx, Operand(ebp, 0)); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Get the function from the frame and setup the context. - __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset)); - __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset)); + // Load the previous frame pointer (ebx) to access C arguments + __ mov(ebx, Operand(ebp, 0)); - // Push the function and the receiver onto the stack. - __ push(ecx); - __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset)); + // Get the function from the frame and setup the context. + __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset)); + __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset)); - // Load the number of arguments and setup pointer to the arguments. - __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset)); - __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset)); + // Push the function and the receiver onto the stack. + __ push(ecx); + __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset)); - // Copy arguments to the stack in a loop. - Label loop, entry; - __ Set(ecx, Immediate(0)); - __ jmp(&entry); - __ bind(&loop); - __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv - __ push(Operand(edx, 0)); // dereference handle - __ inc(Operand(ecx)); - __ bind(&entry); - __ cmp(ecx, Operand(eax)); - __ j(not_equal, &loop); + // Load the number of arguments and setup pointer to the arguments. + __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset)); + __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset)); - // Get the function from the stack and call it. - __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize)); // +1 ~ receiver + // Copy arguments to the stack in a loop. + Label loop, entry; + __ Set(ecx, Immediate(0)); + __ jmp(&entry); + __ bind(&loop); + __ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv + __ push(Operand(edx, 0)); // dereference handle + __ inc(ecx); + __ bind(&entry); + __ cmp(ecx, eax); + __ j(not_equal, &loop); + + // Get the function from the stack and call it. + // kPointerSize for the receiver. + __ mov(edi, Operand(esp, eax, times_4, kPointerSize)); + + // Invoke the code. + if (is_construct) { + __ call(masm->isolate()->builtins()->JSConstructCall(), + RelocInfo::CODE_TARGET); + } else { + ParameterCount actual(eax); + __ InvokeFunction(edi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } - // Invoke the code. - if (is_construct) { - __ call(masm->isolate()->builtins()->JSConstructCall(), - RelocInfo::CODE_TARGET); - } else { - ParameterCount actual(eax); - __ InvokeFunction(edi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Exit the internal frame. Notice that this also removes the empty. + // context and the function left on the stack by the code + // invocation. } - - // Exit the JS frame. Notice that this also removes the empty - // context and the function left on the stack by the code - // invocation. 
- __ LeaveInternalFrame(); - __ ret(1 * kPointerSize); // remove receiver + __ ret(kPointerSize); // Remove receiver. } @@ -464,68 +481,68 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { void Builtins::Generate_LazyCompile(MacroAssembler* masm) { - // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push a copy of the function. - __ push(edi); - // Push call kind information. - __ push(ecx); + // Push a copy of the function. + __ push(edi); + // Push call kind information. + __ push(ecx); - __ push(edi); // Function is also the parameter to the runtime call. - __ CallRuntime(Runtime::kLazyCompile, 1); + __ push(edi); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kLazyCompile, 1); - // Restore call kind information. - __ pop(ecx); - // Restore receiver. - __ pop(edi); + // Restore call kind information. + __ pop(ecx); + // Restore receiver. + __ pop(edi); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Do a tail-call of the compiled function. __ lea(eax, FieldOperand(eax, Code::kHeaderSize)); - __ jmp(Operand(eax)); + __ jmp(eax); } void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { - // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push a copy of the function onto the stack. - __ push(edi); - // Push call kind information. - __ push(ecx); + // Push a copy of the function onto the stack. + __ push(edi); + // Push call kind information. + __ push(ecx); - __ push(edi); // Function is also the parameter to the runtime call. - __ CallRuntime(Runtime::kLazyRecompile, 1); + __ push(edi); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kLazyRecompile, 1); - // Restore call kind information. - __ pop(ecx); - // Restore receiver. - __ pop(edi); + // Restore call kind information. + __ pop(ecx); + // Restore receiver. + __ pop(edi); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Do a tail-call of the compiled function. __ lea(eax, FieldOperand(eax, Code::kHeaderSize)); - __ jmp(Operand(eax)); + __ jmp(eax); } static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { - // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Pass the function and deoptimization type to the runtime system. - __ push(Immediate(Smi::FromInt(static_cast<int>(type)))); - __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + // Pass the function and deoptimization type to the runtime system. + __ push(Immediate(Smi::FromInt(static_cast<int>(type)))); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Get the full codegen state from the stack and untag it. __ mov(ecx, Operand(esp, 1 * kPointerSize)); @@ -566,9 +583,10 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { // the registers without worrying about which of them contain // pointers. This seems a bit fragile. __ pushad(); - __ EnterInternalFrame(); - __ CallRuntime(Runtime::kNotifyOSR, 0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kNotifyOSR, 0); + } __ popad(); __ ret(0); } @@ -579,7 +597,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 1. 
Make sure we have at least one argument. { Label done; - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_zero, &done); __ pop(ebx); __ push(Immediate(factory->undefined_value())); @@ -631,18 +649,21 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ j(above_equal, &shift_arguments); __ bind(&convert_to_object); - __ EnterInternalFrame(); // In order to preserve argument count. - __ SmiTag(eax); - __ push(eax); - __ push(ebx); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ mov(ebx, eax); - __ Set(edx, Immediate(0)); // restore + { // In order to preserve argument count. + FrameScope scope(masm, StackFrame::INTERNAL); + __ SmiTag(eax); + __ push(eax); + + __ push(ebx); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(ebx, eax); + __ Set(edx, Immediate(0)); // restore + + __ pop(eax); + __ SmiUntag(eax); + } - __ pop(eax); - __ SmiUntag(eax); - __ LeaveInternalFrame(); // Restore the function to edi. __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize)); __ jmp(&patch_receiver); @@ -695,11 +716,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin, // or a function proxy via CALL_FUNCTION_PROXY. { Label function, non_proxy; - __ test(edx, Operand(edx)); + __ test(edx, edx); __ j(zero, &function); __ Set(ebx, Immediate(0)); __ SetCallKind(ecx, CALL_AS_METHOD); - __ cmp(Operand(edx), Immediate(1)); + __ cmp(edx, Immediate(1)); __ j(not_equal, &non_proxy); __ pop(edx); // return address @@ -726,13 +747,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset)); __ SmiUntag(ebx); __ SetCallKind(ecx, CALL_AS_METHOD); - __ cmp(eax, Operand(ebx)); + __ cmp(eax, ebx); __ j(not_equal, masm->isolate()->builtins()->ArgumentsAdaptorTrampoline()); ParameterCount expected(0); - __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(), + CALL_AS_METHOD); } @@ -740,155 +761,156 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { static const int kArgumentsOffset = 2 * kPointerSize; static const int kReceiverOffset = 3 * kPointerSize; static const int kFunctionOffset = 4 * kPointerSize; + { + FrameScope frame_scope(masm, StackFrame::INTERNAL); + + __ push(Operand(ebp, kFunctionOffset)); // push this + __ push(Operand(ebp, kArgumentsOffset)); // push arguments + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + ExternalReference real_stack_limit = + ExternalReference::address_of_real_stack_limit(masm->isolate()); + __ mov(edi, Operand::StaticVariable(real_stack_limit)); + // Make ecx the space we have left. The stack might already be overflowed + // here which will cause ecx to become negative. + __ mov(ecx, esp); + __ sub(ecx, edi); + // Make edx the space we need for the array when it is unrolled onto the + // stack. + __ mov(edx, eax); + __ shl(edx, kPointerSizeLog2 - kSmiTagSize); + // Check if the arguments will overflow the stack. + __ cmp(ecx, edx); + __ j(greater, &okay); // Signed comparison. + + // Out of stack space. 
+ __ push(Operand(ebp, 4 * kPointerSize)); // push this + __ push(eax); + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + __ bind(&okay); + // End of stack check. + + // Push current index and limit. + const int kLimitOffset = + StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize; + const int kIndexOffset = kLimitOffset - 1 * kPointerSize; + __ push(eax); // limit + __ push(Immediate(0)); // index + + // Get the receiver. + __ mov(ebx, Operand(ebp, kReceiverOffset)); + + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver; + __ mov(edi, Operand(ebp, kFunctionOffset)); + __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &push_receiver); + + // Change context eagerly to get the right global object if necessary. + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); - __ EnterInternalFrame(); - - __ push(Operand(ebp, kFunctionOffset)); // push this - __ push(Operand(ebp, kArgumentsOffset)); // push arguments - __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - - // Check the stack for overflow. We are not trying to catch - // interruptions (e.g. debug break and preemption) here, so the "real stack - // limit" is checked. - Label okay; - ExternalReference real_stack_limit = - ExternalReference::address_of_real_stack_limit(masm->isolate()); - __ mov(edi, Operand::StaticVariable(real_stack_limit)); - // Make ecx the space we have left. The stack might already be overflowed - // here which will cause ecx to become negative. - __ mov(ecx, Operand(esp)); - __ sub(ecx, Operand(edi)); - // Make edx the space we need for the array when it is unrolled onto the - // stack. - __ mov(edx, Operand(eax)); - __ shl(edx, kPointerSizeLog2 - kSmiTagSize); - // Check if the arguments will overflow the stack. - __ cmp(ecx, Operand(edx)); - __ j(greater, &okay); // Signed comparison. - - // Out of stack space. - __ push(Operand(ebp, 4 * kPointerSize)); // push this - __ push(eax); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); - __ bind(&okay); - // End of stack check. - - // Push current index and limit. - const int kLimitOffset = - StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize; - const int kIndexOffset = kLimitOffset - 1 * kPointerSize; - __ push(eax); // limit - __ push(Immediate(0)); // index - - // Get the receiver. - __ mov(ebx, Operand(ebp, kReceiverOffset)); - - // Check that the function is a JS function (otherwise it must be a proxy). - Label push_receiver; - __ mov(edi, Operand(ebp, kFunctionOffset)); - __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); - __ j(not_equal, &push_receiver); + // Compute the receiver. + // Do not transform the receiver for strict mode functions. + Label call_to_object, use_global_receiver; + __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset), + 1 << SharedFunctionInfo::kStrictModeBitWithinByte); + __ j(not_equal, &push_receiver); - // Change context eagerly to get the right global object if necessary. - __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + Factory* factory = masm->isolate()->factory(); - // Compute the receiver. - // Do not transform the receiver for strict mode functions. 
- Label call_to_object, use_global_receiver; - __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); - __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset), - 1 << SharedFunctionInfo::kStrictModeBitWithinByte); - __ j(not_equal, &push_receiver); + // Do not transform the receiver for natives (shared already in ecx). + __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset), + 1 << SharedFunctionInfo::kNativeBitWithinByte); + __ j(not_equal, &push_receiver); - Factory* factory = masm->isolate()->factory(); + // Compute the receiver in non-strict mode. + // Call ToObject on the receiver if it is not an object, or use the + // global object if it is null or undefined. + __ JumpIfSmi(ebx, &call_to_object); + __ cmp(ebx, factory->null_value()); + __ j(equal, &use_global_receiver); + __ cmp(ebx, factory->undefined_value()); + __ j(equal, &use_global_receiver); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(above_equal, &push_receiver); - // Do not transform the receiver for natives (shared already in ecx). - __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset), - 1 << SharedFunctionInfo::kNativeBitWithinByte); - __ j(not_equal, &push_receiver); - - // Compute the receiver in non-strict mode. - // Call ToObject on the receiver if it is not an object, or use the - // global object if it is null or undefined. - __ JumpIfSmi(ebx, &call_to_object); - __ cmp(ebx, factory->null_value()); - __ j(equal, &use_global_receiver); - __ cmp(ebx, factory->undefined_value()); - __ j(equal, &use_global_receiver); - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx); - __ j(above_equal, &push_receiver); - - __ bind(&call_to_object); - __ push(ebx); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ mov(ebx, Operand(eax)); - __ jmp(&push_receiver); - - // Use the current global receiver object as the receiver. - __ bind(&use_global_receiver); - const int kGlobalOffset = - Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; - __ mov(ebx, FieldOperand(esi, kGlobalOffset)); - __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset)); - __ mov(ebx, FieldOperand(ebx, kGlobalOffset)); - __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset)); - - // Push the receiver. - __ bind(&push_receiver); - __ push(ebx); - - // Copy all arguments from the array to the stack. - Label entry, loop; - __ mov(eax, Operand(ebp, kIndexOffset)); - __ jmp(&entry); - __ bind(&loop); - __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments + __ bind(&call_to_object); + __ push(ebx); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(ebx, eax); + __ jmp(&push_receiver); - // Use inline caching to speed up access to arguments. - Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize(); - __ call(ic, RelocInfo::CODE_TARGET); - // It is important that we do not have a test instruction after the - // call. A test instruction after the call is used to indicate that - // we have generated an inline version of the keyed load. In this - // case, we know that we are not generating a test instruction next. + // Use the current global receiver object as the receiver. 
+ __ bind(&use_global_receiver); + const int kGlobalOffset = + Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; + __ mov(ebx, FieldOperand(esi, kGlobalOffset)); + __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset)); + __ mov(ebx, FieldOperand(ebx, kGlobalOffset)); + __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset)); - // Push the nth argument. - __ push(eax); + // Push the receiver. + __ bind(&push_receiver); + __ push(ebx); - // Update the index on the stack and in register eax. - __ mov(eax, Operand(ebp, kIndexOffset)); - __ add(Operand(eax), Immediate(1 << kSmiTagSize)); - __ mov(Operand(ebp, kIndexOffset), eax); + // Copy all arguments from the array to the stack. + Label entry, loop; + __ mov(eax, Operand(ebp, kIndexOffset)); + __ jmp(&entry); + __ bind(&loop); + __ mov(edx, Operand(ebp, kArgumentsOffset)); // load arguments - __ bind(&entry); - __ cmp(eax, Operand(ebp, kLimitOffset)); - __ j(not_equal, &loop); + // Use inline caching to speed up access to arguments. + Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize(); + __ call(ic, RelocInfo::CODE_TARGET); + // It is important that we do not have a test instruction after the + // call. A test instruction after the call is used to indicate that + // we have generated an inline version of the keyed load. In this + // case, we know that we are not generating a test instruction next. - // Invoke the function. - Label call_proxy; - ParameterCount actual(eax); - __ SmiUntag(eax); - __ mov(edi, Operand(ebp, kFunctionOffset)); - __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); - __ j(not_equal, &call_proxy); - __ InvokeFunction(edi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Push the nth argument. + __ push(eax); - __ LeaveInternalFrame(); - __ ret(3 * kPointerSize); // remove this, receiver, and arguments + // Update the index on the stack and in register eax. + __ mov(eax, Operand(ebp, kIndexOffset)); + __ add(eax, Immediate(1 << kSmiTagSize)); + __ mov(Operand(ebp, kIndexOffset), eax); - // Invoke the function proxy. - __ bind(&call_proxy); - __ push(edi); // add function proxy as last argument - __ inc(eax); - __ Set(ebx, Immediate(0)); - __ SetCallKind(ecx, CALL_AS_METHOD); - __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY); - __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + __ bind(&entry); + __ cmp(eax, Operand(ebp, kLimitOffset)); + __ j(not_equal, &loop); + + // Invoke the function. + Label call_proxy; + ParameterCount actual(eax); + __ SmiUntag(eax); + __ mov(edi, Operand(ebp, kFunctionOffset)); + __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); + __ j(not_equal, &call_proxy); + __ InvokeFunction(edi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + + frame_scope.GenerateLeaveFrame(); + __ ret(3 * kPointerSize); // remove this, receiver, and arguments + + // Invoke the function proxy. + __ bind(&call_proxy); + __ push(edi); // add function proxy as last argument + __ inc(eax); + __ Set(ebx, Immediate(0)); + __ SetCallKind(ecx, CALL_AS_METHOD); + __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY); + __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); - __ LeaveInternalFrame(); + // Leave internal frame. 
+ } __ ret(3 * kPointerSize); // remove this, receiver, and arguments } @@ -983,9 +1005,9 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, __ jmp(&entry); __ bind(&loop); __ mov(Operand(scratch1, 0), factory->the_hole_value()); - __ add(Operand(scratch1), Immediate(kPointerSize)); + __ add(scratch1, Immediate(kPointerSize)); __ bind(&entry); - __ cmp(scratch1, Operand(scratch2)); + __ cmp(scratch1, scratch2); __ j(below, &loop); } } @@ -1082,7 +1104,7 @@ static void AllocateJSArray(MacroAssembler* masm, __ bind(&loop); __ stos(); __ bind(&entry); - __ cmp(edi, Operand(elements_array_end)); + __ cmp(edi, elements_array_end); __ j(below, &loop); __ bind(&done); } @@ -1120,7 +1142,7 @@ static void ArrayNativeCode(MacroAssembler* masm, __ push(eax); // Check for array construction with zero arguments. - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_zero, &argc_one_or_more); __ bind(&empty_array); @@ -1147,7 +1169,7 @@ static void ArrayNativeCode(MacroAssembler* masm, __ j(not_equal, &argc_two_or_more); STATIC_ASSERT(kSmiTag == 0); __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize)); - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(not_zero, ¬_empty_array); // The single argument passed is zero, so we jump to the code above used to @@ -1160,7 +1182,7 @@ static void ArrayNativeCode(MacroAssembler* masm, __ mov(eax, Operand(esp, i * kPointerSize)); __ mov(Operand(esp, (i + 1) * kPointerSize), eax); } - __ add(Operand(esp), Immediate(2 * kPointerSize)); // Drop two stack slots. + __ add(esp, Immediate(2 * kPointerSize)); // Drop two stack slots. __ push(Immediate(0)); // Treat this as a call with argc of zero. __ jmp(&empty_array); @@ -1250,7 +1272,7 @@ static void ArrayNativeCode(MacroAssembler* masm, __ bind(&loop); __ mov(eax, Operand(edi, ecx, times_pointer_size, 0)); __ mov(Operand(edx, 0), eax); - __ add(Operand(edx), Immediate(kPointerSize)); + __ add(edx, Immediate(kPointerSize)); __ bind(&entry); __ dec(ecx); __ j(greater_equal, &loop); @@ -1356,14 +1378,14 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { if (FLAG_debug_code) { __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx); - __ cmp(edi, Operand(ecx)); + __ cmp(edi, ecx); __ Assert(equal, "Unexpected String function"); } // Load the first argument into eax and get rid of the rest // (including the receiver). Label no_arguments; - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(zero, &no_arguments); __ mov(ebx, Operand(esp, eax, times_pointer_size, 0)); __ pop(ecx); @@ -1439,12 +1461,13 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // Invoke the conversion builtin and put the result into ebx. __ bind(&convert_argument); __ IncrementCounter(counters->string_ctor_conversions(), 1); - __ EnterInternalFrame(); - __ push(edi); // Preserve the function. - __ push(eax); - __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); - __ pop(edi); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(edi); // Preserve the function. + __ push(eax); + __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); + __ pop(edi); + } __ mov(ebx, eax); __ jmp(&argument_is_string); @@ -1461,17 +1484,18 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // create a string wrapper. 
__ bind(&gc_required); __ IncrementCounter(counters->string_ctor_gc_required(), 1); - __ EnterInternalFrame(); - __ push(ebx); - __ CallRuntime(Runtime::kNewStringWrapper, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(ebx); + __ CallRuntime(Runtime::kNewStringWrapper, 1); + } __ ret(0); } static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { __ push(ebp); - __ mov(ebp, Operand(esp)); + __ mov(ebp, esp); // Store the arguments adaptor context sentinel. __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); @@ -1515,7 +1539,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1); Label enough, too_few; - __ cmp(eax, Operand(ebx)); + __ cmp(eax, ebx); __ j(less, &too_few); __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel); __ j(equal, &dont_adapt_arguments); @@ -1533,8 +1557,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ bind(©); __ inc(edi); __ push(Operand(eax, 0)); - __ sub(Operand(eax), Immediate(kPointerSize)); - __ cmp(edi, Operand(ebx)); + __ sub(eax, Immediate(kPointerSize)); + __ cmp(edi, ebx); __ j(less, ©); __ jmp(&invoke); } @@ -1547,17 +1571,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { const int offset = StandardFrameConstants::kCallerSPOffset; __ lea(edi, Operand(ebp, eax, times_4, offset)); // ebx = expected - actual. - __ sub(ebx, Operand(eax)); + __ sub(ebx, eax); // eax = -actual - 1 __ neg(eax); - __ sub(Operand(eax), Immediate(1)); + __ sub(eax, Immediate(1)); Label copy; __ bind(©); __ inc(eax); __ push(Operand(edi, 0)); - __ sub(Operand(edi), Immediate(kPointerSize)); - __ test(eax, Operand(eax)); + __ sub(edi, Immediate(kPointerSize)); + __ test(eax, eax); __ j(not_zero, ©); // Fill remaining expected arguments with undefined values. @@ -1565,7 +1589,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ bind(&fill); __ inc(eax); __ push(Immediate(masm->isolate()->factory()->undefined_value())); - __ cmp(eax, Operand(ebx)); + __ cmp(eax, ebx); __ j(less, &fill); } @@ -1573,7 +1597,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ bind(&invoke); // Restore function pointer. __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); - __ call(Operand(edx)); + __ call(edx); // Leave frame and return. LeaveArgumentsAdaptorFrame(masm); @@ -1583,13 +1607,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // Dont adapt arguments. // ------------------------------------------- __ bind(&dont_adapt_arguments); - __ jmp(Operand(edx)); + __ jmp(edx); } void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { CpuFeatures::TryForceFeatureScope scope(SSE2); - if (!CpuFeatures::IsSupported(SSE2)) { + if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) { __ Abort("Unreachable code: Cannot optimize without SSE2 support."); return; } @@ -1616,15 +1640,16 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Pass the function to optimize as the argument to the on-stack // replacement runtime function. 
- __ EnterInternalFrame(); - __ push(eax); - __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(eax); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + } // If the result was -1 it means that we couldn't optimize the // function. Just return and continue in the unoptimized version. Label skip; - __ cmp(Operand(eax), Immediate(Smi::FromInt(-1))); + __ cmp(eax, Immediate(Smi::FromInt(-1))); __ j(not_equal, &skip, Label::kNear); __ ret(0); @@ -1638,7 +1663,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { __ j(above_equal, &ok, Label::kNear); StackCheckStub stub; __ TailCallStub(&stub); - __ Abort("Unreachable code: returned from tail call."); + if (FLAG_debug_code) { + __ Abort("Unreachable code: returned from tail call."); + } __ bind(&ok); __ ret(0); diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index 1009aaf57..1e886e202 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -49,7 +49,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) { __ bind(&check_heap_number); __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); Factory* factory = masm->isolate()->factory(); - __ cmp(Operand(ebx), Immediate(factory->heap_number_map())); + __ cmp(ebx, Immediate(factory->heap_number_map())); __ j(not_equal, &call_builtin, Label::kNear); __ ret(0); @@ -150,7 +150,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } // Return and remove the on-stack parameter. - __ mov(esi, Operand(eax)); + __ mov(esi, eax); __ ret(1 * kPointerSize); // Need to collect. Call into runtime system. @@ -159,6 +159,77 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } +void FastNewBlockContextStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [esp + (1 * kPointerSize)]: function + // [esp + (2 * kPointerSize)]: serialized scope info + + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + __ AllocateInNewSpace(FixedArray::SizeFor(length), + eax, ebx, ecx, &gc, TAG_OBJECT); + + // Get the function or sentinel from the stack. + __ mov(ecx, Operand(esp, 1 * kPointerSize)); + + // Get the serialized scope info from the stack. + __ mov(ebx, Operand(esp, 2 * kPointerSize)); + + // Setup the object header. + Factory* factory = masm->isolate()->factory(); + __ mov(FieldOperand(eax, HeapObject::kMapOffset), + factory->block_context_map()); + __ mov(FieldOperand(eax, Context::kLengthOffset), + Immediate(Smi::FromInt(length))); + + // If this block context is nested in the global context we get a smi + // sentinel instead of a function. The block context should get the + // canonical empty function of the global context as its closure which + // we still have to look up. + Label after_sentinel; + __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear); + if (FLAG_debug_code) { + const char* message = "Expected 0 as a Smi sentinel"; + __ cmp(ecx, 0); + __ Assert(equal, message); + } + __ mov(ecx, GlobalObjectOperand()); + __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset)); + __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX)); + __ bind(&after_sentinel); + + // Setup the fixed slots. 
+ __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx); + __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi); + __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx); + + // Copy the global object from the previous context. + __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX)); + __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx); + + // Initialize the rest of the slots to the hole value. + if (slots_ == 1) { + __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS), + factory->the_hole_value()); + } else { + __ mov(ebx, factory->the_hole_value()); + for (int i = 0; i < slots_; i++) { + __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx); + } + } + + // Return and remove the on-stack parameters. + __ mov(esi, eax); + __ ret(2 * kPointerSize); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); +} + + void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // Stack layout on entry: // @@ -239,6 +310,8 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // The stub expects its argument on the stack and returns its result in tos_: // zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. Label patch; Factory* factory = masm->isolate()->factory(); const Register argument = eax; @@ -336,6 +409,41 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { } +void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { + // We don't allow a GC during a store buffer overflow so there is no need to + // store the registers in any particular way, but we do have to store and + // restore them. + __ pushad(); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(SSE2); + __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); + for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + XMMRegister reg = XMMRegister::from_code(i); + __ movdbl(Operand(esp, i * kDoubleSize), reg); + } + } + const int argument_count = 1; + + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(argument_count, ecx); + __ mov(Operand(esp, 0 * kPointerSize), + Immediate(ExternalReference::isolate_address())); + __ CallCFunction( + ExternalReference::store_buffer_overflow_function(masm->isolate()), + argument_count); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(SSE2); + for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + XMMRegister reg = XMMRegister::from_code(i); + __ movdbl(reg, Operand(esp, i * kDoubleSize)); + } + __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); + } + __ popad(); + __ ret(0); +} + + void ToBooleanStub::CheckOddball(MacroAssembler* masm, Type type, Heap::RootListIndex value, @@ -470,27 +578,27 @@ static void IntegerConvert(MacroAssembler* masm, // Check whether the exponent is too big for a 64 bit signed integer. static const uint32_t kTooBigExponent = (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(kTooBigExponent)); + __ cmp(scratch2, Immediate(kTooBigExponent)); __ j(greater_equal, conversion_failure); // Load x87 register with heap number. __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); // Reserve space for 64 bit answer. - __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. + __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint. 
// Do conversion, which cannot fail because we checked the exponent. __ fisttp_d(Operand(esp, 0)); __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. - __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. + __ add(esp, Immediate(sizeof(uint64_t))); // Nolint. } else { // Load ecx with zero. We use this either for the final shift or // for the answer. - __ xor_(ecx, Operand(ecx)); + __ xor_(ecx, ecx); // Check whether the exponent matches a 32 bit signed int that cannot be // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the // exponent is 30 (biased). This is the exponent that we are fastest at and // also the highest exponent we can handle here. const uint32_t non_smi_exponent = (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); + __ cmp(scratch2, Immediate(non_smi_exponent)); // If we have a match of the int32-but-not-Smi exponent then skip some // logic. __ j(equal, &right_exponent, Label::kNear); @@ -503,7 +611,7 @@ static void IntegerConvert(MacroAssembler* masm, // >>> operator has a tendency to generate numbers with an exponent of 31. const uint32_t big_non_smi_exponent = (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); + __ cmp(scratch2, Immediate(big_non_smi_exponent)); __ j(not_equal, conversion_failure); // We have the big exponent, typically from >>>. This means the number is // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. @@ -522,9 +630,9 @@ static void IntegerConvert(MacroAssembler* masm, // Shift down 21 bits to get the most significant 11 bits or the low // mantissa word. __ shr(ecx, 32 - big_shift_distance); - __ or_(ecx, Operand(scratch2)); + __ or_(ecx, scratch2); // We have the answer in ecx, but we may need to negate it. - __ test(scratch, Operand(scratch)); + __ test(scratch, scratch); __ j(positive, &done, Label::kNear); __ neg(ecx); __ jmp(&done, Label::kNear); @@ -538,14 +646,14 @@ static void IntegerConvert(MacroAssembler* masm, // it rounds to zero. const uint32_t zero_exponent = (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; - __ sub(Operand(scratch2), Immediate(zero_exponent)); + __ sub(scratch2, Immediate(zero_exponent)); // ecx already has a Smi zero. __ j(less, &done, Label::kNear); // We have a shifted exponent between 0 and 30 in scratch2. __ shr(scratch2, HeapNumber::kExponentShift); __ mov(ecx, Immediate(30)); - __ sub(ecx, Operand(scratch2)); + __ sub(ecx, scratch2); __ bind(&right_exponent); // Here ecx is the shift, scratch is the exponent word. @@ -565,19 +673,19 @@ static void IntegerConvert(MacroAssembler* masm, // Shift down 22 bits to get the most significant 10 bits or the low // mantissa word. __ shr(scratch2, 32 - shift_distance); - __ or_(scratch2, Operand(scratch)); + __ or_(scratch2, scratch); // Move down according to the exponent. __ shr_cl(scratch2); // Now the unsigned answer is in scratch2. We need to move it to ecx and // we may need to fix the sign. 
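The IntegerConvert hunks above classify a heap number by the biased exponent held in the high word of the double before deciding how to truncate it. The rough standalone illustration below uses only the standard IEEE-754 layout (sign bit, 11-bit exponent with bias 1023 starting at bit 20 of the high word), not any real V8 constants.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Extract the high 32 bits of a double's bit pattern.
static uint32_t HighWord(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32);
}

// The 11-bit biased exponent lives at bit 20 of the high word.
static int BiasedExponent(double d) {
  return static_cast<int>((HighWord(d) >> 20) & 0x7FF);
}

int main() {
  const int kBias = 1023;
  const double samples[] = {3.5, 1073741824.0 /* 2^30 */, 1.0e19 /* > 2^63 */};
  for (double d : samples) {
    printf("%-14g biased exponent %4d, unbiased %3d\n",
           d, BiasedExponent(d), BiasedExponent(d) - kBias);
  }
  // The stub's comparisons above use the same exponent, pre-shifted into
  // high-word position: >= 63 is "too big for a 64 bit signed integer",
  // 30 is the int32-but-not-Smi fast case, 31 typically comes from >>>.
}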
Label negative; - __ xor_(ecx, Operand(ecx)); + __ xor_(ecx, ecx); __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); __ j(greater, &negative, Label::kNear); __ mov(ecx, scratch2); __ jmp(&done, Label::kNear); __ bind(&negative); - __ sub(ecx, Operand(scratch2)); + __ sub(ecx, scratch2); __ bind(&done); } } @@ -679,13 +787,13 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, __ JumpIfNotSmi(eax, non_smi, non_smi_near); // We can't handle -0 with smis, so use a type transition for that case. - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(zero, slow, slow_near); // Try optimistic subtraction '0 - value', saving operand in eax for undo. - __ mov(edx, Operand(eax)); + __ mov(edx, eax); __ Set(eax, Immediate(0)); - __ sub(eax, Operand(edx)); + __ sub(eax, edx); __ j(overflow, undo, undo_near); __ ret(0); } @@ -706,7 +814,7 @@ void UnaryOpStub::GenerateSmiCodeBitNot( void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) { - __ mov(eax, Operand(edx)); + __ mov(eax, edx); } @@ -760,7 +868,7 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(HeapNumber::kSignMask)); // Flip sign. } else { - __ mov(edx, Operand(eax)); + __ mov(edx, eax); // edx: operand Label slow_allocate_heapnumber, heapnumber_allocated; @@ -768,11 +876,12 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ jmp(&heapnumber_allocated, Label::kNear); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(edx); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ pop(edx); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(edx); + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ pop(edx); + } __ bind(&heapnumber_allocated); // eax: allocated 'empty' number @@ -815,15 +924,16 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - // Push the original HeapNumber on the stack. The integer value can't - // be stored since it's untagged and not in the smi range (so we can't - // smi-tag it). We'll recalculate the value after the GC instead. - __ push(ebx); - __ CallRuntime(Runtime::kNumberAlloc, 0); - // New HeapNumber is in eax. - __ pop(edx); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Push the original HeapNumber on the stack. The integer value can't + // be stored since it's untagged and not in the smi range (so we can't + // smi-tag it). We'll recalculate the value after the GC instead. + __ push(ebx); + __ CallRuntime(Runtime::kNumberAlloc, 0); + // New HeapNumber is in eax. + __ pop(edx); + } // IntegerConvert uses ebx and edi as scratch registers. // This conversion won't go slow-case. IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow); @@ -833,7 +943,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, } if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ecx)); + __ cvtsi2sd(xmm0, ecx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { __ push(ecx); @@ -947,6 +1057,10 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) { void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. It is safe here because + // generation code does not use any raw pointers. 
+ AllowStubCallsScope allow_stub_calls(masm, true); + switch (operands_type_) { case BinaryOpIC::UNINITIALIZED: GenerateTypeTransition(masm); @@ -1022,7 +1136,7 @@ void BinaryOpStub::GenerateSmiCode( // eax in case the result is not a smi. ASSERT(!left.is(ecx) && !right.is(ecx)); __ mov(ecx, right); - __ or_(right, Operand(left)); // Bitwise or is commutative. + __ or_(right, left); // Bitwise or is commutative. combined = right; break; @@ -1034,7 +1148,7 @@ void BinaryOpStub::GenerateSmiCode( case Token::DIV: case Token::MOD: __ mov(combined, right); - __ or_(combined, Operand(left)); + __ or_(combined, left); break; case Token::SHL: @@ -1044,7 +1158,7 @@ void BinaryOpStub::GenerateSmiCode( // for the smi check register. ASSERT(!left.is(ecx) && !right.is(ecx)); __ mov(ecx, right); - __ or_(right, Operand(left)); + __ or_(right, left); combined = right; break; @@ -1067,12 +1181,12 @@ void BinaryOpStub::GenerateSmiCode( case Token::BIT_XOR: ASSERT(right.is(eax)); - __ xor_(right, Operand(left)); // Bitwise xor is commutative. + __ xor_(right, left); // Bitwise xor is commutative. break; case Token::BIT_AND: ASSERT(right.is(eax)); - __ and_(right, Operand(left)); // Bitwise and is commutative. + __ and_(right, left); // Bitwise and is commutative. break; case Token::SHL: @@ -1121,12 +1235,12 @@ void BinaryOpStub::GenerateSmiCode( case Token::ADD: ASSERT(right.is(eax)); - __ add(right, Operand(left)); // Addition is commutative. + __ add(right, left); // Addition is commutative. __ j(overflow, &use_fp_on_smis); break; case Token::SUB: - __ sub(left, Operand(right)); + __ sub(left, right); __ j(overflow, &use_fp_on_smis); __ mov(eax, left); break; @@ -1140,7 +1254,7 @@ void BinaryOpStub::GenerateSmiCode( // Remove tag from one of the operands (but keep sign). __ SmiUntag(right); // Do multiplication. - __ imul(right, Operand(left)); // Multiplication is commutative. + __ imul(right, left); // Multiplication is commutative. __ j(overflow, &use_fp_on_smis); // Check for negative zero result. Use combined = left | right. __ NegativeZeroTest(right, combined, &use_fp_on_smis); @@ -1151,7 +1265,7 @@ void BinaryOpStub::GenerateSmiCode( // save the left operand. __ mov(edi, left); // Check for 0 divisor. - __ test(right, Operand(right)); + __ test(right, right); __ j(zero, &use_fp_on_smis); // Sign extend left into edx:eax. ASSERT(left.is(eax)); @@ -1167,7 +1281,7 @@ void BinaryOpStub::GenerateSmiCode( // Check for negative zero result. Use combined = left | right. __ NegativeZeroTest(eax, combined, &use_fp_on_smis); // Check that the remainder is zero. - __ test(edx, Operand(edx)); + __ test(edx, edx); __ j(not_zero, &use_fp_on_smis); // Tag the result and store it in register eax. __ SmiTag(eax); @@ -1175,7 +1289,7 @@ void BinaryOpStub::GenerateSmiCode( case Token::MOD: // Check for 0 divisor. - __ test(right, Operand(right)); + __ test(right, right); __ j(zero, ¬_smis); // Sign extend left into edx:eax. @@ -1226,11 +1340,11 @@ void BinaryOpStub::GenerateSmiCode( break; case Token::ADD: // Revert right = right + left. - __ sub(right, Operand(left)); + __ sub(right, left); break; case Token::SUB: // Revert left = left - right. - __ add(left, Operand(right)); + __ add(left, right); break; case Token::MUL: // Right was clobbered but a copy is in ebx. 
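The Smi fast paths above add, subtract and multiply the tagged values directly and rely on the overflow flag, reverting the clobbered operand ("Revert right = right + left") before falling through to the floating-point code. The plain C++ sketch below restates that pattern, assuming the ia32 Smi encoding (value * 2, tag bit 0); the GCC/Clang builtin __builtin_add_overflow stands in for the jump-on-overflow.

#include <cstdint>
#include <cstdio>

static int32_t SmiTag(int32_t value) { return value << 1; }   // tag bit 0 is 0
static int32_t SmiUntag(int32_t smi) { return smi >> 1; }

// Adds two tagged values. Returns false on overflow, leaving *right with its
// original value (the generated code adds in place, so it has to subtract
// left back out; using a temporary here gives the same "operands intact"
// guarantee the slow path depends on).
static bool SmiAdd(int32_t left, int32_t* right) {
  int32_t result;
  if (__builtin_add_overflow(*right, left, &result)) return false;
  *right = result;
  return true;
}

int main() {
  int32_t a = SmiTag(1000000000);          // near the top of the Smi range
  int32_t b = SmiTag(1000000000);
  if (!SmiAdd(a, &b)) {
    printf("overflow: take the heap-number path, operands intact (%d, %d)\n",
           SmiUntag(a), SmiUntag(b));
  }
  int32_t c = SmiTag(2), d = SmiTag(3);
  if (SmiAdd(c, &d)) printf("2 + 3 = %d\n", SmiUntag(d));
}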
@@ -1268,7 +1382,7 @@ void BinaryOpStub::GenerateSmiCode( ASSERT_EQ(Token::SHL, op_); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(left)); + __ cvtsi2sd(xmm0, left); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { __ mov(Operand(esp, 1 * kPointerSize), left); @@ -1290,11 +1404,11 @@ void BinaryOpStub::GenerateSmiCode( switch (op_) { case Token::ADD: // Revert right = right + left. - __ sub(right, Operand(left)); + __ sub(right, left); break; case Token::SUB: // Revert left = left - right. - __ add(left, Operand(right)); + __ add(left, right); break; case Token::MUL: // Right was clobbered but a copy is in ebx. @@ -1486,7 +1600,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Check result type if it is currently Int32. if (result_type_ <= BinaryOpIC::INT32) { __ cvttsd2si(ecx, Operand(xmm0)); - __ cvtsi2sd(xmm2, Operand(ecx)); + __ cvtsi2sd(xmm2, ecx); __ ucomisd(xmm0, xmm2); __ j(not_zero, ¬_int32); __ j(carry, ¬_int32); @@ -1548,9 +1662,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_, ¬_int32); switch (op_) { - case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; - case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; - case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; + case Token::BIT_OR: __ or_(eax, ecx); break; + case Token::BIT_AND: __ and_(eax, ecx); break; + case Token::BIT_XOR: __ xor_(eax, ecx); break; case Token::SAR: __ sar_cl(eax); break; case Token::SHL: __ shl_cl(eax); break; case Token::SHR: __ shr_cl(eax); break; @@ -1574,7 +1688,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { if (op_ != Token::SHR) { __ bind(&non_smi_result); // Allocate a heap number if needed. - __ mov(ebx, Operand(eax)); // ebx: result + __ mov(ebx, eax); // ebx: result Label skip_allocation; switch (mode_) { case OVERWRITE_LEFT: @@ -1594,7 +1708,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Store the result in the HeapNumber and return. 
if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ebx)); + __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { __ mov(Operand(esp, 1 * kPointerSize), ebx); @@ -1675,7 +1789,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { __ cmp(edx, factory->undefined_value()); __ j(not_equal, &check, Label::kNear); if (Token::IsBitOp(op_)) { - __ xor_(edx, Operand(edx)); + __ xor_(edx, edx); } else { __ mov(edx, Immediate(factory->nan_value())); } @@ -1684,7 +1798,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { __ cmp(eax, factory->undefined_value()); __ j(not_equal, &done, Label::kNear); if (Token::IsBitOp(op_)) { - __ xor_(eax, Operand(eax)); + __ xor_(eax, eax); } else { __ mov(eax, Immediate(factory->nan_value())); } @@ -1762,9 +1876,9 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { use_sse3_, ¬_floats); switch (op_) { - case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; - case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; - case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; + case Token::BIT_OR: __ or_(eax, ecx); break; + case Token::BIT_AND: __ and_(eax, ecx); break; + case Token::BIT_XOR: __ xor_(eax, ecx); break; case Token::SAR: __ sar_cl(eax); break; case Token::SHL: __ shl_cl(eax); break; case Token::SHR: __ shr_cl(eax); break; @@ -1788,7 +1902,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { if (op_ != Token::SHR) { __ bind(&non_smi_result); // Allocate a heap number if needed. - __ mov(ebx, Operand(eax)); // ebx: result + __ mov(ebx, eax); // ebx: result Label skip_allocation; switch (mode_) { case OVERWRITE_LEFT: @@ -1808,7 +1922,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { // Store the result in the HeapNumber and return. if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ebx)); + __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { __ mov(Operand(esp, 1 * kPointerSize), ebx); @@ -1961,9 +2075,9 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { use_sse3_, &call_runtime); switch (op_) { - case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; - case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; - case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; + case Token::BIT_OR: __ or_(eax, ecx); break; + case Token::BIT_AND: __ and_(eax, ecx); break; + case Token::BIT_XOR: __ xor_(eax, ecx); break; case Token::SAR: __ sar_cl(eax); break; case Token::SHL: __ shl_cl(eax); break; case Token::SHR: __ shr_cl(eax); break; @@ -1987,7 +2101,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { if (op_ != Token::SHR) { __ bind(&non_smi_result); // Allocate a heap number if needed. - __ mov(ebx, Operand(eax)); // ebx: result + __ mov(ebx, eax); // ebx: result Label skip_allocation; switch (mode_) { case OVERWRITE_LEFT: @@ -2007,7 +2121,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { // Store the result in the HeapNumber and return. 
if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ebx)); + __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { __ mov(Operand(esp, 1 * kPointerSize), ebx); @@ -2117,10 +2231,10 @@ void BinaryOpStub::GenerateHeapResultAllocation( __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); // Now edx can be overwritten losing one of the arguments as we are // now done and will not need it any more. - __ mov(edx, Operand(ebx)); + __ mov(edx, ebx); __ bind(&skip_allocation); // Use object in edx as a result holder - __ mov(eax, Operand(edx)); + __ mov(eax, edx); break; } case OVERWRITE_RIGHT: @@ -2178,7 +2292,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Then load the low and high words of the double into ebx, edx. STATIC_ASSERT(kSmiTagSize == 1); __ sar(eax, 1); - __ sub(Operand(esp), Immediate(2 * kPointerSize)); + __ sub(esp, Immediate(2 * kPointerSize)); __ mov(Operand(esp, 0), eax); __ fild_s(Operand(esp, 0)); __ fst_d(Operand(esp, 0)); @@ -2189,7 +2303,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Check if input is a HeapNumber. __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); Factory* factory = masm->isolate()->factory(); - __ cmp(Operand(ebx), Immediate(factory->heap_number_map())); + __ cmp(ebx, Immediate(factory->heap_number_map())); __ j(not_equal, &runtime_call); // Input is a HeapNumber. Push it on the FPU stack and load its // low and high words into ebx, edx. @@ -2201,12 +2315,12 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { } else { // UNTAGGED. if (CpuFeatures::IsSupported(SSE4_1)) { CpuFeatures::Scope sse4_scope(SSE4_1); - __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx. + __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx. } else { __ pshufd(xmm0, xmm1, 0x1); - __ movd(Operand(edx), xmm0); + __ movd(edx, xmm0); } - __ movd(Operand(ebx), xmm1); + __ movd(ebx, xmm1); } // ST[0] or xmm1 == double value @@ -2215,15 +2329,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Compute hash (the shifts are arithmetic): // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); __ mov(ecx, ebx); - __ xor_(ecx, Operand(edx)); + __ xor_(ecx, edx); __ mov(eax, ecx); __ sar(eax, 16); - __ xor_(ecx, Operand(eax)); + __ xor_(ecx, eax); __ mov(eax, ecx); __ sar(eax, 8); - __ xor_(ecx, Operand(eax)); + __ xor_(ecx, eax); ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); - __ and_(Operand(ecx), + __ and_(ecx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1)); // ST[0] or xmm1 == double value. @@ -2238,7 +2352,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ mov(eax, Operand(eax, cache_array_index)); // Eax points to the cache for the type type_. // If NULL, the cache hasn't been initialized yet, so go through runtime. - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(zero, &runtime_call_clear_stack); #ifdef DEBUG // Check that the layout of cache elements match expectations. @@ -2281,10 +2395,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); } else { // UNTAGGED. 
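The cache lookup in the TranscendentalCacheStub hunk above hashes the two 32-bit halves of the input double exactly as its comment describes: h = low ^ high; h ^= h >> 16; h ^= h >> 8; h &= cacheSize - 1. The standalone restatement below uses kCacheSize as a stand-in for the real SubCache::kCacheSize and assumes arithmetic right shifts, matching the sar instructions in the stub.

#include <cstdint>
#include <cstdio>
#include <cstring>

static const int32_t kCacheSize = 512;   // stand-in; must be a power of two

static int32_t CacheIndex(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  int32_t low = static_cast<int32_t>(bits);
  int32_t high = static_cast<int32_t>(bits >> 32);
  int32_t h = low ^ high;   // mix both halves of the double
  h ^= h >> 16;
  h ^= h >> 8;
  return h & (kCacheSize - 1);             // index into the cache
}

int main() {
  const double samples[] = {0.5, 1.0, 3.14159, -2.75};
  for (double d : samples) {
    printf("index(%g) = %d\n", d, CacheIndex(d));
  }
}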
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); - __ sub(Operand(esp), Immediate(kDoubleSize)); + __ sub(esp, Immediate(kDoubleSize)); __ movdbl(Operand(esp, 0), xmm1); __ fld_d(Operand(esp, 0)); - __ add(Operand(esp), Immediate(kDoubleSize)); + __ add(esp, Immediate(kDoubleSize)); } GenerateOperation(masm); __ mov(Operand(ecx, 0), ebx); @@ -2299,20 +2413,21 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Skip cache and return answer directly, only in untagged case. __ bind(&skip_cache); - __ sub(Operand(esp), Immediate(kDoubleSize)); + __ sub(esp, Immediate(kDoubleSize)); __ movdbl(Operand(esp, 0), xmm1); __ fld_d(Operand(esp, 0)); GenerateOperation(masm); __ fstp_d(Operand(esp, 0)); __ movdbl(xmm1, Operand(esp, 0)); - __ add(Operand(esp), Immediate(kDoubleSize)); + __ add(esp, Immediate(kDoubleSize)); // We return the value in xmm1 without adding it to the cache, but // we cause a scavenging GC so that future allocations will succeed. - __ EnterInternalFrame(); - // Allocate an unused object bigger than a HeapNumber. - __ push(Immediate(Smi::FromInt(2 * kDoubleSize))); - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Allocate an unused object bigger than a HeapNumber. + __ push(Immediate(Smi::FromInt(2 * kDoubleSize))); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + } __ Ret(); } @@ -2329,10 +2444,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ bind(&runtime_call); __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1); - __ EnterInternalFrame(); - __ push(eax); - __ CallRuntime(RuntimeFunction(), 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(eax); + __ CallRuntime(RuntimeFunction(), 1); + } __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ Ret(); } @@ -2364,13 +2480,13 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { // If argument is outside the range -2^63..2^63, fsin/cos doesn't // work. We must reduce it to the appropriate range. __ mov(edi, edx); - __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only. + __ and_(edi, Immediate(0x7ff00000)); // Exponent only. int supported_exponent_limit = (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; - __ cmp(Operand(edi), Immediate(supported_exponent_limit)); + __ cmp(edi, Immediate(supported_exponent_limit)); __ j(below, &in_range, Label::kNear); // Check for infinity and NaN. Both return NaN for sin. - __ cmp(Operand(edi), Immediate(0x7ff00000)); + __ cmp(edi, Immediate(0x7ff00000)); Label non_nan_result; __ j(not_equal, &non_nan_result, Label::kNear); // Input is +/-Infinity or NaN. Result is NaN. @@ -2379,7 +2495,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { __ push(Immediate(0x7ff80000)); __ push(Immediate(0)); __ fld_d(Operand(esp, 0)); - __ add(Operand(esp), Immediate(2 * kPointerSize)); + __ add(esp, Immediate(2 * kPointerSize)); __ jmp(&done, Label::kNear); __ bind(&non_nan_result); @@ -2395,7 +2511,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { __ fwait(); __ fnstsw_ax(); // Clear if Illegal Operand or Zero Division exceptions are set. 
- __ test(Operand(eax), Immediate(5)); + __ test(eax, Immediate(5)); __ j(zero, &no_exceptions, Label::kNear); __ fnclex(); __ bind(&no_exceptions); @@ -2408,7 +2524,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { __ fprem1(); __ fwait(); __ fnstsw_ax(); - __ test(Operand(eax), Immediate(0x400 /* C2 */)); + __ test(eax, Immediate(0x400 /* C2 */)); // If C2 is set, computation only has partial result. Loop to // continue computation. __ j(not_zero, &partial_remainder_loop); @@ -2541,13 +2657,13 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) { __ bind(&load_smi_edx); __ SmiUntag(edx); // Untag smi before converting to float. - __ cvtsi2sd(xmm0, Operand(edx)); + __ cvtsi2sd(xmm0, edx); __ SmiTag(edx); // Retag smi for heap number overwriting test. __ jmp(&load_eax); __ bind(&load_smi_eax); __ SmiUntag(eax); // Untag smi before converting to float. - __ cvtsi2sd(xmm1, Operand(eax)); + __ cvtsi2sd(xmm1, eax); __ SmiTag(eax); // Retag smi for heap number overwriting test. __ bind(&done); @@ -2571,12 +2687,12 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, __ jmp(not_numbers); // Argument in eax is not a number. __ bind(&load_smi_edx); __ SmiUntag(edx); // Untag smi before converting to float. - __ cvtsi2sd(xmm0, Operand(edx)); + __ cvtsi2sd(xmm0, edx); __ SmiTag(edx); // Retag smi for heap number overwriting test. __ jmp(&load_eax); __ bind(&load_smi_eax); __ SmiUntag(eax); // Untag smi before converting to float. - __ cvtsi2sd(xmm1, Operand(eax)); + __ cvtsi2sd(xmm1, eax); __ SmiTag(eax); // Retag smi for heap number overwriting test. __ jmp(&done, Label::kNear); __ bind(&load_float_eax); @@ -2592,11 +2708,11 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, __ mov(scratch, left); ASSERT(!scratch.is(right)); // We're about to clobber scratch. __ SmiUntag(scratch); - __ cvtsi2sd(xmm0, Operand(scratch)); + __ cvtsi2sd(xmm0, scratch); __ mov(scratch, right); __ SmiUntag(scratch); - __ cvtsi2sd(xmm1, Operand(scratch)); + __ cvtsi2sd(xmm1, scratch); } @@ -2604,12 +2720,12 @@ void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm, Label* non_int32, Register scratch) { __ cvttsd2si(scratch, Operand(xmm0)); - __ cvtsi2sd(xmm2, Operand(scratch)); + __ cvtsi2sd(xmm2, scratch); __ ucomisd(xmm0, xmm2); __ j(not_zero, non_int32); __ j(carry, non_int32); __ cvttsd2si(scratch, Operand(xmm1)); - __ cvtsi2sd(xmm2, Operand(scratch)); + __ cvtsi2sd(xmm2, scratch); __ ucomisd(xmm1, xmm2); __ j(not_zero, non_int32); __ j(carry, non_int32); @@ -2717,7 +2833,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // Save 1 in xmm3 - we need this several times later on. __ mov(ecx, Immediate(1)); - __ cvtsi2sd(xmm3, Operand(ecx)); + __ cvtsi2sd(xmm3, ecx); Label exponent_nonsmi; Label base_nonsmi; @@ -2728,7 +2844,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // Optimized version when both exponent and base are smis. Label powi; __ SmiUntag(edx); - __ cvtsi2sd(xmm0, Operand(edx)); + __ cvtsi2sd(xmm0, edx); __ jmp(&powi); // exponent is smi and base is a heapnumber. __ bind(&base_nonsmi); @@ -2770,11 +2886,11 @@ void MathPowStub::Generate(MacroAssembler* masm) { // base has the original value of the exponent - if the exponent is // negative return 1/result. - __ test(edx, Operand(edx)); + __ test(edx, edx); __ j(positive, &allocate_return); // Special case if xmm1 has reached infinity. 
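The Smi-exponent fast path of MathPowStub above (the powi label) computes an integer power and, as the comment notes, returns 1/result when the exponent was negative. The usual square-and-multiply loop below restates the arithmetic only, not the stub's register choreography; the loop structure itself is an assumption, since the full body is not shown in the hunk.

#include <cstdio>

static double PowInt(double base, int exponent) {
  bool negative = exponent < 0;
  unsigned n = negative ? static_cast<unsigned>(-static_cast<long long>(exponent))
                        : static_cast<unsigned>(exponent);
  double result = 1.0;
  double x = base;
  while (n != 0) {            // square-and-multiply
    if (n & 1) result *= x;
    x *= x;
    n >>= 1;
  }
  return negative ? 1.0 / result : result;   // negative exponent: 1/result
}

int main() {
  printf("2^10 = %g\n", PowInt(2.0, 10));
  printf("3^-2 = %g\n", PowInt(3.0, -2));
}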
__ mov(ecx, Immediate(0x7FB00000)); - __ movd(xmm0, Operand(ecx)); + __ movd(xmm0, ecx); __ cvtss2sd(xmm0, xmm0); __ ucomisd(xmm0, xmm1); __ j(equal, &call_runtime); @@ -2797,7 +2913,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { Label handle_special_cases; __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear); __ SmiUntag(edx); - __ cvtsi2sd(xmm0, Operand(edx)); + __ cvtsi2sd(xmm0, edx); __ jmp(&handle_special_cases, Label::kNear); __ bind(&base_not_smi); @@ -2806,7 +2922,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ j(not_equal, &call_runtime); __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset)); __ and_(ecx, HeapNumber::kExponentMask); - __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask)); + __ cmp(ecx, Immediate(HeapNumber::kExponentMask)); // base is NaN or +/-Infinity __ j(greater_equal, &call_runtime); __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); @@ -2817,7 +2933,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // Test for -0.5. // Load xmm2 with -0.5. __ mov(ecx, Immediate(0xBF000000)); - __ movd(xmm2, Operand(ecx)); + __ movd(xmm2, ecx); __ cvtss2sd(xmm2, xmm2); // xmm2 now has -0.5. __ ucomisd(xmm2, xmm1); @@ -2873,13 +2989,13 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { Label adaptor; __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); __ j(equal, &adaptor, Label::kNear); // Check index against formal parameters count limit passed in // through register eax. Use unsigned comparison to get negative // check for free. - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); __ j(above_equal, &slow, Label::kNear); // Read the argument from the stack and return it. @@ -2895,7 +3011,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // comparison to get negative check for free. __ bind(&adaptor); __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ cmp(edx, Operand(ecx)); + __ cmp(edx, ecx); __ j(above_equal, &slow, Label::kNear); // Read the argument from the stack and return it. @@ -2926,7 +3042,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { Label runtime; __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); __ j(not_equal, &runtime, Label::kNear); // Patch the arguments.length and the parameters pointer. @@ -2957,7 +3073,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { Label adaptor_frame, try_allocate; __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); __ j(equal, &adaptor_frame, Label::kNear); // No adaptor, parameter count = argument count. @@ -2976,7 +3092,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { // esp[4] = parameter count (tagged) // esp[8] = address of receiver argument // Compute the mapped parameter count = min(ebx, ecx) in ebx. 
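The GenerateNewNonStrictFast hunk continues below with exactly the computation this comment announces: the number of mapped parameters is min(formal parameter count, actual argument count), and a parameter map is only allocated when that count is non-zero. A trivial restatement of that bookkeeping (layout and sizes deliberately omitted):

#include <algorithm>
#include <cstdio>

int main() {
  int formal_parameters = 2;
  int actual_arguments = 5;   // e.g. f(a, b) called as f(1, 2, 3, 4, 5)
  int mapped = std::min(formal_parameters, actual_arguments);
  bool needs_parameter_map = mapped != 0;
  printf("mapped parameters: %d, parameter map needed: %s\n",
         mapped, needs_parameter_map ? "yes" : "no");
}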
- __ cmp(ebx, Operand(ecx)); + __ cmp(ebx, ecx); __ j(less_equal, &try_allocate, Label::kNear); __ mov(ebx, ecx); @@ -2990,7 +3106,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { const int kParameterMapHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize; Label no_parameter_map; - __ test(ebx, Operand(ebx)); + __ test(ebx, ebx); __ j(zero, &no_parameter_map, Label::kNear); __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize)); __ bind(&no_parameter_map); @@ -2999,7 +3115,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize)); // 3. Arguments object. - __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize)); + __ add(ebx, Immediate(Heap::kArgumentsObjectSize)); // Do the allocation of all three objects in one go. __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT); @@ -3014,7 +3130,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset)); __ mov(ebx, Operand(esp, 0 * kPointerSize)); - __ test(ebx, Operand(ebx)); + __ test(ebx, ebx); __ j(not_zero, &has_mapped_parameters, Label::kNear); __ mov(edi, Operand(edi, Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX))); @@ -3069,7 +3185,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { // Initialize parameter map. If there are no mapped arguments, we're done. Label skip_parameter_map; - __ test(ebx, Operand(ebx)); + __ test(ebx, ebx); __ j(zero, &skip_parameter_map); __ mov(FieldOperand(edi, FixedArray::kMapOffset), @@ -3093,7 +3209,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ mov(eax, Operand(esp, 2 * kPointerSize)); __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); __ add(ebx, Operand(esp, 4 * kPointerSize)); - __ sub(ebx, Operand(eax)); + __ sub(ebx, eax); __ mov(ecx, FACTORY->the_hole_value()); __ mov(edx, edi); __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize)); @@ -3110,12 +3226,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ jmp(¶meters_test, Label::kNear); __ bind(¶meters_loop); - __ sub(Operand(eax), Immediate(Smi::FromInt(1))); + __ sub(eax, Immediate(Smi::FromInt(1))); __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx); __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx); - __ add(Operand(ebx), Immediate(Smi::FromInt(1))); + __ add(ebx, Immediate(Smi::FromInt(1))); __ bind(¶meters_test); - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_zero, ¶meters_loop, Label::kNear); __ pop(ecx); @@ -3135,18 +3251,18 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { Label arguments_loop, arguments_test; __ mov(ebx, Operand(esp, 1 * kPointerSize)); __ mov(edx, Operand(esp, 4 * kPointerSize)); - __ sub(Operand(edx), ebx); // Is there a smarter way to do negative scaling? - __ sub(Operand(edx), ebx); + __ sub(edx, ebx); // Is there a smarter way to do negative scaling? 
+ __ sub(edx, ebx); __ jmp(&arguments_test, Label::kNear); __ bind(&arguments_loop); - __ sub(Operand(edx), Immediate(kPointerSize)); + __ sub(edx, Immediate(kPointerSize)); __ mov(eax, Operand(edx, 0)); __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax); - __ add(Operand(ebx), Immediate(Smi::FromInt(1))); + __ add(ebx, Immediate(Smi::FromInt(1))); __ bind(&arguments_test); - __ cmp(ebx, Operand(ecx)); + __ cmp(ebx, ecx); __ j(less, &arguments_loop, Label::kNear); // Restore. @@ -3174,7 +3290,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { Label adaptor_frame, try_allocate, runtime; __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); __ j(equal, &adaptor_frame, Label::kNear); // Get the length from the frame. @@ -3193,11 +3309,11 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // the arguments object and the elements array. Label add_arguments_object; __ bind(&try_allocate); - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(zero, &add_arguments_object, Label::kNear); __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize)); __ bind(&add_arguments_object); - __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict)); + __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict)); // Do the allocation of both objects in one go. __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT); @@ -3224,7 +3340,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // If there are no actual arguments, we're done. Label done; - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(zero, &done, Label::kNear); // Get the parameters pointer from the stack. @@ -3246,8 +3362,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { __ bind(&loop); __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver. __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx); - __ add(Operand(edi), Immediate(kPointerSize)); - __ sub(Operand(edx), Immediate(kPointerSize)); + __ add(edi, Immediate(kPointerSize)); + __ sub(edx, Immediate(kPointerSize)); __ dec(ecx); __ j(not_zero, &loop); @@ -3268,10 +3384,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { #ifdef V8_INTERPRETED_REGEXP __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); #else // V8_INTERPRETED_REGEXP - if (!FLAG_regexp_entry_native) { - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); - return; - } // Stack frame on entry. // esp[0]: return address @@ -3294,7 +3406,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { ExternalReference address_of_regexp_stack_memory_size = ExternalReference::address_of_regexp_stack_memory_size(masm->isolate()); __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); - __ test(ebx, Operand(ebx)); + __ test(ebx, ebx); __ j(zero, &runtime); // Check that the first argument is a JSRegExp object. @@ -3315,7 +3427,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // ecx: RegExp data (FixedArray) // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. 
__ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset)); - __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP))); + __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP))); __ j(not_equal, &runtime); // ecx: RegExp data (FixedArray) @@ -3325,7 +3437,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // uses the asumption that smis are 2 * their untagged value. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(Operand(edx), Immediate(2)); // edx was a smi. + __ add(edx, Immediate(2)); // edx was a smi. // Check that the static offsets vector buffer is large enough. __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize); __ j(above, &runtime); @@ -3347,7 +3459,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // string length. A negative value will be greater (unsigned comparison). __ mov(eax, Operand(esp, kPreviousIndexOffset)); __ JumpIfNotSmi(eax, &runtime); - __ cmp(eax, Operand(ebx)); + __ cmp(eax, ebx); __ j(above_equal, &runtime); // ecx: RegExp data (FixedArray) @@ -3367,8 +3479,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // additional information. __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset)); __ SmiUntag(eax); - __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead)); - __ cmp(edx, Operand(eax)); + __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead)); + __ cmp(edx, eax); __ j(greater, &runtime); // Reset offset for possibly sliced string. @@ -3385,8 +3497,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); __ j(zero, &seq_two_byte_string, Label::kNear); // Any other flat string must be a flat ascii string. - __ and_(Operand(ebx), - Immediate(kIsNotStringMask | kStringRepresentationMask)); + __ and_(ebx, Immediate(kIsNotStringMask | kStringRepresentationMask)); __ j(zero, &seq_ascii_string, Label::kNear); // Check for flat cons string or sliced string. @@ -3398,7 +3509,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { Label cons_string, check_encoding; STATIC_ASSERT(kConsStringTag < kExternalStringTag); STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); - __ cmp(Operand(ebx), Immediate(kExternalStringTag)); + __ cmp(ebx, Immediate(kExternalStringTag)); __ j(less, &cons_string); __ j(equal, &runtime); @@ -3504,14 +3615,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Prepare start and end index of the input. // Load the length from the original sliced string if that is the case. __ mov(esi, FieldOperand(esi, String::kLengthOffset)); - __ add(esi, Operand(edi)); // Calculate input end wrt offset. + __ add(esi, edi); // Calculate input end wrt offset. __ SmiUntag(edi); - __ add(ebx, Operand(edi)); // Calculate input start wrt offset. + __ add(ebx, edi); // Calculate input start wrt offset. // ebx: start index of the input string // esi: end index of the input string Label setup_two_byte, setup_rest; - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(zero, &setup_two_byte, Label::kNear); __ SmiUntag(esi); __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize)); @@ -3531,8 +3642,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ bind(&setup_rest); // Locate the code entry and call it. - __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ call(Operand(edx)); + __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ call(edx); // Drop arguments and come back to JS mode. 
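The capture-register arithmetic in the RegExpExecStub hunks above and below leans on the Smi encoding: a Smi already holds twice its value, so "(number_of_captures + 1) * 2" is computed by simply adding 2 to the Smi, as the "edx was a smi" comments note. A quick check of that identity:

#include <cassert>
#include <cstdio>

static int SmiTag(int value) { return value * 2; }

int main() {
  for (int captures = 0; captures < 5; ++captures) {
    int via_smi = SmiTag(captures) + 2;        // what the stub computes
    int direct = (captures + 1) * 2;           // what the comment describes
    assert(via_smi == direct);
    printf("%d captures -> %d capture registers\n", captures, direct);
  }
}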
__ LeaveApiExitFrame(); @@ -3553,11 +3664,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // TODO(592): Rerunning the RegExp to get the stack overflow exception. ExternalReference pending_exception(Isolate::kPendingExceptionAddress, masm->isolate()); - __ mov(edx, - Operand::StaticVariable(ExternalReference::the_hole_value_location( - masm->isolate()))); + __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); __ mov(eax, Operand::StaticVariable(pending_exception)); - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); __ j(equal, &runtime); // For exception, throw the exception again. @@ -3578,7 +3687,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ bind(&failure); // For failure to match, return null. - __ mov(Operand(eax), factory->null_value()); + __ mov(eax, factory->null_value()); __ ret(4 * kPointerSize); // Load RegExp data. @@ -3589,7 +3698,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Calculate number of capture registers (number_of_captures + 1) * 2. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(Operand(edx), Immediate(2)); // edx was a smi. + __ add(edx, Immediate(2)); // edx was a smi. // edx: Number of capture registers // Load last_match_info which is still known to be a fast case JSArray. @@ -3605,12 +3714,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Store last subject and last input. __ mov(eax, Operand(esp, kSubjectOffset)); __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax); - __ mov(ecx, ebx); - __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi); + __ RecordWriteField(ebx, + RegExpImpl::kLastSubjectOffset, + eax, + edi, + kDontSaveFPRegs); __ mov(eax, Operand(esp, kSubjectOffset)); __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax); - __ mov(ecx, ebx); - __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi); + __ RecordWriteField(ebx, + RegExpImpl::kLastInputOffset, + eax, + edi, + kDontSaveFPRegs); // Get the static offsets vector filled by the native regexp code. ExternalReference address_of_static_offsets_vector = @@ -3624,7 +3739,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Capture register counter starts from number of capture registers and // counts down until wraping after zero. __ bind(&next_capture); - __ sub(Operand(edx), Immediate(1)); + __ sub(edx, Immediate(1)); __ j(negative, &done, Label::kNear); // Read the value from the static offsets vector buffer. __ mov(edi, Operand(ecx, edx, times_int_size, 0)); @@ -3655,7 +3770,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { Label done; __ mov(ebx, Operand(esp, kPointerSize * 3)); __ JumpIfNotSmi(ebx, &slowcase); - __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength))); + __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength))); __ j(above, &slowcase); // Smi-tagging is equivalent to multiplying by 2. STATIC_ASSERT(kSmiTag == 0); @@ -3715,10 +3830,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { // ebx: Start of elements in FixedArray. // edx: the hole. Label loop; - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ bind(&loop); __ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero. 
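The stores into the last-match-info object above now go through RecordWriteField, i.e. each pointer store is followed by a write barrier. The toy model below only illustrates the general idea of a generational barrier, recording old-to-new stores in a store buffer; the types, layout and policy are made up and are not V8's implementation.

#include <cstdint>
#include <cstdio>
#include <vector>

struct Heap {
  uintptr_t new_space_start, new_space_end;
  std::vector<void**> store_buffer;            // remembered old-to-new slots

  bool InNewSpace(const void* p) const {
    uintptr_t a = reinterpret_cast<uintptr_t>(p);
    return a >= new_space_start && a < new_space_end;
  }

  void RecordWrite(void* host_object, void** slot, void* value) {
    // Only a new-space value stored into an old-space object has to be
    // remembered; the collector finds everything else by other means.
    if (InNewSpace(value) && !InNewSpace(host_object)) {
      store_buffer.push_back(slot);
    }
  }
};

int main() {
  static void* old_object[4] = {};             // pretend old-space object
  char* young = new char[8];                   // pretend new-space object
  Heap heap = {reinterpret_cast<uintptr_t>(young),
               reinterpret_cast<uintptr_t>(young) + 8,
               {}};
  old_object[0] = young;                                        // the store...
  heap.RecordWrite(old_object, &old_object[0], old_object[0]);  // ...then the barrier
  printf("remembered slots: %zu\n", heap.store_buffer.size());
  delete[] young;
}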
- __ sub(Operand(ecx), Immediate(1)); + __ sub(ecx, Immediate(1)); __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx); __ jmp(&loop); @@ -3752,7 +3867,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, // contains two elements (number and string) for each cache entry. __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two. - __ sub(Operand(mask), Immediate(1)); // Make mask. + __ sub(mask, Immediate(1)); // Make mask. // Calculate the entry in the number string cache. The hash value in the // number string cache for smis is just the smi value, and the hash for @@ -3778,7 +3893,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset)); __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); // Object is heap number and hash is now in scratch. Calculate cache index. - __ and_(scratch, Operand(mask)); + __ and_(scratch, mask); Register index = scratch; Register probe = mask; __ mov(probe, @@ -3804,7 +3919,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, __ bind(&smi_hash_calculated); // Object is smi and hash is now in scratch. Calculate cache index. - __ and_(scratch, Operand(mask)); + __ and_(scratch, mask); Register index = scratch; // Check if the entry is the smi we are looking for. __ cmp(object, @@ -3856,10 +3971,10 @@ void CompareStub::Generate(MacroAssembler* masm) { // Compare two smis if required. if (include_smi_compare_) { Label non_smi, smi_done; - __ mov(ecx, Operand(edx)); - __ or_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ or_(ecx, eax); __ JumpIfNotSmi(ecx, &non_smi, Label::kNear); - __ sub(edx, Operand(eax)); // Return on the result of the subtraction. + __ sub(edx, eax); // Return on the result of the subtraction. __ j(no_overflow, &smi_done, Label::kNear); __ not_(edx); // Correct sign in case of overflow. edx is never 0 here. __ bind(&smi_done); @@ -3867,8 +3982,8 @@ void CompareStub::Generate(MacroAssembler* masm) { __ ret(0); __ bind(&non_smi); } else if (FLAG_debug_code) { - __ mov(ecx, Operand(edx)); - __ or_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ or_(ecx, eax); __ test(ecx, Immediate(kSmiTagMask)); __ Assert(not_zero, "Unexpected smi operands."); } @@ -3880,7 +3995,7 @@ void CompareStub::Generate(MacroAssembler* masm) { // for NaN and undefined. { Label not_identical; - __ cmp(eax, Operand(edx)); + __ cmp(eax, edx); __ j(not_equal, ¬_identical); if (cc_ != equal) { @@ -3929,7 +4044,7 @@ void CompareStub::Generate(MacroAssembler* masm) { __ Set(eax, Immediate(0)); // Shift value and mask so kQuietNaNHighBitsMask applies to topmost // bits. - __ add(edx, Operand(edx)); + __ add(edx, edx); __ cmp(edx, kQuietNaNHighBitsMask << 1); if (cc_ == equal) { STATIC_ASSERT(EQUAL != 1); @@ -3963,19 +4078,19 @@ void CompareStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); ASSERT_EQ(0, Smi::FromInt(0)); __ mov(ecx, Immediate(kSmiTagMask)); - __ and_(ecx, Operand(eax)); - __ test(ecx, Operand(edx)); + __ and_(ecx, eax); + __ test(ecx, edx); __ j(not_zero, ¬_smis, Label::kNear); // One operand is a smi. // Check whether the non-smi is a heap number. STATIC_ASSERT(kSmiTagMask == 1); // ecx still holds eax & kSmiTag, which is either zero or one. 
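The NumberToStringStub lookup above derives its cache index as the hunk's comments describe: the mask is half the backing-array length minus one (two elements, number and string, per entry), the hash is the Smi value itself for Smis and low-word ^ high-word for heap numbers, and the index is hash & mask. A standalone sketch with a stand-in cache length:

#include <cstdint>
#include <cstdio>
#include <cstring>

static const int kCacheLength = 128;              // stand-in backing-store length
static const int kMask = kCacheLength / 2 - 1;    // two slots per entry

static int SmiIndex(int32_t value) { return value & kMask; }

static int HeapNumberIndex(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  return static_cast<int>(hash & kMask);
}

int main() {
  printf("index for Smi 7:      %d\n", SmiIndex(7));
  printf("index for double 2.5: %d\n", HeapNumberIndex(2.5));
}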
- __ sub(Operand(ecx), Immediate(0x01)); + __ sub(ecx, Immediate(0x01)); __ mov(ebx, edx); - __ xor_(ebx, Operand(eax)); - __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx. - __ xor_(ebx, Operand(eax)); + __ xor_(ebx, eax); + __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx. + __ xor_(ebx, eax); // if eax was smi, ebx is now edx, else eax. // Check if the non-smi operand is a heap number. @@ -4037,9 +4152,9 @@ void CompareStub::Generate(MacroAssembler* masm) { // Return a result of -1, 0, or 1, based on EFLAGS. __ mov(eax, 0); // equal __ mov(ecx, Immediate(Smi::FromInt(1))); - __ cmov(above, eax, Operand(ecx)); + __ cmov(above, eax, ecx); __ mov(ecx, Immediate(Smi::FromInt(-1))); - __ cmov(below, eax, Operand(ecx)); + __ cmov(below, eax, ecx); __ ret(0); } else { FloatingPointHelper::CheckFloatOperands( @@ -4198,25 +4313,49 @@ void StackCheckStub::Generate(MacroAssembler* masm) { } +void CallFunctionStub::FinishCode(Code* code) { + code->set_has_function_cache(RecordCallTarget()); +} + + +void CallFunctionStub::Clear(Heap* heap, Address address) { + ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte); + // 1 ~ size of the test eax opcode. + Object* cell = Memory::Object_at(address + kPointerSize + 1); + // Low-level because clearing happens during GC. + reinterpret_cast<JSGlobalPropertyCell*>(cell)->set_value( + RawUninitializedSentinel(heap)); +} + + +Object* CallFunctionStub::GetCachedValue(Address address) { + ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte); + // 1 ~ size of the test eax opcode. + Object* cell = Memory::Object_at(address + kPointerSize + 1); + return JSGlobalPropertyCell::cast(cell)->value(); +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { + Isolate* isolate = masm->isolate(); Label slow, non_function; // The receiver might implicitly be the global object. This is // indicated by passing the hole as the receiver to the call // function stub. if (ReceiverMightBeImplicit()) { - Label call; + Label receiver_ok; // Get the receiver from the stack. // +1 ~ return address __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize)); // Call as function is indicated with the hole. - __ cmp(eax, masm->isolate()->factory()->the_hole_value()); - __ j(not_equal, &call, Label::kNear); + __ cmp(eax, isolate->factory()->the_hole_value()); + __ j(not_equal, &receiver_ok, Label::kNear); // Patch the receiver on the stack with the global receiver object. __ mov(ebx, GlobalObjectOperand()); __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset)); __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx); - __ bind(&call); + __ bind(&receiver_ok); } // Get the function to call from the stack. @@ -4229,12 +4368,53 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); __ j(not_equal, &slow); + if (RecordCallTarget()) { + // Cache the called function in a global property cell in the + // instruction stream after the call. Cache states are uninitialized, + // monomorphic (indicated by a JSFunction), and megamorphic. + Label initialize, call; + // Load the cache cell address into ebx and the cache state into ecx. + __ mov(ebx, Operand(esp, 0)); // Return address. + __ mov(ebx, Operand(ebx, 1)); // 1 ~ sizeof 'test eax' opcode in bytes. + __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. 
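The CompareStub code just above picks out the non-Smi operand without branching: (eax & kSmiTag) - 1 is all ones exactly when eax is a Smi, and x ^ ((x ^ y) & mask) then selects y or x, which is what the "if eax was smi, ebx is now edx, else eax" comment records. A small standalone check of that identity:

#include <cassert>
#include <cstdint>
#include <cstdio>

static uint32_t SelectIfLowBitClear(uint32_t x, uint32_t y) {
  uint32_t mask = (x & 1u) - 1u;      // 0xFFFFFFFF when bit 0 of x is clear
  return x ^ ((x ^ y) & mask);        // y when mask is all ones, x otherwise
}

int main() {
  assert(SelectIfLowBitClear(0x1230u, 0xABCDu) == 0xABCDu);  // x "is a Smi"
  assert(SelectIfLowBitClear(0x1231u, 0xABCDu) == 0x1231u);  // x is tagged
  printf("branchless select identity holds\n");
}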
+ __ cmp(ecx, edi); + __ j(equal, &call, Label::kNear); + __ cmp(ecx, Immediate(MegamorphicSentinel(isolate))); + __ j(equal, &call, Label::kNear); + + // A monomorphic miss (i.e, here the cache is not uninitialized) goes + // megamorphic. + __ cmp(ecx, Immediate(UninitializedSentinel(isolate))); + __ j(equal, &initialize, Label::kNear); + // MegamorphicSentinel is a root so no write-barrier is needed. + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), + Immediate(MegamorphicSentinel(isolate))); + __ jmp(&call, Label::kNear); + + // An uninitialized cache is patched with the function. + __ bind(&initialize); + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi); + __ mov(ecx, edi); + __ RecordWriteField(ebx, + JSGlobalPropertyCell::kValueOffset, + ecx, + edx, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET, // Cells are rescanned. + OMIT_SMI_CHECK); + + __ bind(&call); + } + // Fast-case: Just invoke the function. ParameterCount actual(argc_); if (ReceiverMightBeImplicit()) { Label call_as_function; - __ cmp(eax, masm->isolate()->factory()->the_hole_value()); + __ cmp(eax, isolate->factory()->the_hole_value()); __ j(equal, &call_as_function); __ InvokeFunction(edi, actual, @@ -4251,6 +4431,14 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // Slow-case: Non-function called. __ bind(&slow); + if (RecordCallTarget()) { + // If there is a call target cache, mark it megamorphic in the + // non-function case. + __ mov(ebx, Operand(esp, 0)); + __ mov(ebx, Operand(ebx, 1)); + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), + Immediate(MegamorphicSentinel(isolate))); + } // Check for function proxy. __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE); __ j(not_equal, &non_function); @@ -4262,8 +4450,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ SetCallKind(ecx, CALL_AS_FUNCTION); __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY); { - Handle<Code> adaptor = - masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline(); __ jmp(adaptor, RelocInfo::CODE_TARGET); } @@ -4275,8 +4462,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ Set(ebx, Immediate(0)); __ SetCallKind(ecx, CALL_AS_METHOD); __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION); - Handle<Code> adaptor = - masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline(); __ jmp(adaptor, RelocInfo::CODE_TARGET); } @@ -4286,6 +4472,35 @@ bool CEntryStub::NeedsImmovableCode() { } +bool CEntryStub::IsPregenerated() { + return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && + result_size_ == 1; +} + + +void CodeStub::GenerateStubsAheadOfTime() { + CEntryStub::GenerateAheadOfTime(); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); + // It is important that the store buffer overflow stubs are generated first. 
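The CallFunctionStub hunks above add a one-entry call-target cache kept in a property cell next to the call site: it starts uninitialized, records the first JSFunction it sees, and degrades to megamorphic on a mismatch or a non-function callee. The sketch below models only that state machine; the cell, the sentinels and the FunctionId type are stand-ins.

#include <cstdio>

typedef const void* FunctionId;           // stands in for a JSFunction pointer

struct CallTargetCell {
  enum State { kUninitialized, kMonomorphic, kMegamorphic };
  State state = kUninitialized;
  FunctionId target = nullptr;

  void Record(FunctionId callee) {
    if (state == kMegamorphic) return;                 // stays megamorphic
    if (state == kUninitialized) {                     // first call: remember it
      state = kMonomorphic;
      target = callee;
    } else if (target != callee) {                     // monomorphic miss
      state = kMegamorphic;
      target = nullptr;
    }                                                  // monomorphic hit: no change
  }
};

int main() {
  int f, g;
  CallTargetCell cell;
  cell.Record(&f);
  cell.Record(&f);
  printf("after two calls to f: %s\n",
         cell.state == CallTargetCell::kMonomorphic ? "monomorphic" : "other");
  cell.Record(&g);
  printf("after a call to g:    %s\n",
         cell.state == CallTargetCell::kMegamorphic ? "megamorphic" : "other");
}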
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); +} + + +void CodeStub::GenerateFPStubs() { + CEntryStub save_doubles(1, kSaveFPRegs); + Handle<Code> code = save_doubles.GetCode(); + code->set_is_pregenerated(true); + code->GetIsolate()->set_fp_stubs_generated(true); +} + + +void CEntryStub::GenerateAheadOfTime() { + CEntryStub stub(1, kDontSaveFPRegs); + Handle<Code> code = stub.GetCode(); + code->set_is_pregenerated(true); +} + + void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { __ Throw(eax); } @@ -4332,7 +4547,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ mov(Operand(esp, 1 * kPointerSize), esi); // argv. __ mov(Operand(esp, 2 * kPointerSize), Immediate(ExternalReference::isolate_address())); - __ call(Operand(ebx)); + __ call(ebx); // Result is in eax or edx:eax - do not destroy these registers! if (always_allocate_scope) { @@ -4364,8 +4579,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // should have returned some failure value. if (FLAG_debug_code) { __ push(edx); - __ mov(edx, Operand::StaticVariable( - ExternalReference::the_hole_value_location(masm->isolate()))); + __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); Label okay; __ cmp(edx, Operand::StaticVariable(pending_exception_address)); // Cannot use check here as it attempts to generate call into runtime. @@ -4376,7 +4590,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, } // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(save_doubles_); + __ LeaveExitFrame(save_doubles_ == kSaveFPRegs); __ ret(0); // Handling of failure. @@ -4393,10 +4607,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ j(equal, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. - ExternalReference the_hole_location = - ExternalReference::the_hole_value_location(masm->isolate()); __ mov(eax, Operand::StaticVariable(pending_exception_address)); - __ mov(edx, Operand::StaticVariable(the_hole_location)); + __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); __ mov(Operand::StaticVariable(pending_exception_address), edx); // Special handling of termination exceptions which are uncatchable @@ -4431,7 +4643,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { // a garbage collection and retrying the builtin (twice). // Enter the exit frame that transitions from JavaScript to C++. - __ EnterExitFrame(save_doubles_); + __ EnterExitFrame(save_doubles_ == kSaveFPRegs); // eax: result parameter for PerformGC, if any (setup below) // ebx: pointer to builtin function (C callee-saved) @@ -4487,7 +4699,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Setup frame. __ push(ebp); - __ mov(ebp, Operand(esp)); + __ mov(ebp, esp); // Push marker in two places. int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; @@ -4531,9 +4743,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); // Clear any pending exceptions. - ExternalReference the_hole_location = - ExternalReference::the_hole_value_location(masm->isolate()); - __ mov(edx, Operand::StaticVariable(the_hole_location)); + __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value())); __ mov(Operand::StaticVariable(pending_exception), edx); // Fake a receiver (NULL). 
@@ -4555,7 +4765,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { } __ mov(edx, Operand(edx, 0)); // deref address __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); - __ call(Operand(edx)); + __ call(edx); // Unlink this frame from the handler chain. __ PopTryHandler(); @@ -4563,8 +4773,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ bind(&exit); // Check if the current stack frame is marked as the outermost JS frame. __ pop(ebx); - __ cmp(Operand(ebx), - Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); + __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); __ j(not_equal, ¬_outermost_js_2); __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0)); __ bind(¬_outermost_js_2); @@ -4578,7 +4787,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ pop(ebx); __ pop(esi); __ pop(edi); - __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers + __ add(esp, Immediate(2 * kPointerSize)); // remove markers // Restore frame pointer and return. __ pop(ebp); @@ -4694,10 +4903,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset)); Label loop, is_instance, is_not_instance; __ bind(&loop); - __ cmp(scratch, Operand(prototype)); + __ cmp(scratch, prototype); __ j(equal, &is_instance, Label::kNear); Factory* factory = masm->isolate()->factory(); - __ cmp(Operand(scratch), Immediate(factory->null_value())); + __ cmp(scratch, Immediate(factory->null_value())); __ j(equal, &is_not_instance, Label::kNear); __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset)); @@ -4788,13 +4997,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } else { // Call the builtin and convert 0/1 to true/false. - __ EnterInternalFrame(); - __ push(object); - __ push(function); - __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(object); + __ push(function); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + } Label true_value, done; - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(zero, &true_value, Label::kNear); __ mov(eax, factory->false_value()); __ jmp(&done, Label::kNear); @@ -4905,22 +5115,24 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Immediate(masm->isolate()->factory()->empty_string())); __ j(not_equal, &call_runtime_); // Get the first of the two strings and load its instance type. - __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset)); + __ mov(result_, FieldOperand(object_, ConsString::kFirstOffset)); __ jmp(&assure_seq_string, Label::kNear); // SlicedString, unpack and add offset. __ bind(&sliced_string); __ add(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset)); - __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset)); + __ mov(result_, FieldOperand(object_, SlicedString::kParentOffset)); // Assure that we are dealing with a sequential string. Go to runtime if not. 
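The InstanceofStub hunk above walks the prototype chain, answering "is instance" when the function's prototype is found and "is not instance" when the chain ends in null. The same loop over a toy object model (the struct is purely illustrative):

#include <cstdio>

struct Obj { const Obj* prototype; };

static bool IsInstance(const Obj* object, const Obj* function_prototype) {
  for (const Obj* p = object->prototype; p != nullptr; p = p->prototype) {
    if (p == function_prototype) return true;   // found it on the chain
  }
  return false;                                 // hit null: not an instance
}

int main() {
  Obj proto = {nullptr};
  Obj obj = {&proto};
  Obj other = {nullptr};
  printf("obj instanceof F:   %d\n", IsInstance(&obj, &proto));
  printf("other instanceof F: %d\n", IsInstance(&other, &proto));
}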
__ bind(&assure_seq_string); - __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ mov(result_, FieldOperand(result_, HeapObject::kMapOffset)); __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); STATIC_ASSERT(kSeqStringTag == 0); __ test(result_, Immediate(kStringRepresentationMask)); __ j(not_zero, &call_runtime_); - __ jmp(&flat_string, Label::kNear); + // Actually fetch the parent string if it is confirmed to be sequential. + STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset); + __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset)); // Check for 1-byte or 2-byte string. __ bind(&flat_string); @@ -5110,7 +5322,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { Label second_not_zero_length, both_not_zero_length; __ mov(ecx, FieldOperand(edx, String::kLengthOffset)); STATIC_ASSERT(kSmiTag == 0); - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(not_zero, &second_not_zero_length, Label::kNear); // Second string is empty, result is first string which is already in eax. Counters* counters = masm->isolate()->counters(); @@ -5119,7 +5331,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ bind(&second_not_zero_length); __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); STATIC_ASSERT(kSmiTag == 0); - __ test(ebx, Operand(ebx)); + __ test(ebx, ebx); __ j(not_zero, &both_not_zero_length, Label::kNear); // First string is empty, result is second string which is in edx. __ mov(eax, edx); @@ -5134,13 +5346,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Look at the length of the result of adding the two strings. Label string_add_flat_result, longer_than_two; __ bind(&both_not_zero_length); - __ add(ebx, Operand(ecx)); + __ add(ebx, ecx); STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength); // Handle exceptionally long strings in the runtime system. __ j(overflow, &string_add_runtime); // Use the symbol table when adding two one character strings, as it // helps later optimizations to return a symbol here. - __ cmp(Operand(ebx), Immediate(Smi::FromInt(2))); + __ cmp(ebx, Immediate(Smi::FromInt(2))); __ j(not_equal, &longer_than_two); // Check that both strings are non-external ascii strings. @@ -5177,7 +5389,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { &string_add_runtime); // Pack both characters in ebx. __ shl(ecx, kBitsPerByte); - __ or_(ebx, Operand(ecx)); + __ or_(ebx, ecx); // Set the characters in the new string. __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx); __ IncrementCounter(counters->string_add_native(), 1); @@ -5185,7 +5397,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ bind(&longer_than_two); // Check if resulting string will be flat. - __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength))); + __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength))); __ j(below, &string_add_flat_result); // If result is not supposed to be flat allocate a cons string object. 
If both @@ -5195,7 +5407,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset)); __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset)); - __ and_(ecx, Operand(edi)); + __ and_(ecx, edi); STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ test(ecx, Immediate(kStringEncodingMask)); @@ -5223,7 +5435,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ j(not_zero, &ascii_data); __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); - __ xor_(edi, Operand(ecx)); + __ xor_(edi, ecx); STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); __ and_(edi, kAsciiStringTag | kAsciiDataHintTag); __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); @@ -5271,12 +5483,12 @@ void StringAddStub::Generate(MacroAssembler* masm) { // eax: result string __ mov(ecx, eax); // Locate first character of result. - __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // Load first argument and locate first character. __ mov(edx, Operand(esp, 2 * kPointerSize)); __ mov(edi, FieldOperand(edx, String::kLengthOffset)); __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // eax: result string // ecx: first character of result // edx: first char of first argument @@ -5286,7 +5498,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ mov(edx, Operand(esp, 1 * kPointerSize)); __ mov(edi, FieldOperand(edx, String::kLengthOffset)); __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // eax: result string // ecx: next character of result // edx: first char of second argument @@ -5310,13 +5522,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { // eax: result string __ mov(ecx, eax); // Locate first character of result. - __ add(Operand(ecx), + __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); // Load first argument and locate first character. 
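The repeated "kHeaderSize - kHeapObjectTag" adjustments above come from pointer tagging: a heap pointer carries a low tag, so the raw object address is the tagged pointer minus the tag, and the first character sits kHeaderSize bytes into the object. A standalone sketch; both constants below are illustrative assumptions, not authoritative V8 layout values.

#include <cstdint>

constexpr std::uintptr_t kAssumedHeapObjectTag = 1;  // assumption: low tag bit on heap pointers
constexpr std::uintptr_t kAssumedHeaderSize = 12;    // assumption: map + hash + length precede the payload

inline const char* FirstCharacter(std::uintptr_t tagged_string) {
  // Untag and skip the header in a single addition, exactly as the stub does.
  return reinterpret_cast<const char*>(tagged_string + kAssumedHeaderSize - kAssumedHeapObjectTag);
}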
__ mov(edx, Operand(esp, 2 * kPointerSize)); __ mov(edi, FieldOperand(edx, String::kLengthOffset)); __ SmiUntag(edi); - __ add(Operand(edx), + __ add(edx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); // eax: result string // ecx: first character of result @@ -5327,7 +5539,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ mov(edx, Operand(esp, 1 * kPointerSize)); __ mov(edi, FieldOperand(edx, String::kLengthOffset)); __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // eax: result string // ecx: next character of result // edx: first char of second argument @@ -5403,15 +5615,15 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, if (ascii) { __ mov_b(scratch, Operand(src, 0)); __ mov_b(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(1)); - __ add(Operand(dest), Immediate(1)); + __ add(src, Immediate(1)); + __ add(dest, Immediate(1)); } else { __ mov_w(scratch, Operand(src, 0)); __ mov_w(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(2)); - __ add(Operand(dest), Immediate(2)); + __ add(src, Immediate(2)); + __ add(dest, Immediate(2)); } - __ sub(Operand(count), Immediate(1)); + __ sub(count, Immediate(1)); __ j(not_zero, &loop); } @@ -5434,7 +5646,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, // Nothing to do for zero characters. Label done; - __ test(count, Operand(count)); + __ test(count, count); __ j(zero, &done); // Make count the number of bytes to copy. @@ -5459,7 +5671,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, // Check if there are more bytes to copy. __ bind(&last_bytes); - __ test(count, Operand(count)); + __ test(count, count); __ j(zero, &done); // Copy remaining characters. @@ -5467,9 +5679,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, __ bind(&loop); __ mov_b(scratch, Operand(src, 0)); __ mov_b(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(1)); - __ add(Operand(dest), Immediate(1)); - __ sub(Operand(count), Immediate(1)); + __ add(src, Immediate(1)); + __ add(dest, Immediate(1)); + __ sub(count, Immediate(1)); __ j(not_zero, &loop); __ bind(&done); @@ -5491,12 +5703,12 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // different hash algorithm. Don't try to look for these in the symbol table. Label not_array_index; __ mov(scratch, c1); - __ sub(Operand(scratch), Immediate(static_cast<int>('0'))); - __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0'))); + __ sub(scratch, Immediate(static_cast<int>('0'))); + __ cmp(scratch, Immediate(static_cast<int>('9' - '0'))); __ j(above, &not_array_index, Label::kNear); __ mov(scratch, c2); - __ sub(Operand(scratch), Immediate(static_cast<int>('0'))); - __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0'))); + __ sub(scratch, Immediate(static_cast<int>('0'))); + __ cmp(scratch, Immediate(static_cast<int>('9' - '0'))); __ j(below_equal, not_probed); __ bind(&not_array_index); @@ -5509,7 +5721,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Collect the two characters in a register. Register chars = c1; __ shl(c2, kBitsPerByte); - __ or_(chars, Operand(c2)); + __ or_(chars, c2); // chars: two character string, char 1 in byte 0 and char 2 in byte 1. // hash: hash of two character string. 
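The two-character symbol-table probe above first packs both candidate characters into one register (char 1 in byte 0, char 2 in byte 1, with kBitsPerByte == 8). The equivalent C++ is just a shift-and-or; a small sketch for reference:

#include <cstdint>

inline std::uint32_t PackTwoCharacters(std::uint8_t c1, std::uint8_t c2) {
  // Mirrors "shl(c2, kBitsPerByte); or_(chars, c2)" with chars starting as c1.
  return static_cast<std::uint32_t>(c1) | (static_cast<std::uint32_t>(c2) << 8);
}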
@@ -5526,7 +5738,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, Register mask = scratch2; __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset)); __ SmiUntag(mask); - __ sub(Operand(mask), Immediate(1)); + __ sub(mask, Immediate(1)); // Registers // chars: two character string, char 1 in byte 0 and char 2 in byte 1. @@ -5543,9 +5755,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Calculate entry in symbol table. __ mov(scratch, hash); if (i > 0) { - __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i))); + __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i))); } - __ and_(scratch, Operand(mask)); + __ and_(scratch, mask); // Load the entry from the symbol table. Register candidate = scratch; // Scratch register contains candidate. @@ -5582,7 +5794,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Check if the two characters match. __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); __ and_(temp, 0x0000ffff); - __ cmp(chars, Operand(temp)); + __ cmp(chars, temp); __ j(equal, &found_in_symbol_table); __ bind(&next_probe_pop_mask[i]); __ pop(mask); @@ -5609,11 +5821,11 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm, // hash = character + (character << 10); __ mov(hash, character); __ shl(hash, 10); - __ add(hash, Operand(character)); + __ add(hash, character); // hash ^= hash >> 6; __ mov(scratch, hash); __ sar(scratch, 6); - __ xor_(hash, Operand(scratch)); + __ xor_(hash, scratch); } @@ -5622,15 +5834,15 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, Register character, Register scratch) { // hash += character; - __ add(hash, Operand(character)); + __ add(hash, character); // hash += hash << 10; __ mov(scratch, hash); __ shl(scratch, 10); - __ add(hash, Operand(scratch)); + __ add(hash, scratch); // hash ^= hash >> 6; __ mov(scratch, hash); __ sar(scratch, 6); - __ xor_(hash, Operand(scratch)); + __ xor_(hash, scratch); } @@ -5640,19 +5852,19 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm, // hash += hash << 3; __ mov(scratch, hash); __ shl(scratch, 3); - __ add(hash, Operand(scratch)); + __ add(hash, scratch); // hash ^= hash >> 11; __ mov(scratch, hash); __ sar(scratch, 11); - __ xor_(hash, Operand(scratch)); + __ xor_(hash, scratch); // hash += hash << 15; __ mov(scratch, hash); __ shl(scratch, 15); - __ add(hash, Operand(scratch)); + __ add(hash, scratch); // if (hash == 0) hash = 27; Label hash_not_zero; - __ test(hash, Operand(hash)); + __ test(hash, hash); __ j(not_zero, &hash_not_zero, Label::kNear); __ mov(hash, Immediate(27)); __ bind(&hash_not_zero); @@ -5684,7 +5896,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ JumpIfNotSmi(ecx, &runtime); __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index. __ JumpIfNotSmi(edx, &runtime); - __ sub(ecx, Operand(edx)); + __ sub(ecx, edx); __ cmp(ecx, FieldOperand(eax, String::kLengthOffset)); Label return_eax; __ j(equal, &return_eax); @@ -5816,13 +6028,13 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ mov(edx, esi); // esi used by following code. // Locate first character of result. __ mov(edi, eax); - __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // Load string argument and locate character of sub string start. 
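The GenerateHashInit / GenerateHashAddCharacter / GenerateHashGetHash hunks above implement a running string hash of the Jenkins one-at-a-time family. A standalone C++ rendering follows; unsigned 32-bit arithmetic is used here for clarity, whereas the stub works on 32-bit registers and uses arithmetic shifts.

#include <cstdint>

std::uint32_t HashInit(std::uint32_t character) {
  std::uint32_t hash = character + (character << 10);  // hash = c + (c << 10)
  hash ^= hash >> 6;
  return hash;
}

std::uint32_t HashAddCharacter(std::uint32_t hash, std::uint32_t character) {
  hash += character;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

std::uint32_t HashGetHash(std::uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash == 0 ? 27 : hash;  // a zero hash is remapped to 27, as in the stub
}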
__ mov(esi, Operand(esp, 3 * kPointerSize)); - __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from __ SmiUntag(ebx); - __ add(esi, Operand(ebx)); + __ add(esi, ebx); // eax: result string // ecx: result length @@ -5851,18 +6063,17 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ mov(edx, esi); // esi used by following code. // Locate first character of result. __ mov(edi, eax); - __ add(Operand(edi), + __ add(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); // Load string argument and locate character of sub string start. __ mov(esi, Operand(esp, 3 * kPointerSize)); - __ add(Operand(esi), - Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from // As from is a smi it is 2 times the value which matches the size of a two // byte character. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(esi, Operand(ebx)); + __ add(esi, ebx); // eax: result string // ecx: result length @@ -5902,7 +6113,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, Label compare_chars; __ bind(&check_zero_length); STATIC_ASSERT(kSmiTag == 0); - __ test(length, Operand(length)); + __ test(length, length); __ j(not_zero, &compare_chars, Label::kNear); __ Set(eax, Immediate(Smi::FromInt(EQUAL))); __ ret(0); @@ -5937,14 +6148,14 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, __ j(less_equal, &left_shorter, Label::kNear); // Right string is shorter. Change scratch1 to be length of right string. - __ sub(scratch1, Operand(length_delta)); + __ sub(scratch1, length_delta); __ bind(&left_shorter); Register min_length = scratch1; // If either length is zero, just compare lengths. Label compare_lengths; - __ test(min_length, Operand(min_length)); + __ test(min_length, min_length); __ j(zero, &compare_lengths, Label::kNear); // Compare characters. @@ -5954,7 +6165,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, // Compare lengths - strings up to min-length are equal. __ bind(&compare_lengths); - __ test(length_delta, Operand(length_delta)); + __ test(length_delta, length_delta); __ j(not_zero, &result_not_equal, Label::kNear); // Result is EQUAL. @@ -6003,7 +6214,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( __ mov_b(scratch, Operand(left, index, times_1, 0)); __ cmpb(scratch, Operand(right, index, times_1, 0)); __ j(not_equal, chars_not_equal, chars_not_equal_near); - __ add(Operand(index), Immediate(1)); + __ add(index, Immediate(1)); __ j(not_zero, &loop); } @@ -6020,7 +6231,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { __ mov(eax, Operand(esp, 1 * kPointerSize)); // right Label not_same; - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); __ j(not_equal, &not_same, Label::kNear); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); @@ -6036,7 +6247,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { // Compare flat ascii strings. // Drop arguments from the stack. 
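GenerateCompareFlatAsciiStrings above compares characters up to the shorter length and, only if those all match, falls back to the length delta. A standalone sketch of that strategy (std::string stands in for the flat heap strings; the return value plays the role of the LESS/EQUAL/GREATER smis the stub materializes):

#include <cstddef>
#include <cstring>
#include <string>

int CompareFlatAscii(const std::string& left, const std::string& right) {
  std::size_t min_length = left.size() < right.size() ? left.size() : right.size();
  int chars = std::memcmp(left.data(), right.data(), min_length);
  if (chars != 0) return chars < 0 ? -1 : 1;        // &result_not_equal
  if (left.size() == right.size()) return 0;        // EQUAL
  return left.size() < right.size() ? -1 : 1;       // decided by the length delta
}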
__ pop(ecx); - __ add(Operand(esp), Immediate(2 * kPointerSize)); + __ add(esp, Immediate(2 * kPointerSize)); __ push(ecx); GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi); @@ -6050,16 +6261,16 @@ void StringCompareStub::Generate(MacroAssembler* masm) { void ICCompareStub::GenerateSmis(MacroAssembler* masm) { ASSERT(state_ == CompareIC::SMIS); Label miss; - __ mov(ecx, Operand(edx)); - __ or_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ or_(ecx, eax); __ JumpIfNotSmi(ecx, &miss, Label::kNear); if (GetCondition() == equal) { // For equality we do not care about the sign of the result. - __ sub(eax, Operand(edx)); + __ sub(eax, edx); } else { Label done; - __ sub(edx, Operand(eax)); + __ sub(edx, eax); __ j(no_overflow, &done, Label::kNear); // Correct sign of result in case of overflow. __ not_(edx); @@ -6079,8 +6290,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { Label generic_stub; Label unordered; Label miss; - __ mov(ecx, Operand(edx)); - __ and_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ and_(ecx, eax); __ JumpIfSmi(ecx, &generic_stub, Label::kNear); __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx); @@ -6108,9 +6319,9 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { // Performing mov, because xor would destroy the flag register. __ mov(eax, 0); // equal __ mov(ecx, Immediate(Smi::FromInt(1))); - __ cmov(above, eax, Operand(ecx)); + __ cmov(above, eax, ecx); __ mov(ecx, Immediate(Smi::FromInt(-1))); - __ cmov(below, eax, Operand(ecx)); + __ cmov(below, eax, ecx); __ ret(0); __ bind(&unordered); @@ -6137,9 +6348,9 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { // Check that both operands are heap objects. Label miss; - __ mov(tmp1, Operand(left)); + __ mov(tmp1, left); STATIC_ASSERT(kSmiTag == 0); - __ and_(tmp1, Operand(right)); + __ and_(tmp1, right); __ JumpIfSmi(tmp1, &miss, Label::kNear); // Check that both operands are symbols. @@ -6148,13 +6359,13 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); STATIC_ASSERT(kSymbolTag != 0); - __ and_(tmp1, Operand(tmp2)); + __ and_(tmp1, tmp2); __ test(tmp1, Immediate(kIsSymbolMask)); __ j(zero, &miss, Label::kNear); // Symbols are compared by identity. Label done; - __ cmp(left, Operand(right)); + __ cmp(left, right); // Make sure eax is non-zero. At this point input operands are // guaranteed to be non-zero. ASSERT(right.is(eax)); @@ -6183,9 +6394,9 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { Register tmp3 = edi; // Check that both operands are heap objects. - __ mov(tmp1, Operand(left)); + __ mov(tmp1, left); STATIC_ASSERT(kSmiTag == 0); - __ and_(tmp1, Operand(right)); + __ and_(tmp1, right); __ JumpIfSmi(tmp1, &miss); // Check that both operands are strings. This leaves the instance @@ -6196,13 +6407,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); __ mov(tmp3, tmp1); STATIC_ASSERT(kNotStringTag != 0); - __ or_(tmp3, Operand(tmp2)); + __ or_(tmp3, tmp2); __ test(tmp3, Immediate(kIsNotStringMask)); __ j(not_zero, &miss); // Fast check for identical strings. 
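ICCompareStub::GenerateSmis above compares two smis by subtracting them and, when the subtraction overflows, flipping the bits of the result so the sign comes out right. A 32-bit sketch of the same idea (plain int32 values here; the stub's operands are tagged smis, but the ordering argument is identical):

#include <cstdint>

std::int32_t CompareBySubtraction(std::int32_t left, std::int32_t right) {
  std::int64_t wide = static_cast<std::int64_t>(left) - static_cast<std::int64_t>(right);
  std::int32_t narrow = static_cast<std::int32_t>(wide);
  // If the 32-bit subtraction overflowed, the truncated result has the wrong
  // sign; the stub corrects this with not_(edx), mirrored here.
  if (wide != narrow) narrow = ~narrow;
  return narrow;  // negative: left < right, zero: equal, positive: left > right
}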
Label not_same; - __ cmp(left, Operand(right)); + __ cmp(left, right); __ j(not_equal, &not_same, Label::kNear); STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); @@ -6216,7 +6427,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { // because we already know they are not identical. Label do_compare; STATIC_ASSERT(kSymbolTag != 0); - __ and_(tmp1, Operand(tmp2)); + __ and_(tmp1, tmp2); __ test(tmp1, Immediate(kIsSymbolMask)); __ j(zero, &do_compare, Label::kNear); // Make sure eax is non-zero. At this point input operands are @@ -6249,8 +6460,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { void ICCompareStub::GenerateObjects(MacroAssembler* masm) { ASSERT(state_ == CompareIC::OBJECTS); Label miss; - __ mov(ecx, Operand(edx)); - __ and_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ and_(ecx, eax); __ JumpIfSmi(ecx, &miss, Label::kNear); __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx); @@ -6259,7 +6470,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) { __ j(not_equal, &miss, Label::kNear); ASSERT(GetCondition() == equal); - __ sub(eax, Operand(edx)); + __ sub(eax, edx); __ ret(0); __ bind(&miss); @@ -6274,15 +6485,16 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { __ push(eax); __ push(ecx); - // Call the runtime system in a fresh internal frame. - ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), - masm->isolate()); - __ EnterInternalFrame(); - __ push(edx); - __ push(eax); - __ push(Immediate(Smi::FromInt(op_))); - __ CallExternalReference(miss, 3); - __ LeaveInternalFrame(); + { + // Call the runtime system in a fresh internal frame. + ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), + masm->isolate()); + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(edx); + __ push(eax); + __ push(Immediate(Smi::FromInt(op_))); + __ CallExternalReference(miss, 3); + } // Compute the entry point of the rewritten stub. __ lea(edi, FieldOperand(eax, Code::kHeaderSize)); @@ -6294,7 +6506,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { __ push(ecx); // Do a tail call to the rewritten stub. - __ jmp(Operand(edi)); + __ jmp(edi); } @@ -6323,8 +6535,8 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup( // Capacity is smi 2^n. __ mov(index, FieldOperand(properties, kCapacityOffset)); __ dec(index); - __ and_(Operand(index), - Immediate(Smi::FromInt(name->Hash() + + __ and_(index, + Immediate(Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i)))); // Scale the index by multiplying by the entry size. @@ -6357,7 +6569,7 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup( __ push(Immediate(name->Hash())); MaybeObject* result = masm->TryCallStub(&stub); if (result->IsFailure()) return result; - __ test(r0, Operand(r0)); + __ test(r0, r0); __ j(not_zero, miss); __ jmp(done); return result; @@ -6390,9 +6602,9 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, __ mov(r0, FieldOperand(name, String::kHashFieldOffset)); __ shr(r0, String::kHashShift); if (i > 0) { - __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i))); + __ add(r0, Immediate(StringDictionary::GetProbeOffset(i))); } - __ and_(r0, Operand(r1)); + __ and_(r0, r1); // Scale the index by multiplying by the entry size. 
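Both dictionary lookups above derive each probe index the same way: the capacity is a power of two, so masking with (capacity - 1) wraps the index, and every retry adds GetProbeOffset(i), which grows quadratically in the attempt number. A small sketch; the exact offset formula below is an assumption for illustration, not a quote of StringDictionary::GetProbeOffset.

#include <cstdint>

inline std::uint32_t ProbeOffset(std::uint32_t attempt) {
  return (attempt + attempt * attempt) / 2;  // assumed quadratic probing schedule
}

inline std::uint32_t ProbeIndex(std::uint32_t hash, std::uint32_t attempt,
                                std::uint32_t capacity /* power of two */) {
  return (hash + ProbeOffset(attempt)) & (capacity - 1);
}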
ASSERT(StringDictionary::kEntrySize == 3); @@ -6416,13 +6628,15 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, __ push(r0); __ CallStub(&stub); - __ test(r1, Operand(r1)); + __ test(r1, r1); __ j(zero, miss); __ jmp(done); } void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. // Stack frame on entry: // esp[0 * kPointerSize]: return address. // esp[1 * kPointerSize]: key's hash. @@ -6453,8 +6667,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { // Compute the masked index: (hash + i + i * i) & mask. __ mov(scratch, Operand(esp, 2 * kPointerSize)); if (i > 0) { - __ add(Operand(scratch), - Immediate(StringDictionary::GetProbeOffset(i))); + __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i))); } __ and_(scratch, Operand(esp, 0)); @@ -6510,6 +6723,275 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { } +struct AheadOfTimeWriteBarrierStubList { + Register object, value, address; + RememberedSetAction action; +}; + + +struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { + // Used in RegExpExecStub. + { ebx, eax, edi, EMIT_REMEMBERED_SET }, + // Used in CompileArrayPushCall. + { ebx, ecx, edx, EMIT_REMEMBERED_SET }, + { ebx, edi, edx, OMIT_REMEMBERED_SET }, + // Used in CompileStoreGlobal and CallFunctionStub. + { ebx, ecx, edx, OMIT_REMEMBERED_SET }, + // Used in StoreStubCompiler::CompileStoreField and + // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. + { edx, ecx, ebx, EMIT_REMEMBERED_SET }, + // GenerateStoreField calls the stub with two different permutations of + // registers. This is the second. + { ebx, ecx, edx, EMIT_REMEMBERED_SET }, + // StoreIC::GenerateNormal via GenerateDictionaryStore + { ebx, edi, edx, EMIT_REMEMBERED_SET }, + // KeyedStoreIC::GenerateGeneric. + { ebx, edx, ecx, EMIT_REMEMBERED_SET}, + // KeyedStoreStubCompiler::GenerateStoreFastElement. + { edi, edx, ecx, EMIT_REMEMBERED_SET}, + // Null termination. + { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET} +}; + + +bool RecordWriteStub::IsPregenerated() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + if (object_.is(entry->object) && + value_.is(entry->value) && + address_.is(entry->address) && + remembered_set_action_ == entry->action && + save_fp_regs_mode_ == kDontSaveFPRegs) { + return true; + } + } + return false; +} + + +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { + StoreBufferOverflowStub stub1(kDontSaveFPRegs); + stub1.GetCode()->set_is_pregenerated(true); + + CpuFeatures::TryForceFeatureScope scope(SSE2); + if (CpuFeatures::IsSupported(SSE2)) { + StoreBufferOverflowStub stub2(kSaveFPRegs); + stub2.GetCode()->set_is_pregenerated(true); + } +} + + +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + RecordWriteStub stub(entry->object, + entry->value, + entry->address, + entry->action, + kDontSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + } +} + + +// Takes the input in 3 registers: address_ value_ and object_. A pointer to +// the value has just been written into the object, now this stub makes sure +// we keep the GC informed. The word in the object where the value has been +// written is in the address register. 
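Before the stub body itself, it may help to state what a post-write barrier of this kind has to guarantee after "object->slot = value". The following is a conceptual sketch only, with simplified stand-in types rather than the real heap interfaces: record old-to-new slots for the scavenger, and during incremental marking make sure a value written into an already-black object is revisited.

struct HeapObject;

struct WriteBarrierContext {
  bool incremental_marking_active;
  bool (*in_new_space)(HeapObject*);
  bool (*is_black)(HeapObject*);            // object already fully scanned by the marker
  void (*record_slot)(HeapObject** slot);   // remembered set / store buffer entry
  void (*mark_grey)(HeapObject*);           // schedule the value for (re)visiting
};

void PostWriteBarrier(WriteBarrierContext* ctx, HeapObject* object,
                      HeapObject** slot, HeapObject* value) {
  // Old-to-new pointers must be findable by the scavenger.
  if (!ctx->in_new_space(object) && ctx->in_new_space(value)) {
    ctx->record_slot(slot);
  }
  // A pointer stored into a black object would otherwise be missed by the
  // incremental marker, so the value is pushed back onto the grey worklist.
  if (ctx->incremental_marking_active && ctx->is_black(object)) {
    ctx->mark_grey(value);
  }
}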
+void RecordWriteStub::Generate(MacroAssembler* masm) { + Label skip_to_incremental_noncompacting; + Label skip_to_incremental_compacting; + + // The first two instructions are generated with labels so as to get the + // offset fixed up correctly by the bind(Label*) call. We patch it back and + // forth between a compare instructions (a nop in this position) and the + // real branch when we start and stop incremental heap marking. + __ jmp(&skip_to_incremental_noncompacting, Label::kNear); + __ jmp(&skip_to_incremental_compacting, Label::kFar); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&skip_to_incremental_noncompacting); + GenerateIncremental(masm, INCREMENTAL); + + __ bind(&skip_to_incremental_compacting); + GenerateIncremental(masm, INCREMENTAL_COMPACTION); + + // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. + // Will be checked in IncrementalMarking::ActivateGeneratedStub. + masm->set_byte_at(0, kTwoByteNopInstruction); + masm->set_byte_at(2, kFiveByteNopInstruction); +} + + +void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { + regs_.Save(masm); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + Label dont_need_remembered_set; + + __ mov(regs_.scratch0(), Operand(regs_.address(), 0)); + __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. + regs_.scratch0(), + &dont_need_remembered_set); + + __ CheckPageFlag(regs_.object(), + regs_.scratch0(), + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + not_zero, + &dont_need_remembered_set); + + // First notify the incremental marker if necessary, then update the + // remembered set. + CheckNeedsToInformIncrementalMarker( + masm, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, + mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + + __ bind(&dont_need_remembered_set); + } + + CheckNeedsToInformIncrementalMarker( + masm, + kReturnOnNoNeedToInformIncrementalMarker, + mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ ret(0); +} + + +void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { + regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); + int argument_count = 3; + __ PrepareCallCFunction(argument_count, regs_.scratch0()); + __ mov(Operand(esp, 0 * kPointerSize), regs_.object()); + if (mode == INCREMENTAL_COMPACTION) { + __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot. + } else { + ASSERT(mode == INCREMENTAL); + __ mov(regs_.scratch0(), Operand(regs_.address(), 0)); + __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value. 
+ } + __ mov(Operand(esp, 2 * kPointerSize), + Immediate(ExternalReference::isolate_address())); + + AllowExternalCallThatCantCauseGC scope(masm); + if (mode == INCREMENTAL_COMPACTION) { + __ CallCFunction( + ExternalReference::incremental_evacuation_record_write_function( + masm->isolate()), + argument_count); + } else { + ASSERT(mode == INCREMENTAL); + __ CallCFunction( + ExternalReference::incremental_marking_record_write_function( + masm->isolate()), + argument_count); + } + regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); +} + + +void RecordWriteStub::CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode) { + Label object_is_black, need_incremental, need_incremental_pop_object; + + // Let's look at the color of the object: If it is not black we don't have + // to inform the incremental marker. + __ JumpIfBlack(regs_.object(), + regs_.scratch0(), + regs_.scratch1(), + &object_is_black, + Label::kNear); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&object_is_black); + + // Get the value from the slot. + __ mov(regs_.scratch0(), Operand(regs_.address(), 0)); + + if (mode == INCREMENTAL_COMPACTION) { + Label ensure_not_white; + + __ CheckPageFlag(regs_.scratch0(), // Contains value. + regs_.scratch1(), // Scratch. + MemoryChunk::kEvacuationCandidateMask, + zero, + &ensure_not_white, + Label::kNear); + + __ CheckPageFlag(regs_.object(), + regs_.scratch1(), // Scratch. + MemoryChunk::kSkipEvacuationSlotsRecordingMask, + not_zero, + &ensure_not_white, + Label::kNear); + + __ jmp(&need_incremental); + + __ bind(&ensure_not_white); + } + + // We need an extra register for this, so we push the object register + // temporarily. + __ push(regs_.object()); + __ EnsureNotWhite(regs_.scratch0(), // The value. + regs_.scratch1(), // Scratch. + regs_.object(), // Scratch. + &need_incremental_pop_object, + Label::kNear); + __ pop(regs_.object()); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&need_incremental_pop_object); + __ pop(regs_.object()); + + __ bind(&need_incremental); + + // Fall through when we need to inform the incremental marker. +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h index fa255da1f..2a7d316f4 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.h +++ b/deps/v8/src/ia32/code-stubs-ia32.h @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -60,6 +60,25 @@ class TranscendentalCacheStub: public CodeStub { }; +class StoreBufferOverflowStub: public CodeStub { + public: + explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) + : save_doubles_(save_fp) { } + + void Generate(MacroAssembler* masm); + + virtual bool IsPregenerated() { return true; } + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + SaveFPRegsMode save_doubles_; + + Major MajorKey() { return StoreBufferOverflow; } + int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } +}; + + class UnaryOpStub: public CodeStub { public: UnaryOpStub(Token::Value op, @@ -418,6 +437,8 @@ class StringDictionaryLookupStub: public CodeStub { Register r0, Register r1); + virtual bool SometimesSetsUpAFrame() { return false; } + private: static const int kInlinedProbes = 4; static const int kTotalProbes = 20; @@ -430,7 +451,7 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryNegativeLookup; } + Major MajorKey() { return StringDictionaryLookup; } int MinorKey() { return DictionaryBits::encode(dictionary_.code()) | @@ -451,6 +472,272 @@ class StringDictionaryLookupStub: public CodeStub { }; +class RecordWriteStub: public CodeStub { + public: + RecordWriteStub(Register object, + Register value, + Register address, + RememberedSetAction remembered_set_action, + SaveFPRegsMode fp_mode) + : object_(object), + value_(value), + address_(address), + remembered_set_action_(remembered_set_action), + save_fp_regs_mode_(fp_mode), + regs_(object, // An input reg. + address, // An input reg. + value) { // One scratch reg. + } + + enum Mode { + STORE_BUFFER_ONLY, + INCREMENTAL, + INCREMENTAL_COMPACTION + }; + + virtual bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8. + static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8. + + static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32. + static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32. 
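The four byte constants above encode the stub's patchable prologue: byte 0 is either a two-byte "nop" (cmpb al, imm8) or a short jmp, and byte 2 is either a five-byte "nop" (cmpl eax, imm32) or a near jmp. The sketch below restates that protocol in plain C++ over a raw byte buffer; the real code only rewrites the byte that changes, asserts the transition is legal, and flushes the instruction cache.

#include <cstdint>

enum PatchMode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

const std::uint8_t kTwoByteNop = 0x3c, kTwoByteJump = 0xeb;
const std::uint8_t kFiveByteNop = 0x3d, kFiveByteJump = 0xe9;

PatchMode GetMode(const std::uint8_t* code) {
  if (code[0] == kTwoByteJump) return INCREMENTAL;
  return code[2] == kFiveByteJump ? INCREMENTAL_COMPACTION : STORE_BUFFER_ONLY;
}

void Patch(std::uint8_t* code, PatchMode mode) {
  code[0] = (mode == INCREMENTAL) ? kTwoByteJump : kTwoByteNop;
  code[2] = (mode == INCREMENTAL_COMPACTION) ? kFiveByteJump : kFiveByteNop;
  // A real patcher must also flush the instruction cache for these 7 bytes.
}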
+ + static Mode GetMode(Code* stub) { + byte first_instruction = stub->instruction_start()[0]; + byte second_instruction = stub->instruction_start()[2]; + + if (first_instruction == kTwoByteJumpInstruction) { + return INCREMENTAL; + } + + ASSERT(first_instruction == kTwoByteNopInstruction); + + if (second_instruction == kFiveByteJumpInstruction) { + return INCREMENTAL_COMPACTION; + } + + ASSERT(second_instruction == kFiveByteNopInstruction); + + return STORE_BUFFER_ONLY; + } + + static void Patch(Code* stub, Mode mode) { + switch (mode) { + case STORE_BUFFER_ONLY: + ASSERT(GetMode(stub) == INCREMENTAL || + GetMode(stub) == INCREMENTAL_COMPACTION); + stub->instruction_start()[0] = kTwoByteNopInstruction; + stub->instruction_start()[2] = kFiveByteNopInstruction; + break; + case INCREMENTAL: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + stub->instruction_start()[0] = kTwoByteJumpInstruction; + break; + case INCREMENTAL_COMPACTION: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + stub->instruction_start()[0] = kTwoByteNopInstruction; + stub->instruction_start()[2] = kFiveByteJumpInstruction; + break; + } + ASSERT(GetMode(stub) == mode); + CPU::FlushICache(stub->instruction_start(), 7); + } + + private: + // This is a helper class for freeing up 3 scratch registers, where the third + // is always ecx (needed for shift operations). The input is two registers + // that must be preserved and one scratch register provided by the caller. + class RegisterAllocation { + public: + RegisterAllocation(Register object, + Register address, + Register scratch0) + : object_orig_(object), + address_orig_(address), + scratch0_orig_(scratch0), + object_(object), + address_(address), + scratch0_(scratch0) { + ASSERT(!AreAliased(scratch0, object, address, no_reg)); + scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_); + if (scratch0.is(ecx)) { + scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_); + } + if (object.is(ecx)) { + object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_); + } + if (address.is(ecx)) { + address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_); + } + ASSERT(!AreAliased(scratch0_, object_, address_, ecx)); + } + + void Save(MacroAssembler* masm) { + ASSERT(!address_orig_.is(object_)); + ASSERT(object_.is(object_orig_) || address_.is(address_orig_)); + ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); + ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_)); + ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_)); + // We don't have to save scratch0_orig_ because it was given to us as + // a scratch register. But if we had to switch to a different reg then + // we should save the new scratch0_. + if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_); + if (!ecx.is(scratch0_orig_) && + !ecx.is(object_orig_) && + !ecx.is(address_orig_)) { + masm->push(ecx); + } + masm->push(scratch1_); + if (!address_.is(address_orig_)) { + masm->push(address_); + masm->mov(address_, address_orig_); + } + if (!object_.is(object_orig_)) { + masm->push(object_); + masm->mov(object_, object_orig_); + } + } + + void Restore(MacroAssembler* masm) { + // These will have been preserved the entire time, so we just need to move + // them back. Only in one case is the orig_ reg different from the plain + // one, since only one of them can alias with ecx. 
+ if (!object_.is(object_orig_)) { + masm->mov(object_orig_, object_); + masm->pop(object_); + } + if (!address_.is(address_orig_)) { + masm->mov(address_orig_, address_); + masm->pop(address_); + } + masm->pop(scratch1_); + if (!ecx.is(scratch0_orig_) && + !ecx.is(object_orig_) && + !ecx.is(address_orig_)) { + masm->pop(ecx); + } + if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_); + } + + // If we have to call into C then we need to save and restore all caller- + // saved registers that were not already preserved. The caller saved + // registers are eax, ecx and edx. The three scratch registers (incl. ecx) + // will be restored by other means so we don't bother pushing them here. + void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { + if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax); + if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx); + if (mode == kSaveFPRegs) { + CpuFeatures::Scope scope(SSE2); + masm->sub(esp, + Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); + // Save all XMM registers except XMM0. + for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) { + XMMRegister reg = XMMRegister::from_code(i); + masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg); + } + } + } + + inline void RestoreCallerSaveRegisters(MacroAssembler*masm, + SaveFPRegsMode mode) { + if (mode == kSaveFPRegs) { + CpuFeatures::Scope scope(SSE2); + // Restore all XMM registers except XMM0. + for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) { + XMMRegister reg = XMMRegister::from_code(i); + masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize)); + } + masm->add(esp, + Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); + } + if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx); + if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax); + } + + inline Register object() { return object_; } + inline Register address() { return address_; } + inline Register scratch0() { return scratch0_; } + inline Register scratch1() { return scratch1_; } + + private: + Register object_orig_; + Register address_orig_; + Register scratch0_orig_; + Register object_; + Register address_; + Register scratch0_; + Register scratch1_; + // Third scratch register is always ecx. 
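The caller-save helpers above reserve one double-sized stack slot per XMM register except xmm0, with slot i - 1 holding register i. A compile-time sketch of that arithmetic (the register count of 8 is the ia32/SSE2 assumption used here):

constexpr int kNumXmmRegisters = 8;  // assumption: xmm0..xmm7 on ia32
constexpr int kDoubleSlotSize = 8;

constexpr int SpillAreaSize() { return kDoubleSlotSize * (kNumXmmRegisters - 1); }
constexpr int SpillSlotOffset(int reg_index /* 1..7 */) {
  return (reg_index - 1) * kDoubleSlotSize;
}

static_assert(SpillSlotOffset(7) + kDoubleSlotSize == SpillAreaSize(),
              "the last spilled register ends exactly at the top of the reserved area");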
+ + Register GetRegThatIsNotEcxOr(Register r1, + Register r2, + Register r3) { + for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + Register candidate = Register::FromAllocationIndex(i); + if (candidate.is(ecx)) continue; + if (candidate.is(r1)) continue; + if (candidate.is(r2)) continue; + if (candidate.is(r3)) continue; + return candidate; + } + UNREACHABLE(); + return no_reg; + } + friend class RecordWriteStub; + }; + + enum OnNoNeedToInformIncrementalMarker { + kReturnOnNoNeedToInformIncrementalMarker, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker + } +; + void Generate(MacroAssembler* masm); + void GenerateIncremental(MacroAssembler* masm, Mode mode); + void CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode); + void InformIncrementalMarker(MacroAssembler* masm, Mode mode); + + Major MajorKey() { return RecordWrite; } + + int MinorKey() { + return ObjectBits::encode(object_.code()) | + ValueBits::encode(value_.code()) | + AddressBits::encode(address_.code()) | + RememberedSetActionBits::encode(remembered_set_action_) | + SaveFPRegsModeBits::encode(save_fp_regs_mode_); + } + + bool MustBeInStubCache() { + // All stubs must be registered in the stub cache + // otherwise IncrementalMarker would not be able to find + // and patch it. + return true; + } + + void Activate(Code* code) { + code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); + } + + class ObjectBits: public BitField<int, 0, 3> {}; + class ValueBits: public BitField<int, 3, 3> {}; + class AddressBits: public BitField<int, 6, 3> {}; + class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {}; + class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {}; + + Register object_; + Register value_; + Register address_; + RememberedSetAction remembered_set_action_; + SaveFPRegsMode save_fp_regs_mode_; + RegisterAllocation regs_; +}; + + } } // namespace v8::internal #endif // V8_IA32_CODE_STUBS_IA32_H_ diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 3a657bd54..f901b6f88 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -39,12 +39,16 @@ namespace internal { // Platform-specific RuntimeCallHelper functions. void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { - masm->EnterInternalFrame(); + masm->EnterFrame(StackFrame::INTERNAL); + ASSERT(!masm->has_frame()); + masm->set_has_frame(true); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { - masm->LeaveInternalFrame(); + masm->LeaveFrame(StackFrame::INTERNAL); + ASSERT(masm->has_frame()); + masm->set_has_frame(false); } @@ -108,14 +112,14 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ mov(edx, dst); __ and_(edx, 0xF); __ neg(edx); - __ add(Operand(edx), Immediate(16)); - __ add(dst, Operand(edx)); - __ add(src, Operand(edx)); - __ sub(Operand(count), edx); + __ add(edx, Immediate(16)); + __ add(dst, edx); + __ add(src, edx); + __ sub(count, edx); // edi is now aligned. Check if esi is also aligned. Label unaligned_source; - __ test(Operand(src), Immediate(0x0F)); + __ test(src, Immediate(0x0F)); __ j(not_zero, &unaligned_source); { // Copy loop for aligned source and destination. 
@@ -130,11 +134,11 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ prefetch(Operand(src, 0x20), 1); __ movdqa(xmm0, Operand(src, 0x00)); __ movdqa(xmm1, Operand(src, 0x10)); - __ add(Operand(src), Immediate(0x20)); + __ add(src, Immediate(0x20)); __ movdqa(Operand(dst, 0x00), xmm0); __ movdqa(Operand(dst, 0x10), xmm1); - __ add(Operand(dst), Immediate(0x20)); + __ add(dst, Immediate(0x20)); __ dec(loop_count); __ j(not_zero, &loop); @@ -142,12 +146,12 @@ OS::MemCopyFunction CreateMemCopyFunction() { // At most 31 bytes to copy. Label move_less_16; - __ test(Operand(count), Immediate(0x10)); + __ test(count, Immediate(0x10)); __ j(zero, &move_less_16); __ movdqa(xmm0, Operand(src, 0)); - __ add(Operand(src), Immediate(0x10)); + __ add(src, Immediate(0x10)); __ movdqa(Operand(dst, 0), xmm0); - __ add(Operand(dst), Immediate(0x10)); + __ add(dst, Immediate(0x10)); __ bind(&move_less_16); // At most 15 bytes to copy. Copy 16 bytes at end of string. @@ -176,11 +180,11 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ prefetch(Operand(src, 0x20), 1); __ movdqu(xmm0, Operand(src, 0x00)); __ movdqu(xmm1, Operand(src, 0x10)); - __ add(Operand(src), Immediate(0x20)); + __ add(src, Immediate(0x20)); __ movdqa(Operand(dst, 0x00), xmm0); __ movdqa(Operand(dst, 0x10), xmm1); - __ add(Operand(dst), Immediate(0x20)); + __ add(dst, Immediate(0x20)); __ dec(loop_count); __ j(not_zero, &loop); @@ -188,12 +192,12 @@ OS::MemCopyFunction CreateMemCopyFunction() { // At most 31 bytes to copy. Label move_less_16; - __ test(Operand(count), Immediate(0x10)); + __ test(count, Immediate(0x10)); __ j(zero, &move_less_16); __ movdqu(xmm0, Operand(src, 0)); - __ add(Operand(src), Immediate(0x10)); + __ add(src, Immediate(0x10)); __ movdqa(Operand(dst, 0), xmm0); - __ add(Operand(dst), Immediate(0x10)); + __ add(dst, Immediate(0x10)); __ bind(&move_less_16); // At most 15 bytes to copy. Copy 16 bytes at end of string. @@ -228,10 +232,10 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ mov(edx, dst); __ and_(edx, 0x03); __ neg(edx); - __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3) - __ add(dst, Operand(edx)); - __ add(src, Operand(edx)); - __ sub(Operand(count), edx); + __ add(edx, Immediate(4)); // edx = 4 - (dst & 3) + __ add(dst, edx); + __ add(src, edx); + __ sub(count, edx); // edi is now aligned, ecx holds number of remaning bytes to copy. __ mov(edx, count); diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc index 238994886..d7184ed20 100644 --- a/deps/v8/src/ia32/debug-ia32.cc +++ b/deps/v8/src/ia32/debug-ia32.cc @@ -100,63 +100,64 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, RegList non_object_regs, bool convert_call_to_jmp) { // Enter an internal frame. - __ EnterInternalFrame(); - - // Store the registers containing live values on the expression stack to - // make sure that these are correctly updated during GC. Non object values - // are stored as a smi causing it to be untouched by GC. 
- ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((object_regs & (1 << r)) != 0) { - __ push(reg); - } - if ((non_object_regs & (1 << r)) != 0) { - if (FLAG_debug_code) { - __ test(reg, Immediate(0xc0000000)); - __ Assert(zero, "Unable to encode value as smi"); + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as a smi causing it to be untouched by GC. + ASSERT((object_regs & ~kJSCallerSaved) == 0); + ASSERT((non_object_regs & ~kJSCallerSaved) == 0); + ASSERT((object_regs & non_object_regs) == 0); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((object_regs & (1 << r)) != 0) { + __ push(reg); + } + if ((non_object_regs & (1 << r)) != 0) { + if (FLAG_debug_code) { + __ test(reg, Immediate(0xc0000000)); + __ Assert(zero, "Unable to encode value as smi"); + } + __ SmiTag(reg); + __ push(reg); } - __ SmiTag(reg); - __ push(reg); } - } #ifdef DEBUG - __ RecordComment("// Calling from debug break to runtime - come in - over"); + __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif - __ Set(eax, Immediate(0)); // No arguments. - __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate()))); - - CEntryStub ceb(1); - __ CallStub(&ceb); - - // Restore the register values containing object pointers from the expression - // stack. - for (int i = kNumJSCallerSaved; --i >= 0;) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if (FLAG_debug_code) { - __ Set(reg, Immediate(kDebugZapValue)); - } - if ((object_regs & (1 << r)) != 0) { - __ pop(reg); - } - if ((non_object_regs & (1 << r)) != 0) { - __ pop(reg); - __ SmiUntag(reg); + __ Set(eax, Immediate(0)); // No arguments. + __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate()))); + + CEntryStub ceb(1); + __ CallStub(&ceb); + + // Restore the register values containing object pointers from the + // expression stack. + for (int i = kNumJSCallerSaved; --i >= 0;) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if (FLAG_debug_code) { + __ Set(reg, Immediate(kDebugZapValue)); + } + if ((object_regs & (1 << r)) != 0) { + __ pop(reg); + } + if ((non_object_regs & (1 << r)) != 0) { + __ pop(reg); + __ SmiUntag(reg); + } } - } - // Get rid of the internal frame. - __ LeaveInternalFrame(); + // Get rid of the internal frame. + } // If this call did not replace a call but patched other code then there will // be an unwanted return address left on the stack. Here we get rid of that. if (convert_call_to_jmp) { - __ add(Operand(esp), Immediate(kPointerSize)); + __ add(esp, Immediate(kPointerSize)); } // Now that the break point has been handled, resume normal execution by @@ -298,7 +299,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); // Re-run JSFunction, edi is function, esi is context. 
- __ jmp(Operand(edx)); + __ jmp(edx); } const bool Debug::kFrameDropperSupported = true; diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc index e23f3e9ef..02cc4ebd3 100644 --- a/deps/v8/src/ia32/deoptimizer-ia32.cc +++ b/deps/v8/src/ia32/deoptimizer-ia32.cc @@ -116,7 +116,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { new_reloc->GetDataStartAddress() + padding, 0); intptr_t comment_string = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString); - RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string); + RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL); for (int i = 0; i < additional_comments; ++i) { #ifdef DEBUG byte* pos_before = reloc_info_writer.pos(); @@ -174,7 +174,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { // We use RUNTIME_ENTRY for deoptimization bailouts. RelocInfo rinfo(curr_address + 1, // 1 after the call opcode. RelocInfo::RUNTIME_ENTRY, - reinterpret_cast<intptr_t>(deopt_entry)); + reinterpret_cast<intptr_t>(deopt_entry), + NULL); reloc_info_writer.Write(&rinfo); ASSERT_GE(reloc_info_writer.pos(), reloc_info->address() + ByteArray::kHeaderSize); @@ -205,6 +206,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { node->set_next(data->deoptimizing_code_list_); data->deoptimizing_code_list_ = node; + // We might be in the middle of incremental marking with compaction. + // Tell collector to treat this code object in a special way and + // ignore all slots that might have been recorded on it. + isolate->heap()->mark_compact_collector()->InvalidateCode(code); + // Set the code for the function to non-optimized version. function->ReplaceCode(function->shared()->code()); @@ -221,7 +227,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } -void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, +void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, + Address pc_after, Code* check_code, Code* replacement_code) { Address call_target_address = pc_after - kIntSize; @@ -250,6 +257,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, *(call_target_address - 2) = 0x90; // nop Assembler::set_target_address_at(call_target_address, replacement_code->entry()); + + RelocInfo rinfo(call_target_address, + RelocInfo::CODE_TARGET, + 0, + unoptimized_code); + unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode( + unoptimized_code, &rinfo, replacement_code); } @@ -268,6 +282,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, *(call_target_address - 2) = 0x07; // offset Assembler::set_target_address_at(call_target_address, check_code->entry()); + + check_code->GetHeap()->incremental_marking()-> + RecordCodeTargetPatch(call_target_address, check_code); } @@ -415,7 +432,14 @@ void Deoptimizer::DoComputeOsrOutputFrame() { output_[0]->SetPc(reinterpret_cast<uint32_t>(from_)); } else { // Setup the frame pointer and the context pointer. - output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code())); + // All OSR stack frames are dynamically aligned to an 8-byte boundary. + int frame_pointer = input_->GetRegister(ebp.code()); + if ((frame_pointer & 0x4) == 0) { + // Return address at FP + 4 should be aligned, so FP mod 8 should be 4. 
+ frame_pointer -= kPointerSize; + has_alignment_padding_ = 1; + } + output_[0]->SetRegister(ebp.code(), frame_pointer); output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code())); unsigned pc_offset = data->OsrPcOffset()->value(); @@ -480,9 +504,11 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, // top address and the current frame's size. uint32_t top_address; if (is_bottommost) { - // 2 = context and function in the frame. - top_address = - input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes; + // If the optimized frame had alignment padding, adjust the frame pointer + // to point to the new position of the old frame pointer after padding + // is removed. Subtract 2 * kPointerSize for the context and function slots. + top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) - + height_in_bytes + has_alignment_padding_ * kPointerSize; } else { top_address = output_[frame_index - 1]->GetTop() - output_frame_size; } @@ -533,7 +559,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, } output_frame->SetFrameSlot(output_offset, value); intptr_t fp_value = top_address + output_offset; - ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value); + ASSERT(!is_bottommost || + input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize + == fp_value); output_frame->SetFp(fp_value); if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value); if (FLAG_trace_deopt) { @@ -638,7 +666,7 @@ void Deoptimizer::EntryGenerator::Generate() { const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumAllocatableRegisters; - __ sub(Operand(esp), Immediate(kDoubleRegsSize)); + __ sub(esp, Immediate(kDoubleRegsSize)); for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); int offset = i * kDoubleSize; @@ -662,7 +690,7 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize)); __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize)); } - __ sub(edx, Operand(ebp)); + __ sub(edx, ebp); __ neg(edx); // Allocate a new deoptimizer object. @@ -675,7 +703,10 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta. __ mov(Operand(esp, 5 * kPointerSize), Immediate(ExternalReference::isolate_address())); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + } // Preserve deoptimizer object in register eax and get the input // frame descriptor pointer. @@ -698,15 +729,15 @@ void Deoptimizer::EntryGenerator::Generate() { // Remove the bailout id and the double registers from the stack. if (type() == EAGER) { - __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize)); + __ add(esp, Immediate(kDoubleRegsSize + kPointerSize)); } else { - __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize)); + __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize)); } // Compute a pointer to the unwinding limit in register ecx; that is // the first stack slot not part of the input frame. 
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); - __ add(ecx, Operand(esp)); + __ add(ecx, esp); // Unwind the stack down to - but not including - the unwinding // limit and copy the contents of the activation frame to the input @@ -715,18 +746,43 @@ void Deoptimizer::EntryGenerator::Generate() { Label pop_loop; __ bind(&pop_loop); __ pop(Operand(edx, 0)); - __ add(Operand(edx), Immediate(sizeof(uint32_t))); - __ cmp(ecx, Operand(esp)); + __ add(edx, Immediate(sizeof(uint32_t))); + __ cmp(ecx, esp); __ j(not_equal, &pop_loop); + // If frame was dynamically aligned, pop padding. + Label sentinel, sentinel_done; + __ pop(ecx); + __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset())); + __ j(equal, &sentinel); + __ push(ecx); + __ jmp(&sentinel_done); + __ bind(&sentinel); + __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()), + Immediate(1)); + __ bind(&sentinel_done); // Compute the output frame in the deoptimizer. __ push(eax); __ PrepareCallCFunction(1, ebx); __ mov(Operand(esp, 0 * kPointerSize), eax); - __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 1); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction( + ExternalReference::compute_output_frames_function(isolate), 1); + } __ pop(eax); + if (type() == OSR) { + // If alignment padding is added, push the sentinel. + Label no_osr_padding; + __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()), + Immediate(0)); + __ j(equal, &no_osr_padding, Label::kNear); + __ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset())); + __ bind(&no_osr_padding); + } + + // Replace the current frame with the output frames. Label outer_push_loop, inner_push_loop; // Outer loop state: eax = current FrameDescription**, edx = one past the @@ -739,12 +795,12 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(ebx, Operand(eax, 0)); __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); __ bind(&inner_push_loop); - __ sub(Operand(ecx), Immediate(sizeof(uint32_t))); + __ sub(ecx, Immediate(sizeof(uint32_t))); __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset())); - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(not_zero, &inner_push_loop); - __ add(Operand(eax), Immediate(kPointerSize)); - __ cmp(eax, Operand(edx)); + __ add(eax, Immediate(kPointerSize)); + __ cmp(eax, edx); __ j(below, &outer_push_loop); // In case of OSR, we have to restore the XMM registers. diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc index a936277b2..04edc5f42 100644 --- a/deps/v8/src/ia32/disasm-ia32.cc +++ b/deps/v8/src/ia32/disasm-ia32.cc @@ -55,6 +55,7 @@ struct ByteMnemonic { static const ByteMnemonic two_operands_instr[] = { + {0x01, "add", OPER_REG_OP_ORDER}, {0x03, "add", REG_OPER_OP_ORDER}, {0x09, "or", OPER_REG_OP_ORDER}, {0x0B, "or", REG_OPER_OP_ORDER}, @@ -117,6 +118,19 @@ static const ByteMnemonic short_immediate_instr[] = { }; +// Generally we don't want to generate these because they are subject to partial +// register stalls. They are included for completeness and because the cmp +// variant is used by the RecordWrite stub. Because it does not update the +// register it is not subject to partial register stalls. 
+static ByteMnemonic byte_immediate_instr[] = { + {0x0c, "or", UNSET_OP_ORDER}, + {0x24, "and", UNSET_OP_ORDER}, + {0x34, "xor", UNSET_OP_ORDER}, + {0x3c, "cmp", UNSET_OP_ORDER}, + {-1, "", UNSET_OP_ORDER} +}; + + static const char* const jump_conditional_mnem[] = { /*0*/ "jo", "jno", "jc", "jnc", /*4*/ "jz", "jnz", "jna", "ja", @@ -149,7 +163,8 @@ enum InstructionType { REGISTER_INSTR, MOVE_REG_INSTR, CALL_JUMP_INSTR, - SHORT_IMMEDIATE_INSTR + SHORT_IMMEDIATE_INSTR, + BYTE_IMMEDIATE_INSTR }; @@ -198,6 +213,7 @@ void InstructionTable::Init() { CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR); CopyTable(call_jump_instr, CALL_JUMP_INSTR); CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR); + CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR); AddJumpConditionalShort(); SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc"); SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec"); @@ -912,6 +928,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, break; } + case BYTE_IMMEDIATE_INSTR: { + AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]); + data += 2; + break; + } + case NO_INSTR: processed = false; break; @@ -1346,11 +1368,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, data += 2; break; - case 0x2C: - AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1)); - data += 2; - break; - case 0xA9: AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1)); data += 5; diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index 81c9ccb12..33d5cabad 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -138,7 +138,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // function calls. if (info->is_strict_mode() || info->is_native()) { Label ok; - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(zero, &ok, Label::kNear); // +1 for return address. int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize; @@ -147,6 +147,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { __ bind(&ok); } + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). + FrameScope frame_scope(masm_, StackFrame::MANUAL); + __ push(ebp); // Caller's frame pointer. __ mov(ebp, esp); __ push(esi); // Callee's context. @@ -200,11 +205,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // Store it in the context. int context_offset = Context::SlotOffset(var->index()); __ mov(Operand(esi, context_offset), eax); - // Update the write barrier. This clobbers all involved - // registers, so we have use a third register to avoid - // clobbering esi. - __ mov(ecx, esi); - __ RecordWrite(ecx, context_offset, eax, ebx); + // Update the write barrier. This clobbers eax and ebx. + __ RecordWriteContextSlot(esi, + context_offset, + eax, + ebx, + kDontSaveFPRegs); } } } @@ -260,7 +266,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // constant. 
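Note on the byte_immediate_instr table added to disasm-ia32.cc above: every opcode in it (0x0C, 0x24, 0x34, 0x3C) takes AL and an 8-bit immediate, so the decoder consumes exactly two bytes and prints "<mnem> al, 0xNN". A small self-contained sketch of that lookup-and-format step, simplified and not the actual V8 decoder:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    namespace sketch {

    struct ByteImmediate { uint8_t opcode; const char* mnem; };

    const ByteImmediate kByteImmediateInstr[] = {
      {0x0C, "or"}, {0x24, "and"}, {0x34, "xor"}, {0x3C, "cmp"},
    };

    // Returns the number of bytes consumed (2), or 0 if the opcode is not in
    // the table.  The operands are always AL and an 8-bit immediate.
    int DecodeByteImmediate(const uint8_t* data, char* out, size_t out_size) {
      for (const ByteImmediate& entry : kByteImmediateInstr) {
        if (entry.opcode == data[0]) {
          std::snprintf(out, out_size, "%s al, 0x%x", entry.mnem, data[1]);
          return 2;
        }
      }
      return 0;
    }

    }  // namespace sketch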
if (scope()->is_function_scope() && scope()->function() != NULL) { int ignored = 0; - EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored); + EmitDeclaration(scope()->function(), CONST, NULL, &ignored); } VisitDeclarations(scope()->declarations()); } @@ -365,10 +371,10 @@ void FullCodeGenerator::EmitReturnSequence() { void FullCodeGenerator::verify_stack_height() { ASSERT(FLAG_verify_stack_height); - __ sub(Operand(ebp), Immediate(kPointerSize * stack_height())); - __ cmp(ebp, Operand(esp)); + __ sub(ebp, Immediate(kPointerSize * stack_height())); + __ cmp(ebp, esp); __ Assert(equal, "Full codegen stack height not as expected."); - __ add(Operand(ebp), Immediate(kPointerSize * stack_height())); + __ add(ebp, Immediate(kPointerSize * stack_height())); } @@ -597,7 +603,7 @@ void FullCodeGenerator::DoTest(Expression* condition, ToBooleanStub stub(result_register()); __ push(result_register()); __ CallStub(&stub, condition->test_id()); - __ test(result_register(), Operand(result_register())); + __ test(result_register(), result_register()); // The stub returns nonzero for true. Split(not_zero, if_true, if_false, fall_through); } @@ -661,11 +667,12 @@ void FullCodeGenerator::SetVar(Variable* var, ASSERT(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ mov(location, src); + // Emit the write barrier code if the location is in the heap. if (var->IsContextSlot()) { int offset = Context::SlotOffset(var->index()); ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi)); - __ RecordWrite(scratch0, offset, src, scratch1); + __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs); } } @@ -697,7 +704,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state, void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, - Variable::Mode mode, + VariableMode mode, FunctionLiteral* function, int* global_count) { // If it was not possible to allocate the variable at compile time, we @@ -715,7 +722,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); VisitForAccumulatorValue(function); __ mov(StackOperand(variable), result_register()); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { Comment cmnt(masm_, "[ Declaration"); __ mov(StackOperand(variable), Immediate(isolate()->factory()->the_hole_value())); @@ -738,11 +745,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); VisitForAccumulatorValue(function); __ mov(ContextOperand(esi, variable->index()), result_register()); - int offset = Context::SlotOffset(variable->index()); - __ mov(ebx, esi); - __ RecordWrite(ebx, offset, result_register(), ecx); + // We know that we have written a function, which is not a smi. + __ RecordWriteContextSlot(esi, + Context::SlotOffset(variable->index()), + result_register(), + ecx, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); PrepareForBailoutForId(proxy->id(), NO_REGISTERS); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { Comment cmnt(masm_, "[ Declaration"); __ mov(ContextOperand(esi, variable->index()), Immediate(isolate()->factory()->the_hole_value())); @@ -756,10 +768,8 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ push(esi); __ push(Immediate(variable->name())); // Declaration nodes are always introduced in one of three modes. 
- ASSERT(mode == Variable::VAR || - mode == Variable::CONST || - mode == Variable::LET); - PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE; + ASSERT(mode == VAR || mode == CONST || mode == LET); + PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE; __ push(Immediate(Smi::FromInt(attr))); // Push initial value, if any. // Note: For variables we must not push an initial value (such as @@ -768,7 +778,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, increment_stack_height(3); if (function != NULL) { VisitForStackValue(function); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { __ push(Immediate(isolate()->factory()->the_hole_value())); increment_stack_height(); } else { @@ -835,10 +845,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { if (inline_smi_code) { Label slow_case; __ mov(ecx, edx); - __ or_(ecx, Operand(eax)); + __ or_(ecx, eax); patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear); - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); __ j(not_equal, &next_test); __ Drop(1); // Switch value is no longer needed. __ jmp(clause->body_target()); @@ -850,7 +860,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT); __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId()); patch_site.EmitPatchInfo(); - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_equal, &next_test); __ Drop(1); // Switch value is no longer needed. __ jmp(clause->body_target()); @@ -939,7 +949,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // For all objects but the receiver, check that the cache is empty. Label check_prototype; - __ cmp(ecx, Operand(eax)); + __ cmp(ecx, eax); __ j(equal, &check_prototype, Label::kNear); __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset)); __ cmp(edx, isolate()->factory()->empty_fixed_array()); @@ -1021,9 +1031,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ push(ecx); // Enumerable. __ push(ebx); // Current entry. __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION); - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(equal, loop_statement.continue_label()); - __ mov(ebx, Operand(eax)); + __ mov(ebx, eax); // Update the 'each' property or variable from the possibly filtered // entry in register ebx. @@ -1047,7 +1057,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // Remove the pointers stored on the stack. __ bind(loop_statement.break_label()); - __ add(Operand(esp), Immediate(5 * kPointerSize)); + __ add(esp, Immediate(5 * kPointerSize)); decrement_stack_height(ForIn::kElementCount); // Exit and decrement the loop depth. @@ -1189,16 +1199,22 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. 
- if (var->mode() == Variable::DYNAMIC_GLOBAL) { + if (var->mode() == DYNAMIC_GLOBAL) { EmitLoadGlobalCheckExtensions(var, typeof_state, slow); __ jmp(done); - } else if (var->mode() == Variable::DYNAMIC_LOCAL) { + } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ mov(eax, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == Variable::CONST) { + if (local->mode() == CONST || + local->mode() == LET) { __ cmp(eax, isolate()->factory()->the_hole_value()); __ j(not_equal, done); - __ mov(eax, isolate()->factory()->undefined_value()); + if (local->mode() == CONST) { + __ mov(eax, isolate()->factory()->undefined_value()); + } else { // LET + __ push(Immediate(var->name())); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + } } __ jmp(done); } @@ -1231,7 +1247,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { Comment cmnt(masm_, var->IsContextSlot() ? "Context variable" : "Stack variable"); - if (var->mode() != Variable::LET && var->mode() != Variable::CONST) { + if (var->mode() != LET && var->mode() != CONST) { context()->Plug(var); } else { // Let and const need a read barrier. @@ -1239,10 +1255,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { GetVar(eax, var); __ cmp(eax, isolate()->factory()->the_hole_value()); __ j(not_equal, &done, Label::kNear); - if (var->mode() == Variable::LET) { + if (var->mode() == LET) { __ push(Immediate(var->name())); __ CallRuntime(Runtime::kThrowReferenceError, 1); - } else { // Variable::CONST + } else { // CONST __ mov(eax, isolate()->factory()->undefined_value()); } __ bind(&done); @@ -1480,8 +1496,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ mov(FieldOperand(ebx, offset), result_register()); + Label no_map_change; + __ JumpIfSmi(result_register(), &no_map_change); // Update the write barrier for the array store. 
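Note on the EmitDynamicLookupFastCase and EmitVariableLoad hunks above: a let or const slot starts out holding the hole sentinel, and a read that still sees the hole yields undefined for (legacy) const but throws a ReferenceError for let. A tiny value-level model of that read barrier, using made-up enum types rather than tagged heap values:

    #include <stdexcept>
    #include <string>

    namespace sketch {

    enum class Mode { VAR, CONST, LET };
    enum class Value { kUndefined, kTheHole, kSomeObject };

    // Models the read barrier for let/const slots: the slot is pre-filled
    // with the hole until the declaration's initializer has run.
    Value LoadWithReadBarrier(Value slot, Mode mode, const char* name) {
      if (slot != Value::kTheHole) return slot;
      if (mode == Mode::CONST) return Value::kUndefined;  // legacy const
      // LET: reading before initialization is an error.
      throw std::runtime_error(std::string("ReferenceError: ") + name);
    }

    }  // namespace sketch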
- __ RecordWrite(ebx, offset, result_register(), ecx); + __ RecordWriteField(ebx, offset, result_register(), ecx, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset)); + __ CheckFastSmiOnlyElements(edi, &no_map_change, Label::kNear); + __ push(Operand(esp, 0)); + __ CallRuntime(Runtime::kNonSmiElementStored, 1); + __ bind(&no_map_change); PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); } @@ -1641,7 +1667,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, __ pop(edx); decrement_stack_height(); __ mov(ecx, eax); - __ or_(eax, Operand(edx)); + __ or_(eax, edx); JumpPatchSite patch_site(masm_); patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear); @@ -1691,32 +1717,32 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, break; } case Token::ADD: - __ add(eax, Operand(ecx)); + __ add(eax, ecx); __ j(overflow, &stub_call); break; case Token::SUB: - __ sub(eax, Operand(ecx)); + __ sub(eax, ecx); __ j(overflow, &stub_call); break; case Token::MUL: { __ SmiUntag(eax); - __ imul(eax, Operand(ecx)); + __ imul(eax, ecx); __ j(overflow, &stub_call); - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_zero, &done, Label::kNear); __ mov(ebx, edx); - __ or_(ebx, Operand(ecx)); + __ or_(ebx, ecx); __ j(negative, &stub_call); break; } case Token::BIT_OR: - __ or_(eax, Operand(ecx)); + __ or_(eax, ecx); break; case Token::BIT_AND: - __ and_(eax, Operand(ecx)); + __ and_(eax, ecx); break; case Token::BIT_XOR: - __ xor_(eax, Operand(ecx)); + __ xor_(eax, ecx); break; default: UNREACHABLE(); @@ -1838,7 +1864,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); } - } else if (var->mode() == Variable::LET && op != Token::INIT_LET) { + } else if (var->mode() == LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. if (var->IsLookupSlot()) { __ push(eax); // Value. @@ -1859,11 +1885,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ mov(location, eax); if (var->IsContextSlot()) { __ mov(edx, eax); - __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx); + int offset = Context::SlotOffset(var->index()); + __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs); } } - } else if (var->mode() != Variable::CONST) { + } else if (var->mode() != CONST) { // Assignment to var or initializing assignment to let. if (var->IsStackAllocated() || var->IsContextSlot()) { MemOperand location = VarOperand(var, ecx); @@ -1877,7 +1904,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ mov(location, eax); if (var->IsContextSlot()) { __ mov(edx, eax); - __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx); + int offset = Context::SlotOffset(var->index()); + __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs); } } else { ASSERT(var->IsLookupSlot()); @@ -2069,8 +2097,29 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) { } // Record source position for debugger. SetSourcePosition(expr->position()); + + // Record call targets in unoptimized code, but not in the snapshot. + bool record_call_target = !Serializer::enabled(); + if (record_call_target) { + flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET); + } CallFunctionStub stub(arg_count, flags); __ CallStub(&stub); + if (record_call_target) { + // There is a one element cache in the instruction stream. 
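Note on the EmitCallWithStub hunk above: the "one element cache" is a JSGlobalPropertyCell allocated per call site, seeded with an uninitialized sentinel and later patched by the stub with the observed target. Only the uninitialized sentinel is named in the patch, so the monomorphic/megamorphic states and transitions in this sketch are assumptions about how such a single-entry cache typically behaves:

    namespace sketch {

    struct Function;  // opaque

    // One cache cell per call site.
    struct CallSiteCache {
      enum class State { kUninitialized, kMonomorphic, kMegamorphic };
      State state = State::kUninitialized;
      const Function* target = nullptr;

      void Record(const Function* callee) {
        switch (state) {
          case State::kUninitialized:      // first call: remember the target
            state = State::kMonomorphic;
            target = callee;
            break;
          case State::kMonomorphic:        // second distinct target: give up
            if (target != callee) {
              state = State::kMegamorphic;
              target = nullptr;
            }
            break;
          case State::kMegamorphic:
            break;
        }
      }
    };

    }  // namespace sketch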
+#ifdef DEBUG + int return_site_offset = masm()->pc_offset(); +#endif + Handle<Object> uninitialized = + CallFunctionStub::UninitializedSentinel(isolate()); + Handle<JSGlobalPropertyCell> cell = + isolate()->factory()->NewJSGlobalPropertyCell(uninitialized); + __ test(eax, Immediate(cell)); + // Patching code in the stub assumes the opcode is 1 byte and there is + // word for a pointer in the operand. + ASSERT(masm()->pc_offset() - return_site_offset >= 1 + kPointerSize); + } + RecordJSReturnSite(expr); // Restore context register. __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); @@ -2094,10 +2143,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, // Push the strict mode flag. In harmony mode every eval call // is a strict mode eval call. - StrictModeFlag strict_mode = strict_mode_flag(); - if (FLAG_harmony_block_scoping) { - strict_mode = kStrictMode; - } + StrictModeFlag strict_mode = + FLAG_harmony_scoping ? kStrictMode : strict_mode_flag(); __ push(Immediate(Smi::FromInt(strict_mode))); __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP @@ -2140,7 +2187,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { // context lookup in the runtime system. Label done; Variable* var = proxy->var(); - if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) { + if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) { Label slow; EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow); // Push the function and resolve eval. @@ -2438,9 +2485,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( STATIC_ASSERT(kPointerSize == 4); __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize)); // Calculate location of the first key name. - __ add(Operand(ebx), - Immediate(FixedArray::kHeaderSize + - DescriptorArray::kFirstIndex * kPointerSize)); + __ add(ebx, + Immediate(FixedArray::kHeaderSize + + DescriptorArray::kFirstIndex * kPointerSize)); // Loop through all the keys in the descriptor array. If one of these is the // symbol valueOf the result is false. Label entry, loop; @@ -2449,9 +2496,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ mov(edx, FieldOperand(ebx, 0)); __ cmp(edx, FACTORY->value_of_symbol()); __ j(equal, if_false); - __ add(Operand(ebx), Immediate(kPointerSize)); + __ add(ebx, Immediate(kPointerSize)); __ bind(&entry); - __ cmp(ebx, Operand(ecx)); + __ cmp(ebx, ecx); __ j(not_equal, &loop); // Reload map as register ebx was used as temporary above. @@ -2591,7 +2638,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) { __ pop(ebx); decrement_stack_height(); - __ cmp(eax, Operand(ebx)); + __ cmp(eax, ebx); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(equal, if_true, if_false, fall_through); @@ -2647,20 +2694,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) { // Check that the object is a JS object but take special care of JS // functions to make sure they have 'Function' as their class. + // Assume that there are only two callable types, and one of them is at + // either end of the type range for JS object types. Saves extra comparisons. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax); // Map is now in eax. __ j(below, &null); - - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. 
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE); - __ j(above_equal, &function); - - // Check if the constructor in the map is a function. + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + __ j(equal, &function); + + __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + __ j(equal, &function); + // Assume that there is no larger type. + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); + + // Check if the constructor in the map is a JS function. __ mov(eax, FieldOperand(eax, Map::kConstructorOffset)); __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx); __ j(not_equal, &non_function_constructor); @@ -2741,8 +2792,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) { if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope fscope(SSE2); __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. - __ movd(xmm1, Operand(ebx)); - __ movd(xmm0, Operand(eax)); + __ movd(xmm1, ebx); + __ movd(xmm0, eax); __ cvtss2sd(xmm1, xmm1); __ xorps(xmm0, xmm1); __ subsd(xmm0, xmm1); @@ -2843,10 +2894,11 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) { // Store the value. __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax); + // Update the write barrier. Save the value as it will be // overwritten by the write barrier code and is needed afterward. __ mov(edx, eax); - __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx); + __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs); __ bind(&done); context()->Plug(eax); @@ -3119,14 +3171,14 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { __ mov(index_1, Operand(esp, 1 * kPointerSize)); __ mov(index_2, Operand(esp, 0)); __ mov(temp, index_1); - __ or_(temp, Operand(index_2)); + __ or_(temp, index_2); __ JumpIfNotSmi(temp, &slow_case); // Check that both indices are valid. __ mov(temp, FieldOperand(object, JSArray::kLengthOffset)); - __ cmp(temp, Operand(index_1)); + __ cmp(temp, index_1); __ j(below_equal, &slow_case); - __ cmp(temp, Operand(index_2)); + __ cmp(temp, index_2); __ j(below_equal, &slow_case); // Bring addresses into index1 and index2. @@ -3139,16 +3191,35 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { __ mov(Operand(index_2, 0), object); __ mov(Operand(index_1, 0), temp); - Label new_space; - __ InNewSpace(elements, temp, equal, &new_space); + Label no_remembered_set; + __ CheckPageFlag(elements, + temp, + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + not_zero, + &no_remembered_set, + Label::kNear); + // Possible optimization: do a check that both values are Smis + // (or them and test against Smi mask.) + + // We are swapping two objects in an array and the incremental marker never + // pauses in the middle of scanning a single object. Therefore the + // incremental marker is not disturbed, so we don't need to call the + // RecordWrite stub that notifies the incremental marker. 
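Note on the EmitClassOf rewrite above: it relies on the instance-type layout asserted there, namely that there are exactly two callable types and they sit at the two ends of the spec-object range, so two equality checks are enough to recognize objects with class 'Function'. The same reasoning expressed as compile-time checks over an illustrative enum (the real constants live in V8's InstanceType):

    namespace sketch {

    // Illustrative ordering only; values and names mimic the layout the
    // STATIC_ASSERTs above depend on.
    enum InstanceType {
      FIRST_SPEC_OBJECT_TYPE,              // callable (JS function)
      FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,  // == FIRST_SPEC_OBJECT_TYPE + 1
      SOME_OTHER_OBJECT_TYPE,
      LAST_NONCALLABLE_SPEC_OBJECT_TYPE,   // == LAST_SPEC_OBJECT_TYPE - 1
      LAST_SPEC_OBJECT_TYPE,               // callable (function proxy)
      LAST_TYPE = LAST_SPEC_OBJECT_TYPE
    };

    static_assert(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1, "callable at the low end");
    static_assert(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1, "callable at the high end");

    // With that layout, "callable spec object?" is two equality checks
    // instead of a range check plus extra comparisons.
    inline bool HasFunctionClass(InstanceType t) {
      return t == FIRST_SPEC_OBJECT_TYPE || t == LAST_SPEC_OBJECT_TYPE;
    }

    }  // namespace sketch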
+ __ RememberedSetHelper(elements, + index_1, + temp, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); + __ RememberedSetHelper(elements, + index_2, + temp, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); + + __ bind(&no_remembered_set); - __ mov(object, elements); - __ RecordWriteHelper(object, index_1, temp); - __ RecordWriteHelper(elements, index_2, temp); - - __ bind(&new_space); // We are done. Drop elements from the stack, and return undefined. - __ add(Operand(esp), Immediate(3 * kPointerSize)); + __ add(esp, Immediate(3 * kPointerSize)); __ mov(eax, isolate()->factory()->undefined_value()); __ jmp(&done); @@ -3221,11 +3292,11 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) { __ pop(left); Label done, fail, ok; - __ cmp(left, Operand(right)); + __ cmp(left, right); __ j(equal, &ok); // Fail if either is a non-HeapObject. __ mov(tmp, left); - __ and_(Operand(tmp), right); + __ and_(tmp, right); __ JumpIfSmi(tmp, &fail); __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset)); __ CmpInstanceType(tmp, JS_REGEXP_TYPE); @@ -3316,7 +3387,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { Operand separator_operand = Operand(esp, 2 * kPointerSize); Operand result_operand = Operand(esp, 1 * kPointerSize); Operand array_length_operand = Operand(esp, 0); - __ sub(Operand(esp), Immediate(2 * kPointerSize)); + __ sub(esp, Immediate(2 * kPointerSize)); __ cld(); // Check that the array is a JSArray __ JumpIfSmi(array, &bailout); @@ -3352,7 +3423,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { // Live loop registers: index, array_length, string, // scratch, string_length, elements. if (FLAG_debug_code) { - __ cmp(index, Operand(array_length)); + __ cmp(index, array_length); __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin"); } __ bind(&loop); @@ -3370,8 +3441,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ add(string_length, FieldOperand(string, SeqAsciiString::kLengthOffset)); __ j(overflow, &bailout); - __ add(Operand(index), Immediate(1)); - __ cmp(index, Operand(array_length)); + __ add(index, Immediate(1)); + __ cmp(index, array_length); __ j(less, &loop); // If array_length is 1, return elements[0], a string. @@ -3405,10 +3476,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { // to string_length. __ mov(scratch, separator_operand); __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset)); - __ sub(string_length, Operand(scratch)); // May be negative, temporarily. + __ sub(string_length, scratch); // May be negative, temporarily. __ imul(scratch, array_length_operand); __ j(overflow, &bailout); - __ add(string_length, Operand(scratch)); + __ add(string_length, scratch); __ j(overflow, &bailout); __ shr(string_length, 1); @@ -3449,7 +3520,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ lea(string, FieldOperand(string, SeqAsciiString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); - __ add(Operand(index), Immediate(1)); + __ add(index, Immediate(1)); __ bind(&loop_1_condition); __ cmp(index, array_length_operand); __ j(less, &loop_1); // End while (index < length). 
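Note on the EmitSwapElements hunk above: swapping two existing elements cannot confuse the incremental marker, so the code skips the full RecordWrite stub and only records the two slots for the store buffer, and even that is skipped when the page is already flagged SCAN_ON_SCAVENGE. A simplified model of that fast path, with stand-in types rather than V8's MemoryChunk and store buffer:

    #include <cstdint>
    #include <unordered_set>

    namespace sketch {

    struct Page {
      bool scan_on_scavenge = false;  // MemoryChunk::SCAN_ON_SCAVENGE in V8
    };

    struct Heap {
      std::unordered_set<uintptr_t> store_buffer;  // remembered slots
    };

    // Record a slot that may now hold an old-to-new pointer.  If the whole
    // page is rescanned on scavenge anyway, individual slots need not be
    // remembered; no incremental-marking barrier is needed for a pure swap.
    void RememberedSetHelper(Heap* heap, const Page& page, uintptr_t slot) {
      if (page.scan_on_scavenge) return;
      heap->store_buffer.insert(slot);
    }

    }  // namespace sketch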
@@ -3490,7 +3561,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ lea(string, FieldOperand(string, SeqAsciiString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); - __ add(Operand(index), Immediate(1)); + __ add(index, Immediate(1)); __ cmp(index, array_length_operand); __ j(less, &loop_2); // End while (index < length). @@ -3531,7 +3602,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ lea(string, FieldOperand(string, SeqAsciiString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); - __ add(Operand(index), Immediate(1)); + __ add(index, Immediate(1)); __ cmp(index, array_length_operand); __ j(less, &loop_3); // End while (index < length). @@ -3543,7 +3614,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) { __ bind(&done); __ mov(eax, result_operand); // Drop temp values from the stack, and restore context register. - __ add(Operand(esp), Immediate(3 * kPointerSize)); + __ add(esp, Immediate(3 * kPointerSize)); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); decrement_stack_height(); @@ -3823,9 +3894,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { if (ShouldInlineSmiCase(expr->op())) { if (expr->op() == Token::INC) { - __ add(Operand(eax), Immediate(Smi::FromInt(1))); + __ add(eax, Immediate(Smi::FromInt(1))); } else { - __ sub(Operand(eax), Immediate(Smi::FromInt(1))); + __ sub(eax, Immediate(Smi::FromInt(1))); } __ j(overflow, &stub_call, Label::kNear); // We could eliminate this smi check if we split the code at @@ -3835,9 +3906,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { __ bind(&stub_call); // Call stub. Undo operation first. 
if (expr->op() == Token::INC) { - __ sub(Operand(eax), Immediate(Smi::FromInt(1))); + __ sub(eax, Immediate(Smi::FromInt(1))); } else { - __ add(Operand(eax), Immediate(Smi::FromInt(1))); + __ add(eax, Immediate(Smi::FromInt(1))); } } @@ -3956,10 +4027,14 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, - Handle<String> check, - Label* if_true, - Label* if_false, - Label* fall_through) { + Handle<String> check) { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + { AccumulatorValueContext context(this); VisitForTypeofValue(expr); } @@ -3998,8 +4073,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, Split(not_zero, if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->function_symbol())) { __ JumpIfSmi(eax, if_false); - __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx); - Split(above_equal, if_true, if_false, fall_through); + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx); + __ j(equal, if_true); + __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE); + Split(equal, if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->object_symbol())) { __ JumpIfSmi(eax, if_false); if (!FLAG_harmony_typeof) { @@ -4017,18 +4095,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else { if (if_false != fall_through) __ jmp(if_false); } -} - - -void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr, - Label* if_true, - Label* if_false, - Label* fall_through) { - VisitForAccumulatorValue(expr); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - - __ cmp(eax, isolate()->factory()->undefined_value()); - Split(equal, if_true, if_false, fall_through); + context()->Plug(if_true, if_false); } @@ -4036,9 +4103,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); SetSourcePosition(expr->position()); + // First we try a fast inlined version of the compare when one of + // the operands is a literal. + if (TryLiteralCompare(expr)) return; + // Always perform the comparison for its control flow. Pack the result // into the expression's context after the comparison is performed. - Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4046,16 +4116,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - // First we try a fast inlined version of the compare when one of - // the operands is a literal. - if (TryLiteralCompare(expr, if_true, if_false, fall_through)) { - context()->Plug(if_true, if_false); - return; - } - Token::Value op = expr->op(); VisitForStackValue(expr->left()); - switch (expr->op()) { + switch (op) { case Token::IN: VisitForStackValue(expr->right()); __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION); @@ -4071,7 +4134,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { __ CallStub(&stub); decrement_stack_height(2); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - __ test(eax, Operand(eax)); + __ test(eax, eax); // The stub returns 0 for true. 
Split(zero, if_true, if_false, fall_through); break; @@ -4080,11 +4143,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { default: { VisitForAccumulatorValue(expr->right()); Condition cc = no_condition; - bool strict = false; switch (op) { case Token::EQ_STRICT: - strict = true; - // Fall through case Token::EQ: cc = equal; __ pop(edx); @@ -4120,10 +4180,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { JumpPatchSite patch_site(masm_); if (inline_smi_code) { Label slow_case; - __ mov(ecx, Operand(edx)); - __ or_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ or_(ecx, eax); patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear); - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); Split(cc, if_true, if_false, NULL); __ bind(&slow_case); } @@ -4135,7 +4195,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { patch_site.EmitPatchInfo(); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - __ test(eax, Operand(eax)); + __ test(eax, eax); Split(cc, if_true, if_false, fall_through); } } @@ -4146,7 +4206,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } -void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { +void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, + Expression* sub_expr, + NilValue nil) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4154,15 +4216,20 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - VisitForAccumulatorValue(expr->expression()); + VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - - __ cmp(eax, isolate()->factory()->null_value()); - if (expr->is_strict()) { + Handle<Object> nil_value = nil == kNullValue ? + isolate()->factory()->null_value() : + isolate()->factory()->undefined_value(); + __ cmp(eax, nil_value); + if (expr->op() == Token::EQ_STRICT) { Split(equal, if_true, if_false, fall_through); } else { + Handle<Object> other_nil_value = nil == kNullValue ? + isolate()->factory()->undefined_value() : + isolate()->factory()->null_value(); __ j(equal, if_true); - __ cmp(eax, isolate()->factory()->undefined_value()); + __ cmp(eax, other_nil_value); __ j(equal, if_true); __ JumpIfSmi(eax, if_false); // It can be an undetectable object. @@ -4229,7 +4296,7 @@ void FullCodeGenerator::EnterFinallyBlock() { // Cook return address on top of stack (smi encoded Code* delta) ASSERT(!result_register().is(edx)); __ pop(edx); - __ sub(Operand(edx), Immediate(masm_->CodeObject())); + __ sub(edx, Immediate(masm_->CodeObject())); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); STATIC_ASSERT(kSmiTag == 0); __ SmiTag(edx); @@ -4245,8 +4312,8 @@ void FullCodeGenerator::ExitFinallyBlock() { // Uncook return address. __ pop(edx); __ SmiUntag(edx); - __ add(Operand(edx), Immediate(masm_->CodeObject())); - __ jmp(Operand(edx)); + __ add(edx, Immediate(masm_->CodeObject())); + __ jmp(edx); } diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index 9b5cc5640..8a98b179d 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Update write barrier. Make sure not to clobber the value. 
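Note on the EmitLiteralCompareNil hunk above: under strict equality only the named nil matches, while under == the other nil compares equal as well (the generated code additionally lets undetectable objects match; that map-bit check is left out here). The value-level rule, over a made-up tag enum:

    namespace sketch {

    enum class Nil { kNull, kUndefined };
    enum class Tag { kNull, kUndefined, kSmi, kObject };

    // expr === null / === undefined  vs.  expr == null / == undefined.
    bool CompareNil(Tag value, Nil nil, bool strict) {
      Tag wanted = (nil == Nil::kNull) ? Tag::kNull : Tag::kUndefined;
      if (value == wanted) return true;
      if (strict) return false;
      Tag other = (nil == Nil::kNull) ? Tag::kUndefined : Tag::kNull;
      return value == other;  // undetectable objects omitted from this model
    }

    }  // namespace sketch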
__ mov(r1, value); - __ RecordWrite(elements, r0, r1); + __ RecordWrite(elements, r0, r1, kDontSaveFPRegs); } @@ -326,7 +326,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, // Fast case: Do the load. STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0)); __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize)); - __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value())); + __ cmp(scratch, Immediate(FACTORY->the_hole_value())); // In case the loaded value is the_hole we have to consult GetProperty // to ensure the prototype chain is searched. __ j(equal, out_of_range); @@ -394,8 +394,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm, // Check if element is in the range of mapped arguments. If not, jump // to the unmapped lookup with the parameter map in scratch1. __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset)); - __ sub(Operand(scratch2), Immediate(Smi::FromInt(2))); - __ cmp(key, Operand(scratch2)); + __ sub(scratch2, Immediate(Smi::FromInt(2))); + __ cmp(key, scratch2); __ j(greater_equal, unmapped_case); // Load element index and check whether it is the hole. @@ -432,7 +432,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK); __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset)); - __ cmp(key, Operand(scratch)); + __ cmp(key, scratch); __ j(greater_equal, slow_case); return FieldOperand(backing_store, key, @@ -534,7 +534,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ shr(ecx, KeyedLookupCache::kMapHashShift); __ mov(edi, FieldOperand(eax, String::kHashFieldOffset)); __ shr(edi, String::kHashShift); - __ xor_(ecx, Operand(edi)); + __ xor_(ecx, edi); __ and_(ecx, KeyedLookupCache::kCapacityMask); // Load the key (consisting of map and symbol) from the cache and @@ -545,7 +545,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ shl(edi, kPointerSizeLog2 + 1); __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys)); __ j(not_equal, &slow); - __ add(Operand(edi), Immediate(kPointerSize)); + __ add(edi, Immediate(kPointerSize)); __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys)); __ j(not_equal, &slow); @@ -559,12 +559,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ mov(edi, Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets)); __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset)); - __ sub(edi, Operand(ecx)); + __ sub(edi, ecx); __ j(above_equal, &property_array_property); // Load in-object property. __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset)); - __ add(ecx, Operand(edi)); + __ add(ecx, edi); __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0)); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); __ ret(0); @@ -651,8 +651,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { // Check that it has indexed interceptor and access checks // are not enabled for this object. __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset)); - __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask)); - __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor)); + __ and_(ecx, Immediate(kSlowCaseBitFieldMask)); + __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor)); __ j(not_zero, &slow); // Everything is fine, call runtime. 
@@ -710,7 +710,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ mov(mapped_location, eax); __ lea(ecx, mapped_location); __ mov(edx, eax); - __ RecordWrite(ebx, ecx, edx); + __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs); __ Ret(); __ bind(&notin); // The unmapped lookup expects that the parameter map is in ebx. @@ -719,7 +719,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ mov(unmapped_location, eax); __ lea(edi, unmapped_location); __ mov(edx, eax); - __ RecordWrite(ebx, edi, edx); + __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs); __ Ret(); __ bind(&slow); GenerateMiss(masm, false); @@ -734,7 +734,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - Label slow, fast, array, extra; + Label slow, fast_object_with_map_check, fast_object_without_map_check; + Label fast_double_with_map_check, fast_double_without_map_check; + Label check_if_double_array, array, extra; // Check that the object isn't a smi. __ JumpIfSmi(edx, &slow); @@ -750,22 +752,18 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ CmpInstanceType(edi, JS_ARRAY_TYPE); __ j(equal, &array); // Check that the object is some kind of JSObject. - __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE); + __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE); __ j(below, &slow); - __ CmpInstanceType(edi, JS_PROXY_TYPE); - __ j(equal, &slow); - __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE); - __ j(equal, &slow); // Object case: Check key against length in the elements array. // eax: value // edx: JSObject // ecx: key (a smi) - __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); - // Check that the object is in fast mode and writable. - __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK); - __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); - __ j(below, &fast); + // edi: receiver map + __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); + // Check array bounds. Both the key and the length of FixedArray are smis. + __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset)); + __ j(below, &fast_object_with_map_check); // Slow case: call runtime. __ bind(&slow); @@ -778,16 +776,28 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // eax: value // edx: receiver, a JSArray // ecx: key, a smi. - // edi: receiver->elements, a FixedArray + // ebx: receiver->elements, a FixedArray + // edi: receiver map // flags: compare (ecx, edx.length()) // do not leave holes in the array: __ j(not_equal, &slow); - __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); + __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset)); __ j(above_equal, &slow); - // Add 1 to receiver->length, and go to fast array write. + __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset)); + __ cmp(edi, masm->isolate()->factory()->fixed_array_map()); + __ j(not_equal, &check_if_double_array); + // Add 1 to receiver->length, and go to common element store code for Objects. + __ add(FieldOperand(edx, JSArray::kLengthOffset), + Immediate(Smi::FromInt(1))); + __ jmp(&fast_object_without_map_check); + + __ bind(&check_if_double_array); + __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map()); + __ j(not_equal, &slow); + // Add 1 to receiver->length, and go to common element store code for doubles.
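Note on the KeyedStoreIC::GenerateGeneric rework above (it continues in the next hunk): stores are now dispatched on the elements backing store, with smi values taking the shortest path, non-smi values into smi-only elements escaping to the runtime so the elements kind can change, and numbers written unboxed into a FixedDoubleArray. A high-level sketch of that dispatch, with simplified stand-in types:

    namespace sketch {

    enum class BackingStore { kFixedArray, kFixedDoubleArray };
    enum class ElementsKind { kFastSmiOnly, kFastObject, kFastDouble };

    struct StoreResult { bool handled; };  // false == fall back to runtime

    // Mirrors fast_object_with_map_check / fast_double_with_map_check:
    //  - smi values go straight into a FixedArray (no barrier, no checks);
    //  - other heap values need the receiver to already have fast object
    //    elements, otherwise the runtime transitions the elements kind;
    //  - numbers are stored as raw doubles in a FixedDoubleArray.
    StoreResult KeyedStoreGeneric(BackingStore store, ElementsKind kind,
                                  bool value_is_smi, bool value_is_number) {
      if (store == BackingStore::kFixedArray) {
        if (value_is_smi) return {true};
        if (kind != ElementsKind::kFastObject) return {false};
        return {true};  // store plus write barrier
      }
      // kFixedDoubleArray
      if (!value_is_number) return {false};
      return {true};    // store the unboxed double
    }

    }  // namespace sketch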
__ add(FieldOperand(edx, JSArray::kLengthOffset), Immediate(Smi::FromInt(1))); - __ jmp(&fast); + __ jmp(&fast_double_without_map_check); // Array case: Get the length and the elements array from the JS // array. Check that the array is in fast mode (and writable); if it @@ -796,24 +806,54 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // eax: value // edx: receiver, a JSArray // ecx: key, a smi. - __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); - __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK); + // edi: receiver map + __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); - // Check the key against the length in the array, compute the - // address to store into and fall through to fast case. + // Check the key against the length in the array and fall through to the + // common store code. __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // Compare smis. __ j(above_equal, &extra); - // Fast case: Do the store. - __ bind(&fast); + // Fast case: Do the store, could either Object or double. + __ bind(&fast_object_with_map_check); // eax: value // ecx: key (a smi) // edx: receiver - // edi: FixedArray receiver->elements - __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax); + // ebx: FixedArray receiver->elements + // edi: receiver map + __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset)); + __ cmp(edi, masm->isolate()->factory()->fixed_array_map()); + __ j(not_equal, &fast_double_with_map_check); + __ bind(&fast_object_without_map_check); + // Smi stores don't require further checks. + Label non_smi_value; + __ JumpIfNotSmi(eax, &non_smi_value); + // It's irrelevant whether array is smi-only or not when writing a smi. + __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax); + __ ret(0); + + __ bind(&non_smi_value); + // Escape to slow case when writing non-smi into smi-only array. + __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); + __ CheckFastObjectElements(edi, &slow, Label::kNear); + + // Fast elements array, store the value to the elements backing store. + __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax); // Update write barrier for the elements array address. - __ mov(edx, Operand(eax)); - __ RecordWrite(edi, 0, edx, ecx); + __ mov(edx, eax); // Preserve the value which is returned. + __ RecordWriteArray( + ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ ret(0); + + __ bind(&fast_double_with_map_check); + // Check for fast double array case. If this fails, call through to the + // runtime. + __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map()); + __ j(not_equal, &slow); + __ bind(&fast_double_without_map_check); + // If the value is a number, store it as a double in the FastDoubleElements + // array. + __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false); __ ret(0); } @@ -951,22 +991,22 @@ static void GenerateCallMiss(MacroAssembler* masm, // Get the receiver of the function from the stack; 1 ~ return address. __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); - // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push the receiver and the name of the function. - __ push(edx); - __ push(ecx); + // Push the receiver and the name of the function. + __ push(edx); + __ push(ecx); - // Call the entry. - CEntryStub stub(1); - __ mov(eax, Immediate(2)); - __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate()))); - __ CallStub(&stub); + // Call the entry. 
+ CEntryStub stub(1); + __ mov(eax, Immediate(2)); + __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate()))); + __ CallStub(&stub); - // Move result to edi and exit the internal frame. - __ mov(edi, eax); - __ LeaveInternalFrame(); + // Move result to edi and exit the internal frame. + __ mov(edi, eax); + } // Check if the receiver is a global object of some sort. // This can happen only for regular CallIC but not KeyedCallIC. @@ -1111,13 +1151,17 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // This branch is taken when calling KeyedCallIC_Miss is neither required // nor beneficial. __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1); - __ EnterInternalFrame(); - __ push(ecx); // save the key - __ push(edx); // pass the receiver - __ push(ecx); // pass the key - __ CallRuntime(Runtime::kKeyedGetProperty, 2); - __ pop(ecx); // restore the key - __ LeaveInternalFrame(); + + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(ecx); // save the key + __ push(edx); // pass the receiver + __ push(ecx); // pass the key + __ CallRuntime(Runtime::kKeyedGetProperty, 2); + __ pop(ecx); // restore the key + // Leave the internal frame. + } + __ mov(edi, eax); __ jmp(&do_call); diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index 4e3ea9816..9e1fd34af 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -70,6 +70,17 @@ bool LCodeGen::GenerateCode() { ASSERT(is_unused()); status_ = GENERATING; CpuFeatures::Scope scope(SSE2); + + CodeStub::GenerateFPStubs(); + + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done in GeneratePrologue). + FrameScope frame_scope(masm_, StackFrame::MANUAL); + + dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 || + info()->osr_ast_id() != AstNode::kNoNumber; + return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && @@ -144,6 +155,29 @@ bool LCodeGen::GeneratePrologue() { __ bind(&ok); } + if (dynamic_frame_alignment_) { + Label do_not_pad, align_loop; + STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); + // Align esp to a multiple of 2 * kPointerSize. + __ test(esp, Immediate(kPointerSize)); + __ j(zero, &do_not_pad, Label::kNear); + __ push(Immediate(0)); + __ mov(ebx, esp); + // Copy arguments, receiver, and return address. + __ mov(ecx, Immediate(scope()->num_parameters() + 2)); + + __ bind(&align_loop); + __ mov(eax, Operand(ebx, 1 * kPointerSize)); + __ mov(Operand(ebx, 0), eax); + __ add(Operand(ebx), Immediate(kPointerSize)); + __ dec(ecx); + __ j(not_zero, &align_loop, Label::kNear); + __ mov(Operand(ebx, 0), + Immediate(isolate()->factory()->frame_alignment_marker())); + + __ bind(&do_not_pad); + } + __ push(ebp); // Caller's frame pointer. __ mov(ebp, esp); __ push(esi); // Callee's context. @@ -204,11 +238,12 @@ bool LCodeGen::GeneratePrologue() { // Store it in the context. int context_offset = Context::SlotOffset(var->index()); __ mov(Operand(esi, context_offset), eax); - // Update the write barrier. This clobbers all involved - // registers, so we have to use a third register to avoid - // clobbering esi. - __ mov(ecx, esi); - __ RecordWrite(ecx, context_offset, eax, ebx); + // Update the write barrier. This clobbers eax and ebx. 
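Note on the LCodeGen::GeneratePrologue hunk above: when dynamic frame alignment is required and esp is only word-aligned, the prologue pushes one padding word, slides the return address, receiver and arguments down one slot, and leaves the frame alignment marker in the freed slot so the epilogue (and the deoptimizer) can detect and drop the padding. A standalone simulation of that copy loop; the marker value is a stand-in, not V8's heap object:

    #include <cstdint>
    #include <vector>

    namespace sketch {

    const uint32_t kAlignmentMarker = 0xBADBADBAu;  // stand-in value

    // stack[0] is the top of stack (the return address) on entry.
    void AlignFrame(std::vector<uint32_t>* stack, int num_parameters,
                    bool esp_misaligned) {
      if (!esp_misaligned) return;                 // test(esp, kPointerSize)
      stack->insert(stack->begin(), 0u);           // push(Immediate(0))
      int words_to_copy = num_parameters + 2;      // args + receiver + ret addr
      for (int i = 0; i < words_to_copy; ++i) {
        (*stack)[i] = (*stack)[i + 1];             // slide one slot down
      }
      (*stack)[words_to_copy] = kAlignmentMarker;  // marker below the arguments
    }

    }  // namespace sketch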
+ __ RecordWriteContextSlot(esi, + context_offset, + eax, + ebx, + kDontSaveFPRegs); } } Comment(";;; End allocate local context"); @@ -260,6 +295,9 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); + Comment(";;; Deferred code @%d: %s.", + code->instruction_index(), + code->instr()->Mnemonic()); code->Generate(); __ jmp(code->exit()); } @@ -481,14 +519,18 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, LInstruction* instr, LOperand* context) { - ASSERT(context->IsRegister() || context->IsStackSlot()); if (context->IsRegister()) { if (!ToRegister(context).is(esi)) { __ mov(esi, ToRegister(context)); } - } else { - // Context is stack slot. + } else if (context->IsStackSlot()) { __ mov(esi, ToOperand(context)); + } else if (context->IsConstantOperand()) { + Handle<Object> literal = + chunk_->LookupLiteral(LConstantOperand::cast(context)); + LoadHeapObject(esi, Handle<Context>::cast(literal)); + } else { + UNREACHABLE(); } __ CallRuntimeSaveDoubles(id); @@ -669,7 +711,7 @@ void LCodeGen::RecordSafepoint( int arguments, int deoptimization_index) { ASSERT(kind == expected_safepoint_kind_); - const ZoneList<LOperand*>* operands = pointers->operands(); + const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); Safepoint safepoint = safepoints_.DefineSafepoint(masm(), kind, arguments, deoptimization_index); for (int i = 0; i < operands->length(); i++) { @@ -1200,8 +1242,13 @@ void LCodeGen::DoConstantD(LConstantD* instr) { void LCodeGen::DoConstantT(LConstantT* instr) { - ASSERT(instr->result()->IsRegister()); - __ Set(ToRegister(instr->result()), Immediate(instr->value())); + Register reg = ToRegister(instr->result()); + Handle<Object> handle = instr->value(); + if (handle->IsHeapObject()) { + LoadHeapObject(reg, Handle<HeapObject>::cast(handle)); + } else { + __ Set(reg, Immediate(handle)); + } } @@ -1577,23 +1624,33 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { } -void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { +void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { Register reg = ToRegister(instr->InputAt(0)); + int false_block = chunk_->LookupDestination(instr->false_block_id()); - // TODO(fsc): If the expression is known to be a smi, then it's - // definitely not null. Jump to the false block. + // If the expression is known to be untagged or a smi, then it's definitely + // not null, and it can't be a an undetectable object. + if (instr->hydrogen()->representation().IsSpecialization() || + instr->hydrogen()->type().IsSmi()) { + EmitGoto(false_block); + return; + } int true_block = chunk_->LookupDestination(instr->true_block_id()); - int false_block = chunk_->LookupDestination(instr->false_block_id()); - - __ cmp(reg, factory()->null_value()); - if (instr->is_strict()) { + Handle<Object> nil_value = instr->nil() == kNullValue ? + factory()->null_value() : + factory()->undefined_value(); + __ cmp(reg, nil_value); + if (instr->kind() == kStrictEquality) { EmitBranch(true_block, false_block, equal); } else { + Handle<Object> other_nil_value = instr->nil() == kNullValue ? 
+ factory()->undefined_value() : + factory()->null_value(); Label* true_label = chunk_->GetAssemblyLabel(true_block); Label* false_label = chunk_->GetAssemblyLabel(false_block); __ j(equal, true_label); - __ cmp(reg, factory()->undefined_value()); + __ cmp(reg, other_nil_value); __ j(equal, true_label); __ JumpIfSmi(reg, false_label); // Check for undetectable objects by looking in the bit field in @@ -1745,28 +1802,36 @@ void LCodeGen::EmitClassOfTest(Label* is_true, ASSERT(!input.is(temp)); ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register. __ JumpIfSmi(input, is_false); - __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); - __ j(below, is_false); - // Map is now in temp. - // Functions have class 'Function'. - __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE); if (class_name->IsEqualTo(CStrVector("Function"))) { - __ j(above_equal, is_true); + // Assuming the following assertions, we can use the same compares to test + // for both being a function type and being in the object type range. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); + __ j(below, is_false); + __ j(equal, is_true); + __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE); + __ j(equal, is_true); } else { - __ j(above_equal, is_false); + // Faster code path to avoid two compares: subtract lower bound from the + // actual type and do a signed compare with the width of the type range. + __ mov(temp, FieldOperand(input, HeapObject::kMapOffset)); + __ mov(temp2, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmpb(Operand(temp2), + static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ j(above, is_false); } + // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. // Check if the constructor in the map is a function. __ mov(temp, FieldOperand(temp, Map::kConstructorOffset)); - - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - // Objects with a non-function constructor have class 'Object'. __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2); if (class_name->IsEqualTo(CStrVector("Object"))) { @@ -1851,9 +1916,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { virtual void Generate() { codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); } - + virtual LInstruction* instr() { return instr_; } Label* map_check() { return &map_check_; } - private: LInstanceOfKnownGlobal* instr_; Label map_check_; @@ -1991,6 +2055,17 @@ void LCodeGen::DoReturn(LReturn* instr) { } __ mov(esp, ebp); __ pop(ebp); + if (dynamic_frame_alignment_) { + Label aligned; + // Frame alignment marker (padding) is below arguments, + // and receiver, so its return-address-relative offset is + // (num_arguments + 2) words. 
+ __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize), + Immediate(factory()->frame_alignment_marker())); + __ j(not_equal, &aligned); + __ Ret((GetParameterCount() + 2) * kPointerSize, ecx); + __ bind(&aligned); + } __ Ret((GetParameterCount() + 1) * kPointerSize, ecx); } @@ -1998,7 +2073,7 @@ void LCodeGen::DoReturn(LReturn* instr) { void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { Register result = ToRegister(instr->result()); __ mov(result, Operand::Cell(instr->hydrogen()->cell())); - if (instr->hydrogen()->check_hole_value()) { + if (instr->hydrogen()->RequiresHoleCheck()) { __ cmp(result, factory()->the_hole_value()); DeoptimizeIf(equal, instr->environment()); } @@ -2019,20 +2094,34 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { + Register object = ToRegister(instr->TempAt(0)); + Register address = ToRegister(instr->TempAt(1)); Register value = ToRegister(instr->InputAt(0)); - Operand cell_operand = Operand::Cell(instr->hydrogen()->cell()); + ASSERT(!value.is(object)); + Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell()); + + int offset = JSGlobalPropertyCell::kValueOffset; + __ mov(object, Immediate(cell_handle)); // If the cell we are storing to contains the hole it could have // been deleted from the property dictionary. In that case, we need // to update the property details in the property dictionary to mark // it as no longer deleted. We deoptimize in that case. - if (instr->hydrogen()->check_hole_value()) { - __ cmp(cell_operand, factory()->the_hole_value()); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ cmp(FieldOperand(object, offset), factory()->the_hole_value()); DeoptimizeIf(equal, instr->environment()); } // Store the value. - __ mov(cell_operand, value); + __ mov(FieldOperand(object, offset), value); + + // Cells are always in the remembered set. 
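Note on the DoStoreGlobalCell hunk above: the new write barrier has two independent halves, a store-buffer (remembered set) entry and an incremental-marking notification, and call sites omit whichever half is provably redundant, as the cell store does with OMIT_REMEMBERED_SET and other sites do with OMIT_SMI_CHECK. A schematic barrier with those switches; INLINE_SMI_CHECK and the MarkingState type are assumed names, not taken from the patch:

    #include <cstdint>
    #include <unordered_set>

    namespace sketch {

    enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
    enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

    struct MarkingState {
      std::unordered_set<uintptr_t> store_buffer;
      bool incremental_marking_on = false;
      void NotifyMarker(uintptr_t object, uintptr_t value) {
        (void)object; (void)value;  // would grey `value` for the marker
      }
    };

    inline bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }

    // Two-part barrier; each half can be skipped when the caller knows it is
    // redundant (global property cells are always in the remembered set; a
    // freshly stored closure can never be a smi).
    void RecordWriteField(MarkingState* heap, uintptr_t object, uintptr_t slot,
                          uintptr_t value, RememberedSetAction rs, SmiCheck sc) {
      if (sc == INLINE_SMI_CHECK && IsSmi(value)) return;
      if (rs == EMIT_REMEMBERED_SET) heap->store_buffer.insert(slot);
      if (heap->incremental_marking_on) heap->NotifyMarker(object, value);
    }

    }  // namespace sketch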
+ __ RecordWriteField(object, + offset, + value, + address, + kSaveFPRegs, + OMIT_REMEMBERED_SET); } @@ -2063,7 +2152,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { if (instr->needs_write_barrier()) { Register temp = ToRegister(instr->TempAt(0)); int offset = Context::SlotOffset(instr->slot_index()); - __ RecordWrite(context, offset, value, temp); + __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs); } } @@ -2280,16 +2369,14 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( LLoadKeyedFastDoubleElement* instr) { XMMRegister result = ToDoubleRegister(instr->result()); - if (instr->hydrogen()->RequiresHoleCheck()) { - int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + - sizeof(kHoleNanLower32); - Operand hole_check_operand = BuildFastArrayOperand( - instr->elements(), instr->key(), - FAST_DOUBLE_ELEMENTS, - offset); - __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); - DeoptimizeIf(equal, instr->environment()); - } + int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + + sizeof(kHoleNanLower32); + Operand hole_check_operand = BuildFastArrayOperand( + instr->elements(), instr->key(), + FAST_DOUBLE_ELEMENTS, + offset); + __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); + DeoptimizeIf(equal, instr->environment()); Operand double_load_operand = BuildFastArrayOperand( instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS, @@ -2359,6 +2446,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( break; case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: @@ -2680,6 +2768,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { virtual void Generate() { codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); } + virtual LInstruction* instr() { return instr_; } private: LUnaryMathOperation* instr_; }; @@ -3005,7 +3094,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { ASSERT(ToRegister(instr->result()).is(eax)); int arity = instr->arity(); - CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT); + CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ Drop(1); } @@ -3062,7 +3151,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { Register temp = ToRegister(instr->TempAt(0)); // Update the write barrier for the object for in-object properties. - __ RecordWrite(object, offset, value, temp); + __ RecordWriteField(object, offset, value, temp, kSaveFPRegs); } } else { Register temp = ToRegister(instr->TempAt(0)); @@ -3071,7 +3160,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { // Update the write barrier for the properties array. // object is used as a scratch register. - __ RecordWrite(temp, offset, value, object); + __ RecordWriteField(temp, offset, value, object, kSaveFPRegs); } } } @@ -3130,6 +3219,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( break; case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: @@ -3146,6 +3236,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register elements = ToRegister(instr->object()); Register key = instr->key()->IsRegister() ? 
ToRegister(instr->key()) : no_reg; + // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS + // conversion, so it deopts in that case. + if (instr->hydrogen()->ValueNeedsSmiCheck()) { + __ test(value, Immediate(kSmiTagMask)); + DeoptimizeIf(not_zero, instr->environment()); + } + // Do the store. if (instr->key()->IsConstantOperand()) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); @@ -3168,7 +3265,7 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { key, times_pointer_size, FixedArray::kHeaderSize)); - __ RecordWrite(elements, key, value); + __ RecordWrite(elements, key, value, kSaveFPRegs); } } @@ -3212,6 +3309,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStringCharCodeAt* instr_; }; @@ -3334,6 +3432,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStringCharFromCode* instr_; }; @@ -3413,6 +3512,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); } + virtual LInstruction* instr() { return instr_; } private: LNumberTagI* instr_; }; @@ -3480,6 +3580,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } + virtual LInstruction* instr() { return instr_; } private: LNumberTagD* instr_; }; @@ -3581,16 +3682,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, } -class DeferredTaggedToI: public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } - private: - LTaggedToI* instr_; -}; - - void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { Label done, heap_number; Register input_reg = ToRegister(instr->InputAt(0)); @@ -3672,6 +3763,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { void LCodeGen::DoTaggedToI(LTaggedToI* instr) { + class DeferredTaggedToI: public LDeferredCode { + public: + DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } + virtual LInstruction* instr() { return instr_; } + private: + LTaggedToI* instr_; + }; + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); ASSERT(input->Equals(instr->result())); @@ -3882,9 +3983,16 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckFunction(LCheckFunction* instr) { - ASSERT(instr->InputAt(0)->IsRegister()); - Operand operand = ToOperand(instr->InputAt(0)); - __ cmp(operand, instr->hydrogen()->target()); + Handle<JSFunction> target = instr->hydrogen()->target(); + if (isolate()->heap()->InNewSpace(*target)) { + Register reg = ToRegister(instr->value()); + Handle<JSGlobalPropertyCell> 
cell = + isolate()->factory()->NewJSGlobalPropertyCell(target); + __ cmp(reg, Operand::Cell(cell)); + } else { + Operand operand = ToOperand(instr->value()); + __ cmp(operand, instr->hydrogen()->target()); + } DeoptimizeIf(not_equal, instr->environment()); } @@ -4188,10 +4296,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = not_zero; } else if (type_name->Equals(heap()->function_symbol())) { - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); - __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input); - final_branch_condition = above_equal; + __ CmpObjectType(input, JS_FUNCTION_TYPE, input); + __ j(equal, true_label); + __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE); + final_branch_condition = equal; } else if (type_name->Equals(heap()->object_symbol())) { __ JumpIfSmi(input, false_label); @@ -4303,6 +4413,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStackCheck* instr_; }; diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h index 615632742..6037c0868 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.h +++ b/deps/v8/src/ia32/lithium-codegen-ia32.h @@ -58,6 +58,7 @@ class LCodeGen BASE_EMBEDDED { inlined_function_count_(0), scope_(info->scope()), status_(UNUSED), + dynamic_frame_alignment_(false), deferred_(8), osr_pc_offset_(-1), deoptimization_reloc_size(), @@ -133,6 +134,10 @@ class LCodeGen BASE_EMBEDDED { int strict_mode_flag() const { return info()->is_strict_mode() ? kStrictMode : kNonStrictMode; } + bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; } + void set_dynamic_frame_alignment(bool value) { + dynamic_frame_alignment_ = value; + } LChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } @@ -297,6 +302,7 @@ class LCodeGen BASE_EMBEDDED { int inlined_function_count_; Scope* const scope_; Status status_; + bool dynamic_frame_alignment_; TranslationBuffer translations_; ZoneList<LDeferredCode*> deferred_; int osr_pc_offset_; @@ -346,16 +352,20 @@ class LCodeGen BASE_EMBEDDED { class LDeferredCode: public ZoneObject { public: explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), external_exit_(NULL) { + : codegen_(codegen), + external_exit_(NULL), + instruction_index_(codegen->current_instruction_) { codegen->AddDeferredCode(this); } virtual ~LDeferredCode() { } virtual void Generate() = 0; + virtual LInstruction* instr() = 0; void SetExit(Label *exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? 
external_exit_ : &exit_; } + int instruction_index() const { return instruction_index_; } protected: LCodeGen* codegen() const { return codegen_; } @@ -366,6 +376,7 @@ class LDeferredCode: public ZoneObject { Label entry_; Label exit_; Label* external_exit_; + int instruction_index_; }; } } // namespace v8::internal diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc index 3dc220d3d..856106c79 100644 --- a/deps/v8/src/ia32/lithium-ia32.cc +++ b/deps/v8/src/ia32/lithium-ia32.cc @@ -214,10 +214,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) { } -void LIsNullAndBranch::PrintDataTo(StringStream* stream) { +void LIsNilAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); InputAt(0)->PrintTo(stream); - stream->Add(is_strict() ? " === null" : " == null"); + stream->Add(kind() == kStrictEquality ? " === " : " == "); + stream->Add(nil() == kNullValue ? "null" : "undefined"); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); } @@ -351,7 +352,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { int LChunk::GetNextSpillIndex(bool is_double) { // Skip a slot if for a double-width slot. - if (is_double) spill_slot_count_++; + if (is_double) { + spill_slot_count_ |= 1; // Make it odd, so incrementing makes it even. + spill_slot_count_++; + num_double_slots_++; + } return spill_slot_count_++; } @@ -707,7 +712,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble( LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { HEnvironment* hydrogen_env = current_block_->last_environment(); - instr->set_environment(CreateEnvironment(hydrogen_env)); + int argument_index_accumulator = 0; + instr->set_environment(CreateEnvironment(hydrogen_env, + &argument_index_accumulator)); return instr; } @@ -994,10 +1001,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { } -LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { +LEnvironment* LChunkBuilder::CreateEnvironment( + HEnvironment* hydrogen_env, + int* argument_index_accumulator) { if (hydrogen_env == NULL) return NULL; - LEnvironment* outer = CreateEnvironment(hydrogen_env->outer()); + LEnvironment* outer = + CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator); int ast_id = hydrogen_env->ast_id(); ASSERT(ast_id != AstNode::kNoNumber); int value_count = hydrogen_env->length(); @@ -1007,7 +1017,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { argument_count_, value_count, outer); - int argument_index = 0; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1016,7 +1025,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { if (value->IsArgumentsObject()) { op = NULL; } else if (value->IsPushArgument()) { - op = new LArgument(argument_index++); + op = new LArgument((*argument_index_accumulator)++); } else { op = UseAny(value); } @@ -1471,10 +1480,10 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch( } -LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) { +LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) { // We only need a temp register for non-strict compare. - LOperand* temp = instr->is_strict() ? NULL : TempRegister(); - return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp); + LOperand* temp = instr->kind() == kStrictEquality ? 
NULL : TempRegister(); + return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp); } @@ -1683,7 +1692,13 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) { - LOperand* value = UseAtStart(instr->value()); + // If the target is in new space, we'll emit a global cell compare and so + // want the value in a register. If the target gets promoted before we + // emit code, we will still get the register but will do an immediate + // compare instead of the cell compare. This is safe. + LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target()) + ? UseRegisterAtStart(instr->value()) + : UseAtStart(instr->value()); return AssignEnvironment(new LCheckFunction(value)); } @@ -1770,7 +1785,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { LLoadGlobalCell* result = new LLoadGlobalCell; - return instr->check_hole_value() + return instr->RequiresHoleCheck() ? AssignEnvironment(DefineAsRegister(result)) : DefineAsRegister(result); } @@ -1786,8 +1801,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { LStoreGlobalCell* result = - new LStoreGlobalCell(UseRegisterAtStart(instr->value())); - return instr->check_hole_value() ? AssignEnvironment(result) : result; + new LStoreGlobalCell(UseTempRegister(instr->value()), + TempRegister(), + TempRegister()); + return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result; } @@ -1808,15 +1825,13 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context; LOperand* value; LOperand* temp; + LOperand* context = UseRegister(instr->context()); if (instr->NeedsWriteBarrier()) { - context = UseTempRegister(instr->context()); value = UseTempRegister(instr->value()); temp = TempRegister(); } else { - context = UseRegister(instr->context()); value = UseRegister(instr->value()); temp = NULL; } @@ -1944,7 +1959,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement( ASSERT(instr->object()->representation().IsTagged()); ASSERT(instr->key()->representation().IsInteger32()); - LOperand* obj = UseTempRegister(instr->object()); + LOperand* obj = UseRegister(instr->object()); LOperand* val = needs_write_barrier ? UseTempRegister(instr->value()) : UseRegisterAtStart(instr->value()); @@ -2021,9 +2036,14 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { bool needs_write_barrier = instr->NeedsWriteBarrier(); - LOperand* obj = needs_write_barrier - ? UseTempRegister(instr->object()) - : UseRegisterAtStart(instr->object()); + LOperand* obj; + if (needs_write_barrier) { + obj = instr->is_in_object() + ? UseRegister(instr->object()) + : UseTempRegister(instr->object()); + } else { + obj = UseRegisterAtStart(instr->object()); + } LOperand* val = needs_write_barrier ? 
UseTempRegister(instr->value()) diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h index 038049ca0..3a06ac358 100644 --- a/deps/v8/src/ia32/lithium-ia32.h +++ b/deps/v8/src/ia32/lithium-ia32.h @@ -101,7 +101,7 @@ class LCodeGen; V(Integer32ToDouble) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ - V(IsNullAndBranch) \ + V(IsNilAndBranch) \ V(IsObjectAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ @@ -615,17 +615,18 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> { }; -class LIsNullAndBranch: public LControlInstruction<1, 1> { +class LIsNilAndBranch: public LControlInstruction<1, 1> { public: - LIsNullAndBranch(LOperand* value, LOperand* temp) { + LIsNilAndBranch(LOperand* value, LOperand* temp) { inputs_[0] = value; temps_[0] = temp; } - DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch) + DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch) - bool is_strict() const { return hydrogen()->is_strict(); } + EqualityKind kind() const { return hydrogen()->kind(); } + NilValue nil() const { return hydrogen()->nil(); } virtual void PrintDataTo(StringStream* stream); }; @@ -1230,10 +1231,12 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> { }; -class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> { +class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> { public: - explicit LStoreGlobalCell(LOperand* value) { + explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) { inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; } DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell") @@ -1798,6 +1801,8 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> { inputs_[0] = value; } + LOperand* value() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function") DECLARE_HYDROGEN_ACCESSOR(CheckFunction) }; @@ -2070,6 +2075,7 @@ class LChunk: public ZoneObject { graph_(graph), instructions_(32), pointer_maps_(8), + num_double_slots_(0), inlined_closures_(1) { } void AddInstruction(LInstruction* instruction, HBasicBlock* block); @@ -2083,6 +2089,8 @@ class LChunk: public ZoneObject { int ParameterAt(int index); int GetParameterStackSlot(int index) const; int spill_slot_count() const { return spill_slot_count_; } + int num_double_slots() const { return num_double_slots_; } + CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } const ZoneList<LInstruction*>* instructions() const { return &instructions_; } @@ -2124,6 +2132,7 @@ class LChunk: public ZoneObject { HGraph* const graph_; ZoneList<LInstruction*> instructions_; ZoneList<LPointerMap*> pointer_maps_; + int num_double_slots_; ZoneList<Handle<JSFunction> > inlined_closures_; }; @@ -2259,7 +2268,8 @@ class LChunkBuilder BASE_EMBEDDED { LInstruction* instr, int ast_id); void ClearInstructionPendingDeoptimizationEnvironment(); - LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env); + LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, + int* argument_index_accumulator); void VisitInstruction(HInstruction* current); diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 837112a55..3aaa22acc 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -44,7 +44,8 @@ namespace internal { MacroAssembler::MacroAssembler(Isolate* arg_isolate, 
void* buffer, int size) : Assembler(arg_isolate, buffer, size), generating_stub_(false), - allow_stub_calls_(true) { + allow_stub_calls_(true), + has_frame_(false) { if (isolate() != NULL) { code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), isolate()); @@ -52,33 +53,75 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) } -void MacroAssembler::RecordWriteHelper(Register object, - Register addr, - Register scratch) { - if (emit_debug_code()) { - // Check that the object is not in new space. - Label not_in_new_space; - InNewSpace(object, scratch, not_equal, ¬_in_new_space); - Abort("new-space object passed to RecordWriteHelper"); - bind(¬_in_new_space); +void MacroAssembler::InNewSpace( + Register object, + Register scratch, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance) { + ASSERT(cc == equal || cc == not_equal); + if (scratch.is(object)) { + and_(scratch, Immediate(~Page::kPageAlignmentMask)); + } else { + mov(scratch, Immediate(~Page::kPageAlignmentMask)); + and_(scratch, object); } + // Check that we can use a test_b. + ASSERT(MemoryChunk::IN_FROM_SPACE < 8); + ASSERT(MemoryChunk::IN_TO_SPACE < 8); + int mask = (1 << MemoryChunk::IN_FROM_SPACE) + | (1 << MemoryChunk::IN_TO_SPACE); + // If non-zero, the page belongs to new-space. + test_b(Operand(scratch, MemoryChunk::kFlagsOffset), + static_cast<uint8_t>(mask)); + j(cc, condition_met, condition_met_distance); +} - // Compute the page start address from the heap object pointer, and reuse - // the 'object' register for it. - and_(object, ~Page::kPageAlignmentMask); - - // Compute number of region covering addr. See Page::GetRegionNumberForAddress - // method for more details. - shr(addr, Page::kRegionSizeLog2); - and_(addr, Page::kPageAlignmentMask >> Page::kRegionSizeLog2); - // Set dirty mark for region. - // Bit tests with a memory operand should be avoided on Intel processors, - // as they usually have long latency and multiple uops. We load the bit base - // operand to a register at first and store it back after bit set. - mov(scratch, Operand(object, Page::kDirtyFlagOffset)); - bts(Operand(scratch), addr); - mov(Operand(object, Page::kDirtyFlagOffset), scratch); +void MacroAssembler::RememberedSetHelper( + Register object, // Only used for debug checks. + Register addr, + Register scratch, + SaveFPRegsMode save_fp, + MacroAssembler::RememberedSetFinalAction and_then) { + Label done; + if (FLAG_debug_code) { + Label ok; + JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear); + int3(); + bind(&ok); + } + // Load store buffer top. + ExternalReference store_buffer = + ExternalReference::store_buffer_top(isolate()); + mov(scratch, Operand::StaticVariable(store_buffer)); + // Store pointer to buffer. + mov(Operand(scratch, 0), addr); + // Increment buffer top. + add(scratch, Immediate(kPointerSize)); + // Write back new top of buffer. + mov(Operand::StaticVariable(store_buffer), scratch); + // Call stub on end of buffer. + // Check for end of buffer. 
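The end-of-buffer test that follows is a bit test on the top pointer itself rather than a compare against a separate limit: the buffer is placed so that bumping the top past the usable region sets StoreBuffer::kStoreBufferOverflowBit in the address. A minimal sketch of that idea, assuming a made-up bit value and ignoring the real buffer layout:

    #include <cstdint>

    const uintptr_t kOverflowBit = uintptr_t(1) << 15;  // placeholder, not V8's value

    // Store the written-to address at the current top, bump the top, and report
    // overflow by testing the designated bit of the new top address - mirroring
    // the mov/add/test sequence emitted here.
    inline bool RecordAddress(void**& top, void* addr) {
      *top++ = addr;
      return (reinterpret_cast<uintptr_t>(top) & kOverflowBit) != 0;
    }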
+ test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); + if (and_then == kReturnAtEnd) { + Label buffer_overflowed; + j(not_equal, &buffer_overflowed, Label::kNear); + ret(0); + bind(&buffer_overflowed); + } else { + ASSERT(and_then == kFallThroughAtEnd); + j(equal, &done, Label::kNear); + } + StoreBufferOverflowStub store_buffer_overflow = + StoreBufferOverflowStub(save_fp); + CallStub(&store_buffer_overflow); + if (and_then == kReturnAtEnd) { + ret(0); + } else { + ASSERT(and_then == kFallThroughAtEnd); + bind(&done); + } } @@ -112,100 +155,144 @@ void MacroAssembler::ClampUint8(Register reg) { } -void MacroAssembler::InNewSpace(Register object, - Register scratch, - Condition cc, - Label* branch, - Label::Distance branch_near) { - ASSERT(cc == equal || cc == not_equal); - if (Serializer::enabled()) { - // Can't do arithmetic on external references if it might get serialized. - mov(scratch, Operand(object)); - // The mask isn't really an address. We load it as an external reference in - // case the size of the new space is different between the snapshot maker - // and the running system. - and_(Operand(scratch), - Immediate(ExternalReference::new_space_mask(isolate()))); - cmp(Operand(scratch), - Immediate(ExternalReference::new_space_start(isolate()))); - j(cc, branch, branch_near); - } else { - int32_t new_space_start = reinterpret_cast<int32_t>( - ExternalReference::new_space_start(isolate()).address()); - lea(scratch, Operand(object, -new_space_start)); - and_(scratch, isolate()->heap()->NewSpaceMask()); - j(cc, branch, branch_near); +void MacroAssembler::RecordWriteArray(Register object, + Register value, + Register index, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + // First, check if a write barrier is even needed. The tests below + // catch stores of Smis. + Label done; + + // Skip barrier if writing a smi. + if (smi_check == INLINE_SMI_CHECK) { + ASSERT_EQ(0, kSmiTag); + test(value, Immediate(kSmiTagMask)); + j(zero, &done); + } + + // Array access: calculate the destination address in the same manner as + // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset + // into an array of words. + Register dst = index; + lea(dst, Operand(object, index, times_half_pointer_size, + FixedArray::kHeaderSize - kHeapObjectTag)); + + RecordWrite( + object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); + + bind(&done); + + // Clobber clobbered input registers when running with the debug-code flag + // turned on to provoke errors. + if (emit_debug_code()) { + mov(value, Immediate(BitCast<int32_t>(kZapValue))); + mov(index, Immediate(BitCast<int32_t>(kZapValue))); } } -void MacroAssembler::RecordWrite(Register object, - int offset, - Register value, - Register scratch) { +void MacroAssembler::RecordWriteField( + Register object, + int offset, + Register value, + Register dst, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { // First, check if a write barrier is even needed. The tests below - // catch stores of Smis and stores into young gen. + // catch stores of Smis. Label done; // Skip barrier if writing a smi. 
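Smis are tagged immediates rather than heap pointers, so a smi store never needs either half of the barrier; that is what the new INLINE_SMI_CHECK / OMIT_SMI_CHECK parameter controls. Roughly, on 32-bit targets the test reduces to checking the low tag bit:

    #include <cstdint>

    // Mirrors kSmiTag == 0 with a one-bit tag mask; call sites that have
    // already proved the value is a heap object pass OMIT_SMI_CHECK so the
    // test is not re-emitted.
    enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

    inline bool IsSmi(intptr_t tagged_value) {
      return (tagged_value & 1) == 0;
    }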
- STATIC_ASSERT(kSmiTag == 0); - JumpIfSmi(value, &done, Label::kNear); - - InNewSpace(object, value, equal, &done, Label::kNear); + if (smi_check == INLINE_SMI_CHECK) { + JumpIfSmi(value, &done, Label::kNear); + } - // The offset is relative to a tagged or untagged HeapObject pointer, - // so either offset or offset + kHeapObjectTag must be a - // multiple of kPointerSize. - ASSERT(IsAligned(offset, kPointerSize) || - IsAligned(offset + kHeapObjectTag, kPointerSize)); + // Although the object register is tagged, the offset is relative to the start + // of the object, so so offset must be a multiple of kPointerSize. + ASSERT(IsAligned(offset, kPointerSize)); - Register dst = scratch; - if (offset != 0) { - lea(dst, Operand(object, offset)); - } else { - // Array access: calculate the destination address in the same manner as - // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset - // into an array of words. - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); - lea(dst, Operand(object, dst, times_half_pointer_size, - FixedArray::kHeaderSize - kHeapObjectTag)); + lea(dst, FieldOperand(object, offset)); + if (emit_debug_code()) { + Label ok; + test_b(dst, (1 << kPointerSizeLog2) - 1); + j(zero, &ok, Label::kNear); + int3(); + bind(&ok); } - RecordWriteHelper(object, dst, value); + + RecordWrite( + object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); bind(&done); - // Clobber all input registers when running with the debug-code flag + // Clobber clobbered input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - mov(object, Immediate(BitCast<int32_t>(kZapValue))); mov(value, Immediate(BitCast<int32_t>(kZapValue))); - mov(scratch, Immediate(BitCast<int32_t>(kZapValue))); + mov(dst, Immediate(BitCast<int32_t>(kZapValue))); } } void MacroAssembler::RecordWrite(Register object, Register address, - Register value) { + Register value, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + ASSERT(!object.is(value)); + ASSERT(!object.is(address)); + ASSERT(!value.is(address)); + if (emit_debug_code()) { + AbortIfSmi(object); + } + + if (remembered_set_action == OMIT_REMEMBERED_SET && + !FLAG_incremental_marking) { + return; + } + + if (FLAG_debug_code) { + Label ok; + cmp(value, Operand(address, 0)); + j(equal, &ok, Label::kNear); + int3(); + bind(&ok); + } + // First, check if a write barrier is even needed. The tests below // catch stores of Smis and stores into young gen. Label done; - // Skip barrier if writing a smi. - STATIC_ASSERT(kSmiTag == 0); - JumpIfSmi(value, &done, Label::kNear); - - InNewSpace(object, value, equal, &done); - - RecordWriteHelper(object, address, value); + if (smi_check == INLINE_SMI_CHECK) { + // Skip barrier if writing a smi. + JumpIfSmi(value, &done, Label::kNear); + } + + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + zero, + &done, + Label::kNear); + CheckPageFlag(object, + value, // Used as scratch. + MemoryChunk::kPointersFromHereAreInterestingMask, + zero, + &done, + Label::kNear); + + RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + CallStub(&stub); bind(&done); - // Clobber all input registers when running with the debug-code flag + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. 
if (emit_debug_code()) { - mov(object, Immediate(BitCast<int32_t>(kZapValue))); mov(address, Immediate(BitCast<int32_t>(kZapValue))); mov(value, Immediate(BitCast<int32_t>(kZapValue))); } @@ -224,7 +311,7 @@ void MacroAssembler::DebugBreak() { void MacroAssembler::Set(Register dst, const Immediate& x) { if (x.is_zero()) { - xor_(dst, Operand(dst)); // Shorter than mov. + xor_(dst, dst); // Shorter than mov. } else { mov(dst, x); } @@ -287,13 +374,111 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { void MacroAssembler::CheckFastElements(Register map, Label* fail, Label::Distance distance) { - STATIC_ASSERT(FAST_ELEMENTS == 0); + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Map::kMaximumBitField2FastElementValue); + j(above, fail, distance); +} + + +void MacroAssembler::CheckFastObjectElements(Register map, + Label* fail, + Label::Distance distance) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Map::kMaximumBitField2FastSmiOnlyElementValue); + j(below_equal, fail, distance); cmpb(FieldOperand(map, Map::kBitField2Offset), Map::kMaximumBitField2FastElementValue); j(above, fail, distance); } +void MacroAssembler::CheckFastSmiOnlyElements(Register map, + Label* fail, + Label::Distance distance) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Map::kMaximumBitField2FastSmiOnlyElementValue); + j(above, fail, distance); +} + + +void MacroAssembler::StoreNumberToDoubleElements( + Register maybe_number, + Register elements, + Register key, + Register scratch1, + XMMRegister scratch2, + Label* fail, + bool specialize_for_processor) { + Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value; + JumpIfSmi(maybe_number, &smi_value, Label::kNear); + + CheckMap(maybe_number, + isolate()->factory()->heap_number_map(), + fail, + DONT_DO_SMI_CHECK); + + // Double value, canonicalize NaN. + uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); + cmp(FieldOperand(maybe_number, offset), + Immediate(kNaNOrInfinityLowerBoundUpper32)); + j(greater_equal, &maybe_nan, Label::kNear); + + bind(¬_nan); + ExternalReference canonical_nan_reference = + ExternalReference::address_of_canonical_non_hole_nan(); + if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { + CpuFeatures::Scope use_sse2(SSE2); + movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); + bind(&have_double_value); + movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize), + scratch2); + } else { + fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset)); + bind(&have_double_value); + fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize)); + } + jmp(&done); + + bind(&maybe_nan); + // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise + // it's an Infinity, and the non-NaN code path applies. + j(greater, &is_nan, Label::kNear); + cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); + j(zero, ¬_nan); + bind(&is_nan); + if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { + CpuFeatures::Scope use_sse2(SSE2); + movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference)); + } else { + fld_d(Operand::StaticVariable(canonical_nan_reference)); + } + jmp(&have_double_value, Label::kNear); + + bind(&smi_value); + // Value is a smi. 
Convert to a double and store. + // Preserve original value. + mov(scratch1, maybe_number); + SmiUntag(scratch1); + if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { + CpuFeatures::Scope fscope(SSE2); + cvtsi2sd(scratch2, scratch1); + movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize), + scratch2); + } else { + push(scratch1); + fild_s(Operand(esp, 0)); + pop(scratch1); + fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize)); + } + bind(&done); +} + + void MacroAssembler::CheckMap(Register obj, Handle<Map> map, Label* fail, @@ -345,7 +530,7 @@ void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch, Label* fail) { movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset)); - sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); cmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); j(above, fail); @@ -402,7 +587,7 @@ void MacroAssembler::AbortIfSmi(Register object) { void MacroAssembler::EnterFrame(StackFrame::Type type) { push(ebp); - mov(ebp, Operand(esp)); + mov(ebp, esp); push(esi); push(Immediate(Smi::FromInt(type))); push(Immediate(CodeObject())); @@ -429,7 +614,7 @@ void MacroAssembler::EnterExitFramePrologue() { ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); push(ebp); - mov(ebp, Operand(esp)); + mov(ebp, esp); // Reserve room for entry stack pointer and push the code object. ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); @@ -451,14 +636,14 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { if (save_doubles) { CpuFeatures::Scope scope(SSE2); int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; - sub(Operand(esp), Immediate(space)); + sub(esp, Immediate(space)); const int offset = -2 * kPointerSize; for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); } } else { - sub(Operand(esp), Immediate(argc * kPointerSize)); + sub(esp, Immediate(argc * kPointerSize)); } // Get the required frame alignment for the OS. @@ -478,7 +663,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) { // Setup argc and argv in callee-saved registers. int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; - mov(edi, Operand(eax)); + mov(edi, eax); lea(esi, Operand(ebp, eax, times_4, offset)); // Reserve space for argc, argv and isolate. @@ -532,7 +717,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() { void MacroAssembler::LeaveApiExitFrame() { - mov(esp, Operand(ebp)); + mov(esp, ebp); pop(ebp); LeaveExitFrameEpilogue(); @@ -580,7 +765,7 @@ void MacroAssembler::PopTryHandler() { STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); pop(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress, isolate()))); - add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize)); + add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize)); } @@ -612,7 +797,7 @@ void MacroAssembler::Throw(Register value) { // (edx == ENTRY) == (ebp == 0) == (esi == 0), so we could test any // of them. 
Label skip; - cmp(Operand(edx), Immediate(StackHandler::ENTRY)); + cmp(edx, Immediate(StackHandler::ENTRY)); j(equal, &skip, Label::kNear); mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi); bind(&skip); @@ -696,7 +881,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, // When generating debug code, make sure the lexical context is set. if (emit_debug_code()) { - cmp(Operand(scratch), Immediate(0)); + cmp(scratch, Immediate(0)); Check(not_equal, "we should not have an empty lexical context"); } // Load the global context of the current context. @@ -784,23 +969,23 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, mov(r1, r0); not_(r0); shl(r1, 15); - add(r0, Operand(r1)); + add(r0, r1); // hash = hash ^ (hash >> 12); mov(r1, r0); shr(r1, 12); - xor_(r0, Operand(r1)); + xor_(r0, r1); // hash = hash + (hash << 2); lea(r0, Operand(r0, r0, times_4, 0)); // hash = hash ^ (hash >> 4); mov(r1, r0); shr(r1, 4); - xor_(r0, Operand(r1)); + xor_(r0, r1); // hash = hash * 2057; imul(r0, r0, 2057); // hash = hash ^ (hash >> 16); mov(r1, r0); shr(r1, 16); - xor_(r0, Operand(r1)); + xor_(r0, r1); // Compute capacity mask. mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset)); @@ -814,9 +999,9 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, mov(r2, r0); // Compute the masked index: (hash + i + i * i) & mask. if (i > 0) { - add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i))); + add(r2, Immediate(NumberDictionary::GetProbeOffset(i))); } - and_(r2, Operand(r1)); + and_(r2, r1); // Scale the index by multiplying by the entry size. ASSERT(NumberDictionary::kEntrySize == 3); @@ -872,7 +1057,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result, if (scratch.is(no_reg)) { mov(result, Operand::StaticVariable(new_space_allocation_top)); } else { - mov(Operand(scratch), Immediate(new_space_allocation_top)); + mov(scratch, Immediate(new_space_allocation_top)); mov(result, Operand(scratch, 0)); } } @@ -931,7 +1116,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size, if (!top_reg.is(result)) { mov(top_reg, result); } - add(Operand(top_reg), Immediate(object_size)); + add(top_reg, Immediate(object_size)); j(carry, gc_required); cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit)); j(above, gc_required); @@ -942,12 +1127,12 @@ void MacroAssembler::AllocateInNewSpace(int object_size, // Tag result if requested. if (top_reg.is(result)) { if ((flags & TAG_OBJECT) != 0) { - sub(Operand(result), Immediate(object_size - kHeapObjectTag)); + sub(result, Immediate(object_size - kHeapObjectTag)); } else { - sub(Operand(result), Immediate(object_size)); + sub(result, Immediate(object_size)); } } else if ((flags & TAG_OBJECT) != 0) { - add(Operand(result), Immediate(kHeapObjectTag)); + add(result, Immediate(kHeapObjectTag)); } } @@ -985,7 +1170,7 @@ void MacroAssembler::AllocateInNewSpace(int header_size, // We assume that element_count*element_size + header_size does not // overflow. 
lea(result_end, Operand(element_count, element_size, header_size)); - add(result_end, Operand(result)); + add(result_end, result); j(carry, gc_required); cmp(result_end, Operand::StaticVariable(new_space_allocation_limit)); j(above, gc_required); @@ -1030,7 +1215,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, if (!object_size.is(result_end)) { mov(result_end, object_size); } - add(result_end, Operand(result)); + add(result_end, result); j(carry, gc_required); cmp(result_end, Operand::StaticVariable(new_space_allocation_limit)); j(above, gc_required); @@ -1050,7 +1235,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) { ExternalReference::new_space_allocation_top_address(isolate()); // Make sure the object has no tag before resetting top. - and_(Operand(object), Immediate(~kHeapObjectTagMask)); + and_(object, Immediate(~kHeapObjectTagMask)); #ifdef DEBUG cmp(object, Operand::StaticVariable(new_space_allocation_top)); Check(below, "Undo allocation of non allocated memory"); @@ -1089,7 +1274,7 @@ void MacroAssembler::AllocateTwoByteString(Register result, ASSERT(kShortSize == 2); // scratch1 = length * 2 + kObjectAlignmentMask. lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask)); - and_(Operand(scratch1), Immediate(~kObjectAlignmentMask)); + and_(scratch1, Immediate(~kObjectAlignmentMask)); // Allocate two byte string in new space. AllocateInNewSpace(SeqTwoByteString::kHeaderSize, @@ -1123,8 +1308,8 @@ void MacroAssembler::AllocateAsciiString(Register result, ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); mov(scratch1, length); ASSERT(kCharSize == 1); - add(Operand(scratch1), Immediate(kObjectAlignmentMask)); - and_(Operand(scratch1), Immediate(~kObjectAlignmentMask)); + add(scratch1, Immediate(kObjectAlignmentMask)); + and_(scratch1, Immediate(~kObjectAlignmentMask)); // Allocate ascii string in new space. AllocateInNewSpace(SeqAsciiString::kHeaderSize, @@ -1258,7 +1443,7 @@ void MacroAssembler::CopyBytes(Register source, Register scratch) { Label loop, done, short_string, short_loop; // Experimentation shows that the short string loop is faster if length < 10. 
- cmp(Operand(length), Immediate(10)); + cmp(length, Immediate(10)); j(less_equal, &short_string); ASSERT(source.is(esi)); @@ -1273,12 +1458,12 @@ void MacroAssembler::CopyBytes(Register source, mov(scratch, ecx); shr(ecx, 2); rep_movs(); - and_(Operand(scratch), Immediate(0x3)); - add(destination, Operand(scratch)); + and_(scratch, Immediate(0x3)); + add(destination, scratch); jmp(&done); bind(&short_string); - test(length, Operand(length)); + test(length, length); j(zero, &done); bind(&short_loop); @@ -1293,13 +1478,27 @@ void MacroAssembler::CopyBytes(Register source, } +void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler) { + Label loop, entry; + jmp(&entry); + bind(&loop); + mov(Operand(start_offset, 0), filler); + add(start_offset, Immediate(kPointerSize)); + bind(&entry); + cmp(start_offset, end_offset); + j(less, &loop); +} + + void MacroAssembler::NegativeZeroTest(Register result, Register op, Label* then_label) { Label ok; - test(result, Operand(result)); + test(result, result); j(not_zero, &ok); - test(op, Operand(op)); + test(op, op); j(sign, then_label); bind(&ok); } @@ -1311,10 +1510,10 @@ void MacroAssembler::NegativeZeroTest(Register result, Register scratch, Label* then_label) { Label ok; - test(result, Operand(result)); + test(result, result); j(not_zero, &ok); - mov(scratch, Operand(op1)); - or_(scratch, Operand(op2)); + mov(scratch, op1); + or_(scratch, op2); j(sign, then_label); bind(&ok); } @@ -1344,7 +1543,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, // If the prototype or initial map is the hole, don't return it and // simply miss the cache instead. This will allow us to allocate a // prototype object on-demand in the runtime system. - cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value())); + cmp(result, Immediate(isolate()->factory()->the_hole_value())); j(equal, miss); // If the function does not have an initial map, we're done. @@ -1367,13 +1566,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) { - ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. + ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); } MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. + ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -1384,13 +1583,12 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) { void MacroAssembler::TailCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. + ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); jmp(stub->GetCode(), RelocInfo::CODE_TARGET); } MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. 
Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -1406,9 +1604,15 @@ void MacroAssembler::StubReturn(int argc) { } +bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { + if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; + return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); +} + + void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { - add(Operand(esp), Immediate(num_arguments * kPointerSize)); + add(esp, Immediate(num_arguments * kPointerSize)); } mov(eax, Immediate(isolate()->factory()->undefined_value())); } @@ -1442,8 +1646,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); Set(eax, Immediate(function->nargs)); mov(ebx, Immediate(ExternalReference(function, isolate()))); - CEntryStub ces(1); - ces.SaveDoubles(); + CEntryStub ces(1, kSaveFPRegs); CallStub(&ces); } @@ -1623,7 +1826,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function, Label leave_exit_frame; // Check if the result handle holds 0. - test(eax, Operand(eax)); + test(eax, eax); j(zero, &empty_handle); // It was non-zero. Dereference to get the result value. mov(eax, Operand(eax, 0)); @@ -1664,7 +1867,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function, mov(edi, eax); mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address())); mov(eax, Immediate(delete_extensions)); - call(Operand(eax)); + call(eax); mov(eax, edi); jmp(&leave_exit_frame); @@ -1698,10 +1901,10 @@ void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) { if (call_kind == CALL_AS_FUNCTION) { // Set to some non-zero smi by updating the least significant // byte. - mov_b(Operand(dst), 1 << kSmiTagSize); + mov_b(dst, 1 << kSmiTagSize); } else { // Set to smi zero by clearing the register. - xor_(dst, Operand(dst)); + xor_(dst, dst); } } @@ -1746,7 +1949,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } else if (!expected.reg().is(actual.reg())) { // Both expected and actual are in (different) registers. This // is the case when we invoke functions using call and apply. - cmp(expected.reg(), Operand(actual.reg())); + cmp(expected.reg(), actual.reg()); j(equal, &invoke); ASSERT(actual.reg().is(eax)); ASSERT(expected.reg().is(ebx)); @@ -1758,7 +1961,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, isolate()->builtins()->ArgumentsAdaptorTrampoline(); if (!code_constant.is_null()) { mov(edx, Immediate(code_constant)); - add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag)); + add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag)); } else if (!code_operand.is_reg(edx)) { mov(edx, code_operand); } @@ -1784,6 +1987,9 @@ void MacroAssembler::InvokeCode(const Operand& code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, Label::kNear, call_wrapper, @@ -1809,8 +2015,11 @@ void MacroAssembler::InvokeCode(Handle<Code> code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. 
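The has_frame_ flag asserted just below is maintained by the frame scopes this patch threads through the code generators: stub and runtime calls that may GC are only legal while some frame is active. A rough model of that bookkeeping, with placeholder names apart from has_frame():

    // Not the real FrameScope - just the shape of the RAII flag behind the
    // ASSERT(flag == JUMP_FUNCTION || has_frame()) checks below.
    class AssemblerBase {
     public:
      bool has_frame() const { return has_frame_; }
      void set_has_frame(bool value) { has_frame_ = value; }
     private:
      bool has_frame_ = false;
    };

    class ScopedFrame {
     public:
      explicit ScopedFrame(AssemblerBase* masm)
          : masm_(masm), old_has_frame_(masm->has_frame()) {
        masm_->set_has_frame(true);   // the real scope also emits the frame prologue
      }
      ~ScopedFrame() {
        masm_->set_has_frame(old_has_frame_);  // and the matching epilogue
      }
     private:
      AssemblerBase* masm_;
      bool old_has_frame_;
    };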
+ ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; - Operand dummy(eax); + Operand dummy(eax, 0); InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear, call_wrapper, call_kind); if (flag == CALL_FUNCTION) { @@ -1832,6 +2041,9 @@ void MacroAssembler::InvokeFunction(Register fun, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + ASSERT(fun.is(edi)); mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); @@ -1849,6 +2061,9 @@ void MacroAssembler::InvokeFunction(JSFunction* function, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + ASSERT(function->is_compiled()); // Get the function and setup the context. mov(edi, Immediate(Handle<JSFunction>(function))); @@ -1872,8 +2087,8 @@ void MacroAssembler::InvokeFunction(JSFunction* function, void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { - // Calls are not allowed in some stubs. - ASSERT(flag == JUMP_FUNCTION || allow_stub_calls()); + // You can't call a builtin without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); // Rely on the assertion to check that the number of provided // arguments match the expected number of arguments. Fake a @@ -1884,6 +2099,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, expected, expected, flag, call_wrapper, CALL_AS_METHOD); } + void MacroAssembler::GetBuiltinFunction(Register target, Builtins::JavaScript id) { // Load the JavaScript builtin function from the builtins object. @@ -1893,6 +2109,7 @@ void MacroAssembler::GetBuiltinFunction(Register target, JSBuiltinsObject::OffsetOfFunctionWithId(id))); } + void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { ASSERT(!target.is(edi)); // Load the JavaScript builtin function from the builtins object. @@ -1994,7 +2211,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) { ret(bytes_dropped); } else { pop(scratch); - add(Operand(esp), Immediate(bytes_dropped)); + add(esp, Immediate(bytes_dropped)); push(scratch); ret(0); } @@ -2005,7 +2222,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) { void MacroAssembler::Drop(int stack_elements) { if (stack_elements > 0) { - add(Operand(esp), Immediate(stack_elements * kPointerSize)); + add(esp, Immediate(stack_elements * kPointerSize)); } } @@ -2148,13 +2365,19 @@ void MacroAssembler::Abort(const char* msg) { RecordComment(msg); } #endif - // Disable stub call restrictions to always allow calls to abort. - AllowStubCallsScope allow_scope(this, true); push(eax); push(Immediate(p0)); push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0)))); - CallRuntime(Runtime::kAbort, 2); + // Disable stub call restrictions to always allow calls to abort. + if (!has_frame_) { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. 
+ FrameScope scope(this, StackFrame::NONE); + CallRuntime(Runtime::kAbort, 2); + } else { + CallRuntime(Runtime::kAbort, 2); + } // will not return here int3(); } @@ -2177,7 +2400,7 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst, ASSERT(is_uintn(power + HeapNumber::kExponentBias, HeapNumber::kExponentBits)); mov(scratch, Immediate(power + HeapNumber::kExponentBias)); - movd(dst, Operand(scratch)); + movd(dst, scratch); psllq(dst, HeapNumber::kMantissaBits); } @@ -2203,8 +2426,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1, Label* failure) { // Check that both objects are not smis. STATIC_ASSERT(kSmiTag == 0); - mov(scratch1, Operand(object1)); - and_(scratch1, Operand(object2)); + mov(scratch1, object1); + and_(scratch1, object2); JumpIfSmi(scratch1, failure); // Load instance type for both strings. @@ -2233,12 +2456,12 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { // Make stack end at alignment and make room for num_arguments words // and the original value of esp. mov(scratch, esp); - sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize)); + sub(esp, Immediate((num_arguments + 1) * kPointerSize)); ASSERT(IsPowerOf2(frame_alignment)); and_(esp, -frame_alignment); mov(Operand(esp, num_arguments * kPointerSize), scratch); } else { - sub(Operand(esp), Immediate(num_arguments * kPointerSize)); + sub(esp, Immediate(num_arguments * kPointerSize)); } } @@ -2246,27 +2469,39 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { // Trashing eax is ok as it will be the return value. - mov(Operand(eax), Immediate(function)); + mov(eax, Immediate(function)); CallCFunction(eax, num_arguments); } void MacroAssembler::CallCFunction(Register function, int num_arguments) { + ASSERT(has_frame()); // Check stack alignment. 
if (emit_debug_code()) { CheckStackAlignment(); } - call(Operand(function)); + call(function); if (OS::ActivationFrameAlignment() != 0) { mov(esp, Operand(esp, num_arguments * kPointerSize)); } else { - add(Operand(esp), Immediate(num_arguments * kPointerSize)); + add(esp, Immediate(num_arguments * kPointerSize)); } } +bool AreAliased(Register r1, Register r2, Register r3, Register r4) { + if (r1.is(r2)) return true; + if (r1.is(r3)) return true; + if (r1.is(r4)) return true; + if (r2.is(r3)) return true; + if (r2.is(r4)) return true; + if (r3.is(r4)) return true; + return false; +} + + CodePatcher::CodePatcher(byte* address, int size) : address_(address), size_(size), @@ -2288,6 +2523,198 @@ CodePatcher::~CodePatcher() { } +void MacroAssembler::CheckPageFlag( + Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance) { + ASSERT(cc == zero || cc == not_zero); + if (scratch.is(object)) { + and_(scratch, Immediate(~Page::kPageAlignmentMask)); + } else { + mov(scratch, Immediate(~Page::kPageAlignmentMask)); + and_(scratch, object); + } + if (mask < (1 << kBitsPerByte)) { + test_b(Operand(scratch, MemoryChunk::kFlagsOffset), + static_cast<uint8_t>(mask)); + } else { + test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask)); + } + j(cc, condition_met, condition_met_distance); +} + + +void MacroAssembler::JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black, + Label::Distance on_black_near) { + HasColor(object, scratch0, scratch1, + on_black, on_black_near, + 1, 0); // kBlackBitPattern. + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); +} + + +void MacroAssembler::HasColor(Register object, + Register bitmap_scratch, + Register mask_scratch, + Label* has_color, + Label::Distance has_color_distance, + int first_bit, + int second_bit) { + ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx)); + + GetMarkBits(object, bitmap_scratch, mask_scratch); + + Label other_color, word_boundary; + test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear); + add(mask_scratch, mask_scratch); // Shift left 1 by adding. + j(zero, &word_boundary, Label::kNear); + test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance); + jmp(&other_color, Label::kNear); + + bind(&word_boundary); + test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1); + + j(second_bit == 1 ? 
not_zero : zero, has_color, has_color_distance); + bind(&other_color); +} + + +void MacroAssembler::GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg) { + ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx)); + mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); + and_(bitmap_reg, addr_reg); + mov(ecx, addr_reg); + int shift = + Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; + shr(ecx, shift); + and_(ecx, + (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1)); + + add(bitmap_reg, ecx); + mov(ecx, addr_reg); + shr(ecx, kPointerSizeLog2); + and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1); + mov(mask_reg, Immediate(1)); + shl_cl(mask_reg); +} + + +void MacroAssembler::EnsureNotWhite( + Register value, + Register bitmap_scratch, + Register mask_scratch, + Label* value_is_white_and_not_data, + Label::Distance distance) { + ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx)); + GetMarkBits(value, bitmap_scratch, mask_scratch); + + // If the value is black or grey we don't need to do anything. + ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); + ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + + Label done; + + // Since both black and grey have a 1 in the first position and white does + // not have a 1 there we only need to check one bit. + test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + j(not_zero, &done, Label::kNear); + + if (FLAG_debug_code) { + // Check for impossible bit pattern. + Label ok; + push(mask_scratch); + // shl. May overflow making the check conservative. + add(mask_scratch, mask_scratch); + test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + j(zero, &ok, Label::kNear); + int3(); + bind(&ok); + pop(mask_scratch); + } + + // Value is white. We check whether it is data that doesn't need scanning. + // Currently only checks for HeapNumber and non-cons strings. + Register map = ecx; // Holds map while checking type. + Register length = ecx; // Holds length of object after checking type. + Label not_heap_number; + Label is_data_object; + + // Check for heap-number + mov(map, FieldOperand(value, HeapObject::kMapOffset)); + cmp(map, FACTORY->heap_number_map()); + j(not_equal, ¬_heap_number, Label::kNear); + mov(length, Immediate(HeapNumber::kSize)); + jmp(&is_data_object, Label::kNear); + + bind(¬_heap_number); + // Check for strings. + ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + // If it's a string and it's not a cons string then it's an object containing + // no GC pointers. + Register instance_type = ecx; + movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); + test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask); + j(not_zero, value_is_white_and_not_data); + // It's a non-indirect (non-cons and non-slice) string. + // If it's external, the length is just ExternalString::kSize. + // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). + Label not_external; + // External strings are the only ones with the kExternalStringTag bit + // set. 
+ ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); + ASSERT_EQ(0, kConsStringTag & kExternalStringTag); + test_b(instance_type, kExternalStringTag); + j(zero, ¬_external, Label::kNear); + mov(length, Immediate(ExternalString::kSize)); + jmp(&is_data_object, Label::kNear); + + bind(¬_external); + // Sequential string, either ASCII or UC16. + ASSERT(kAsciiStringTag == 0x04); + and_(length, Immediate(kStringEncodingMask)); + xor_(length, Immediate(kStringEncodingMask)); + add(length, Immediate(0x04)); + // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted + // by 2. If we multiply the string length as smi by this, it still + // won't overflow a 32-bit value. + ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize); + ASSERT(SeqAsciiString::kMaxSize <= + static_cast<int>(0xffffffffu >> (2 + kSmiTagSize))); + imul(length, FieldOperand(value, String::kLengthOffset)); + shr(length, 2 + kSmiTagSize + kSmiShiftSize); + add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); + and_(length, Immediate(~kObjectAlignmentMask)); + + bind(&is_data_object); + // Value is a data object, and it is white. Mark it black. Since we know + // that the object is white we can make it black by flipping one bit. + or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); + + and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask)); + add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), + length); + if (FLAG_debug_code) { + mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); + cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset)); + Check(less_equal, "Live Bytes Count overflow chunk size"); + } + + bind(&done); +} + } } // namespace v8::internal #endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index 1906644c3..a1b42c280 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -29,6 +29,7 @@ #define V8_IA32_MACRO_ASSEMBLER_IA32_H_ #include "assembler.h" +#include "frames.h" #include "v8globals.h" namespace v8 { @@ -50,6 +51,13 @@ enum AllocationFlags { // distinguish memory operands from other operands on ia32. typedef Operand MemOperand; +enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; +enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; + + +bool AreAliased(Register r1, Register r2, Register r3, Register r4); + + // MacroAssembler implements a collection of frequently used macros. class MacroAssembler: public Assembler { public: @@ -61,42 +69,130 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // GC Support + enum RememberedSetFinalAction { + kReturnAtEnd, + kFallThroughAtEnd + }; + + // Record in the remembered set the fact that we have a pointer to new space + // at the address pointed to by the addr register. Only works if addr is not + // in new space. + void RememberedSetHelper(Register object, // Used for debug code. + Register addr, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetFinalAction and_then); + + void CheckPageFlag(Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance = Label::kFar); + + // Check if object is in new space. Jumps if the object is not in new space. + // The register scratch can be object itself, but scratch will be clobbered. 
+ void JumpIfNotInNewSpace(Register object, + Register scratch, + Label* branch, + Label::Distance distance = Label::kFar) { + InNewSpace(object, scratch, zero, branch, distance); + } - // For page containing |object| mark region covering |addr| dirty. - // RecordWriteHelper only works if the object is not in new - // space. - void RecordWriteHelper(Register object, - Register addr, - Register scratch); + // Check if object is in new space. Jumps if the object is in new space. + // The register scratch can be object itself, but it will be clobbered. + void JumpIfInNewSpace(Register object, + Register scratch, + Label* branch, + Label::Distance distance = Label::kFar) { + InNewSpace(object, scratch, not_zero, branch, distance); + } - // Check if object is in new space. - // scratch can be object itself, but it will be clobbered. - void InNewSpace(Register object, - Register scratch, - Condition cc, // equal for new space, not_equal otherwise. - Label* branch, - Label::Distance branch_near = Label::kFar); + // Check if an object has a given incremental marking color. Also uses ecx! + void HasColor(Register object, + Register scratch0, + Register scratch1, + Label* has_color, + Label::Distance has_color_distance, + int first_bit, + int second_bit); + + void JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black, + Label::Distance on_black_distance = Label::kFar); + + // Checks the color of an object. If the object is already grey or black + // then we just fall through, since it is already live. If it is white and + // we can determine that it doesn't need to be scanned, then we just mark it + // black and fall through. For the rest we jump to the label so the + // incremental marker can fix its assumptions. + void EnsureNotWhite(Register object, + Register scratch1, + Register scratch2, + Label* object_is_white_and_not_data, + Label::Distance distance); + + // Notify the garbage collector that we wrote a pointer into an object. + // |object| is the object being stored into, |value| is the object being + // stored. value and scratch registers are clobbered by the operation. + // The offset is the offset from the start of the object, not the offset from + // the tagged HeapObject pointer. For use with FieldOperand(reg, off). + void RecordWriteField( + Register object, + int offset, + Register value, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); + + // As above, but the offset has the tag presubtracted. For use with + // Operand(reg, off). + void RecordWriteContextSlot( + Register context, + int offset, + Register value, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK) { + RecordWriteField(context, + offset + kHeapObjectTag, + value, + scratch, + save_fp, + remembered_set_action, + smi_check); + } - // For page containing |object| mark region covering [object+offset] - // dirty. |object| is the object being stored into, |value| is the - // object being stored. If offset is zero, then the scratch register - // contains the array index into the elements array represented as a - // Smi. All registers are clobbered by the operation. RecordWrite + // Notify the garbage collector that we wrote a pointer into a fixed array. + // |array| is the array being stored into, |value| is the + // object being stored. 
|index| is the array index represented as a + // Smi. All registers are clobbered by the operation RecordWriteArray // filters out smis so it does not update the write barrier if the // value is a smi. - void RecordWrite(Register object, - int offset, - Register value, - Register scratch); + void RecordWriteArray( + Register array, + Register value, + Register index, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); // For page containing |object| mark region covering |address| // dirty. |object| is the object being stored into, |value| is the - // object being stored. All registers are clobbered by the + // object being stored. The address and value registers are clobbered by the // operation. RecordWrite filters out smis so it does not update the // write barrier if the value is a smi. - void RecordWrite(Register object, - Register address, - Register value); + void RecordWrite( + Register object, + Register address, + Register value, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); #ifdef ENABLE_DEBUGGER_SUPPORT // --------------------------------------------------------------------------- @@ -105,15 +201,6 @@ class MacroAssembler: public Assembler { void DebugBreak(); #endif - // --------------------------------------------------------------------------- - // Activation frames - - void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } - void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } - - void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } - void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } - // Enter specific kind of exit frame. Expects the number of // arguments in register eax and sets up the number of arguments in // register edi and the pointer to the first argument in register @@ -159,6 +246,15 @@ class MacroAssembler: public Assembler { void SetCallKind(Register dst, CallKind kind); // Invoke the JavaScript function code by either calling or jumping. + void InvokeCode(Register code, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper, + CallKind call_kind) { + InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind); + } + void InvokeCode(const Operand& code, const ParameterCount& expected, const ParameterCount& actual, @@ -225,6 +321,29 @@ class MacroAssembler: public Assembler { Label* fail, Label::Distance distance = Label::kFar); + // Check if a map for a JSObject indicates that the object can have both smi + // and HeapObject elements. Jump to the specified label if it does not. + void CheckFastObjectElements(Register map, + Label* fail, + Label::Distance distance = Label::kFar); + + // Check if a map for a JSObject indicates that the object has fast smi only + // elements. Jump to the specified label if it does not. + void CheckFastSmiOnlyElements(Register map, + Label* fail, + Label::Distance distance = Label::kFar); + + // Check to see if maybe_number can be stored as a double in + // FastDoubleElements. If it can, store it at the index specified by key in + // the FastDoubleElements array elements, otherwise jump to fail. 
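The RecordWrite, RecordWriteField and RecordWriteArray declarations above all funnel into the same write-barrier idea: after storing a heap pointer, optionally skip smis, inform the incremental marker, and record old-to-new slots in the remembered set. Below is a rough C++ sketch of that control flow; Heap, InNewSpace and MarkingBarrier are simplified stand-ins for illustration, not V8's API.

#include <cstdint>
#include <unordered_set>

using Address = uintptr_t;

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

// Simplified heap model for illustration only.
struct Heap {
  Address new_space_start = 0;
  Address new_space_end = 0;
  std::unordered_set<Address> remembered_slots;  // old-to-new pointer slots
  bool InNewSpace(Address a) const {
    return a >= new_space_start && a < new_space_end;
  }
  void MarkingBarrier(Address /*host*/, Address /*value*/) {
    // Hook for the incremental marker; a no-op in this sketch.
  }
};

inline bool IsSmi(Address v) { return (v & 1) == 0; }  // ia32: smi tag is 0

// Store `value` into `*slot` of `host`, then do the barrier bookkeeping.
void RecordWrite(Heap* heap, Address host, Address* slot, Address value,
                 RememberedSetAction rs_action, SmiCheck smi_check) {
  *slot = value;
  if (smi_check == INLINE_SMI_CHECK && IsSmi(value)) return;  // smis carry no pointers
  heap->MarkingBarrier(host, value);
  if (rs_action == EMIT_REMEMBERED_SET &&
      !heap->InNewSpace(host) && heap->InNewSpace(value)) {
    heap->remembered_slots.insert(reinterpret_cast<Address>(slot));
  }
}

OMIT_REMEMBERED_SET corresponds to the call sites in this patch where the host object is already known to be in new space, and OMIT_SMI_CHECK to the sites where the value has already been proven to be a heap object.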
+ void StoreNumberToDoubleElements(Register maybe_number, + Register elements, + Register key, + Register scratch1, + XMMRegister scratch2, + Label* fail, + bool specialize_for_processor); + // Check if the map of an object is equal to a specified map and branch to // label if not. Skip the smi check if not required (object is known to be a // heap object) @@ -277,7 +396,7 @@ class MacroAssembler: public Assembler { void SmiTag(Register reg) { STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize == 1); - add(reg, Operand(reg)); + add(reg, reg); } void SmiUntag(Register reg) { sar(reg, kSmiTagSize); @@ -465,6 +584,13 @@ class MacroAssembler: public Assembler { Register length, Register scratch); + // Initialize fields with filler values. Fields starting at |start_offset| + // not including end_offset are overwritten with the value in |filler|. At + // the end the loop, |start_offset| takes the value of |end_offset|. + void InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler); + // --------------------------------------------------------------------------- // Support functions. @@ -667,6 +793,9 @@ class MacroAssembler: public Assembler { bool generating_stub() { return generating_stub_; } void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } bool allow_stub_calls() { return allow_stub_calls_; } + void set_has_frame(bool value) { has_frame_ = value; } + bool has_frame() { return has_frame_; } + inline bool AllowThisStubCall(CodeStub* stub); // --------------------------------------------------------------------------- // String utilities. @@ -690,9 +819,14 @@ class MacroAssembler: public Assembler { return SafepointRegisterStackIndex(reg.code()); } + // Activation support. + void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); + private: bool generating_stub_; bool allow_stub_calls_; + bool has_frame_; // This handle will be patched with the code object on installation. Handle<Object> code_object_; @@ -703,14 +837,10 @@ class MacroAssembler: public Assembler { const Operand& code_operand, Label* done, InvokeFlag flag, - Label::Distance done_near = Label::kFar, + Label::Distance done_distance, const CallWrapper& call_wrapper = NullCallWrapper(), CallKind call_kind = CALL_AS_METHOD); - // Activation support. - void EnterFrame(StackFrame::Type type); - void LeaveFrame(StackFrame::Type type); - void EnterExitFramePrologue(); void EnterExitFrameEpilogue(int argc, bool save_doubles); @@ -729,6 +859,20 @@ class MacroAssembler: public Assembler { Register scratch, bool gc_allowed); + // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. + void InNewSpace(Register object, + Register scratch, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance = Label::kFar); + + // Helper for finding the mark bits for an address. Afterwards, the + // bitmap register points at the word with the mark bits and the mask + // the position of the first bit. Uses ecx as scratch and leaves addr_reg + // unchanged. + inline void GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg); // Compute memory operands for safepoint stack slots. 
Operand SafepointRegisterSlot(Register reg); diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc index d175d9e03..8b0b9ab91 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2008-2009 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -134,7 +134,7 @@ int RegExpMacroAssemblerIA32::stack_limit_slack() { void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) { if (by != 0) { - __ add(Operand(edi), Immediate(by * char_size())); + __ add(edi, Immediate(by * char_size())); } } @@ -152,8 +152,8 @@ void RegExpMacroAssemblerIA32::Backtrack() { CheckPreemption(); // Pop Code* offset from backtrack stack, add Code* and jump to location. Pop(ebx); - __ add(Operand(ebx), Immediate(masm_->CodeObject())); - __ jmp(Operand(ebx)); + __ add(ebx, Immediate(masm_->CodeObject())); + __ jmp(ebx); } @@ -219,7 +219,7 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str, int byte_offset = cp_offset * char_size(); if (check_end_of_string) { // Check that there are at least str.length() characters left in the input. - __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length))); + __ cmp(edi, Immediate(-(byte_offset + byte_length))); BranchOrBacktrack(greater, on_failure); } @@ -288,7 +288,7 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) { Label fallthrough; __ cmp(edi, Operand(backtrack_stackpointer(), 0)); __ j(not_equal, &fallthrough); - __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); // Pop. + __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop. BranchOrBacktrack(no_condition, on_equal); __ bind(&fallthrough); } @@ -300,7 +300,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( Label fallthrough; __ mov(edx, register_location(start_reg)); // Index of start of capture __ mov(ebx, register_location(start_reg + 1)); // Index of end of capture - __ sub(ebx, Operand(edx)); // Length of capture. + __ sub(ebx, edx); // Length of capture. // The length of a capture should not be negative. This can only happen // if the end of the capture is unrecorded, or at a point earlier than @@ -320,9 +320,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( __ push(backtrack_stackpointer()); // After this, the eax, ecx, and edi registers are available. - __ add(edx, Operand(esi)); // Start of capture - __ add(edi, Operand(esi)); // Start of text to match against capture. - __ add(ebx, Operand(edi)); // End of text to match against capture. + __ add(edx, esi); // Start of capture + __ add(edi, esi); // Start of text to match against capture. + __ add(ebx, edi); // End of text to match against capture. Label loop; __ bind(&loop); @@ -339,15 +339,15 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( __ movzx_b(ecx, Operand(edx, 0)); __ or_(ecx, 0x20); - __ cmp(eax, Operand(ecx)); + __ cmp(eax, ecx); __ j(not_equal, &fail); __ bind(&loop_increment); // Increment pointers into match and capture strings. - __ add(Operand(edx), Immediate(1)); - __ add(Operand(edi), Immediate(1)); + __ add(edx, Immediate(1)); + __ add(edi, Immediate(1)); // Compare to end of match, and loop if not done. 
- __ cmp(edi, Operand(ebx)); + __ cmp(edi, ebx); __ j(below, &loop); __ jmp(&success); @@ -361,9 +361,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( // Restore original value before continuing. __ pop(backtrack_stackpointer()); // Drop original value of character position. - __ add(Operand(esp), Immediate(kPointerSize)); + __ add(esp, Immediate(kPointerSize)); // Compute new value of character position after the matched part. - __ sub(edi, Operand(esi)); + __ sub(edi, esi); } else { ASSERT(mode_ == UC16); // Save registers before calling C function. @@ -389,16 +389,19 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( // Set byte_offset2. // Found by adding negative string-end offset of current position (edi) // to end of string. - __ add(edi, Operand(esi)); + __ add(edi, esi); __ mov(Operand(esp, 1 * kPointerSize), edi); // Set byte_offset1. // Start of capture, where edx already holds string-end negative offset. - __ add(edx, Operand(esi)); + __ add(edx, esi); __ mov(Operand(esp, 0 * kPointerSize), edx); - ExternalReference compare = - ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); - __ CallCFunction(compare, argument_count); + { + AllowExternalCallThatCantCauseGC scope(masm_); + ExternalReference compare = + ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + __ CallCFunction(compare, argument_count); + } // Pop original values before reacting on result value. __ pop(ebx); __ pop(backtrack_stackpointer()); @@ -406,10 +409,10 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( __ pop(esi); // Check if function returned non-zero for success or zero for failure. - __ or_(eax, Operand(eax)); + __ or_(eax, eax); BranchOrBacktrack(zero, on_no_match); // On success, increment position by length of capture. - __ add(edi, Operand(ebx)); + __ add(edi, ebx); } __ bind(&fallthrough); } @@ -425,7 +428,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( // Find length of back-referenced capture. __ mov(edx, register_location(start_reg)); __ mov(eax, register_location(start_reg + 1)); - __ sub(eax, Operand(edx)); // Length to check. + __ sub(eax, edx); // Length to check. // Fail on partial or illegal capture (start of capture after end of capture). BranchOrBacktrack(less, on_no_match); // Succeed on empty capture (including no capture) @@ -433,7 +436,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( // Check that there are sufficient characters left in the input. __ mov(ebx, edi); - __ add(ebx, Operand(eax)); + __ add(ebx, eax); BranchOrBacktrack(greater, on_no_match); // Save register to make it available below. @@ -441,7 +444,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( // Compute pointers to match string and capture string __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match. - __ add(edx, Operand(esi)); // Start of capture. + __ add(edx, esi); // Start of capture. __ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match Label loop; @@ -456,10 +459,10 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( } __ j(not_equal, &fail); // Increment pointers into capture and match string. - __ add(Operand(edx), Immediate(char_size())); - __ add(Operand(ebx), Immediate(char_size())); + __ add(edx, Immediate(char_size())); + __ add(ebx, Immediate(char_size())); // Check if we have reached end of match area. 
- __ cmp(ebx, Operand(ecx)); + __ cmp(ebx, ecx); __ j(below, &loop); __ jmp(&success); @@ -471,7 +474,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( __ bind(&success); // Move current character position to position after match. __ mov(edi, ecx); - __ sub(Operand(edi), esi); + __ sub(edi, esi); // Restore backtrack stackpointer. __ pop(backtrack_stackpointer()); @@ -574,17 +577,17 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, return true; case '.': { // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029) - __ mov(Operand(eax), current_character()); - __ xor_(Operand(eax), Immediate(0x01)); + __ mov(eax, current_character()); + __ xor_(eax, Immediate(0x01)); // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c - __ sub(Operand(eax), Immediate(0x0b)); + __ sub(eax, Immediate(0x0b)); __ cmp(eax, 0x0c - 0x0b); BranchOrBacktrack(below_equal, on_no_match); if (mode_ == UC16) { // Compare original value to 0x2028 and 0x2029, using the already // computed (current_char ^ 0x01 - 0x0b). I.e., check for // 0x201d (0x2028 - 0x0b) or 0x201e. - __ sub(Operand(eax), Immediate(0x2028 - 0x0b)); + __ sub(eax, Immediate(0x2028 - 0x0b)); __ cmp(eax, 0x2029 - 0x2028); BranchOrBacktrack(below_equal, on_no_match); } @@ -593,7 +596,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, case 'w': { if (mode_ != ASCII) { // Table is 128 entries, so all ASCII characters can be tested. - __ cmp(Operand(current_character()), Immediate('z')); + __ cmp(current_character(), Immediate('z')); BranchOrBacktrack(above, on_no_match); } ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char. @@ -607,7 +610,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, Label done; if (mode_ != ASCII) { // Table is 128 entries, so all ASCII characters can be tested. - __ cmp(Operand(current_character()), Immediate('z')); + __ cmp(current_character(), Immediate('z')); __ j(above, &done); } ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char. @@ -627,10 +630,10 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, case 'n': { // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029). // The opposite of '.'. - __ mov(Operand(eax), current_character()); - __ xor_(Operand(eax), Immediate(0x01)); + __ mov(eax, current_character()); + __ xor_(eax, Immediate(0x01)); // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c - __ sub(Operand(eax), Immediate(0x0b)); + __ sub(eax, Immediate(0x0b)); __ cmp(eax, 0x0c - 0x0b); if (mode_ == ASCII) { BranchOrBacktrack(above, on_no_match); @@ -641,7 +644,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, // Compare original value to 0x2028 and 0x2029, using the already // computed (current_char ^ 0x01 - 0x0b). I.e., check for // 0x201d (0x2028 - 0x0b) or 0x201e. - __ sub(Operand(eax), Immediate(0x2028 - 0x0b)); + __ sub(eax, Immediate(0x2028 - 0x0b)); __ cmp(eax, 1); BranchOrBacktrack(above, on_no_match); __ bind(&done); @@ -668,7 +671,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { // Entry code: __ bind(&entry_label_); - // Start new stack frame. + + // Tell the system that we have a stack frame. Because the type is MANUAL, no + // code is generated. + FrameScope scope(masm_, StackFrame::MANUAL); + + // Actually emit code to start a new stack frame. __ push(ebp); __ mov(ebp, esp); // Save callee-save registers. 
Order here should correspond to order of @@ -699,7 +707,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { __ bind(&stack_limit_hit); CallCheckStackGuardState(ebx); - __ or_(eax, Operand(eax)); + __ or_(eax, eax); // If returned value is non-zero, we exit with the returned value as result. __ j(not_zero, &exit_label_); @@ -708,13 +716,13 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { __ mov(ebx, Operand(ebp, kStartIndex)); // Allocate space on stack for registers. - __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize)); + __ sub(esp, Immediate(num_registers_ * kPointerSize)); // Load string length. __ mov(esi, Operand(ebp, kInputEnd)); // Load input position. __ mov(edi, Operand(ebp, kInputStart)); // Set up edi to be negative offset from string end. - __ sub(edi, Operand(esi)); + __ sub(edi, esi); // Set eax to address of char before start of the string. // (effectively string position -1). @@ -736,7 +744,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { Label init_loop; __ bind(&init_loop); __ mov(Operand(ebp, ecx, times_1, +0), eax); - __ sub(Operand(ecx), Immediate(kPointerSize)); + __ sub(ecx, Immediate(kPointerSize)); __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize); __ j(greater, &init_loop); } @@ -777,12 +785,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { if (mode_ == UC16) { __ lea(ecx, Operand(ecx, edx, times_2, 0)); } else { - __ add(ecx, Operand(edx)); + __ add(ecx, edx); } for (int i = 0; i < num_saved_registers_; i++) { __ mov(eax, register_location(i)); // Convert to index from start of string, not end. - __ add(eax, Operand(ecx)); + __ add(eax, ecx); if (mode_ == UC16) { __ sar(eax, 1); // Convert byte index to character index. } @@ -819,7 +827,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { __ push(edi); CallCheckStackGuardState(ebx); - __ or_(eax, Operand(eax)); + __ or_(eax, eax); // If returning non-zero, we should end execution with the given // result as return value. __ j(not_zero, &exit_label_); @@ -854,7 +862,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { __ CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. - __ or_(eax, Operand(eax)); + __ or_(eax, eax); __ j(equal, &exit_with_exception); // Otherwise use return value as new stack pointer. __ mov(backtrack_stackpointer(), eax); @@ -1183,8 +1191,8 @@ void RegExpMacroAssemblerIA32::SafeCall(Label* to) { void RegExpMacroAssemblerIA32::SafeReturn() { __ pop(ebx); - __ add(Operand(ebx), Immediate(masm_->CodeObject())); - __ jmp(Operand(ebx)); + __ add(ebx, Immediate(masm_->CodeObject())); + __ jmp(ebx); } @@ -1196,14 +1204,14 @@ void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) { void RegExpMacroAssemblerIA32::Push(Register source) { ASSERT(!source.is(backtrack_stackpointer())); // Notice: This updates flags, unlike normal Push. - __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); + __ sub(backtrack_stackpointer(), Immediate(kPointerSize)); __ mov(Operand(backtrack_stackpointer(), 0), source); } void RegExpMacroAssemblerIA32::Push(Immediate value) { // Notice: This updates flags, unlike normal Push. 
- __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); + __ sub(backtrack_stackpointer(), Immediate(kPointerSize)); __ mov(Operand(backtrack_stackpointer(), 0), value); } @@ -1212,7 +1220,7 @@ void RegExpMacroAssemblerIA32::Pop(Register target) { ASSERT(!target.is(backtrack_stackpointer())); __ mov(target, Operand(backtrack_stackpointer(), 0)); // Notice: This updates flags, unlike normal Pop. - __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); + __ add(backtrack_stackpointer(), Immediate(kPointerSize)); } diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index ab62764e6..07cb14d02 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -66,8 +66,8 @@ static void ProbeTable(Isolate* isolate, __ j(not_equal, &miss); // Jump to the first instruction in the code stub. - __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(Operand(extra)); + __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(extra); __ bind(&miss); } else { @@ -92,8 +92,8 @@ static void ProbeTable(Isolate* isolate, __ mov(offset, Operand::StaticArray(offset, times_2, value_offset)); // Jump to the first instruction in the code stub. - __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(Operand(offset)); + __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(offset); // Pop at miss. __ bind(&miss); @@ -204,8 +204,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); __ xor_(scratch, flags); __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize); - __ sub(scratch, Operand(name)); - __ add(Operand(scratch), Immediate(flags)); + __ sub(scratch, name); + __ add(scratch, Immediate(flags)); __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize); // Probe the secondary table. @@ -318,7 +318,7 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, Register scratch2, Label* miss_label) { __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); - __ mov(eax, Operand(scratch1)); + __ mov(eax, scratch1); __ ret(0); } @@ -406,7 +406,7 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) { // frame. // ----------------------------------- __ pop(scratch); - __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments)); + __ add(esp, Immediate(kPointerSize * kFastApiCallArguments)); __ push(scratch); } @@ -462,7 +462,7 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm, __ PrepareCallApiFunction(kApiArgc + kApiStackSpace); __ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_. - __ add(Operand(eax), Immediate(argc * kPointerSize)); + __ add(eax, Immediate(argc * kPointerSize)); __ mov(ApiParameterOperand(2), eax); // v8::Arguments::values_. __ Set(ApiParameterOperand(3), Immediate(argc)); // v8::Arguments::length_. // v8::Arguments::is_construct_call_. @@ -651,7 +651,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { scratch1, scratch2, scratch3, name, miss_label); - __ EnterInternalFrame(); + FrameScope scope(masm, StackFrame::INTERNAL); // Save the name_ register across the call. __ push(name_); @@ -668,7 +668,8 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Restore the name_ register. __ pop(name_); - __ LeaveInternalFrame(); + + // Leave the internal frame. 
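Throughout this patch, matched EnterInternalFrame()/LeaveInternalFrame() pairs are replaced by a FrameScope whose destructor leaves the frame, so every exit path out of the block is covered and the assembler can track has_frame. A minimal RAII sketch of the pattern follows; the Assembler and frame types are placeholders, not V8's classes.

#include <cassert>

// Placeholder assembler with just enough state for the sketch.
struct Assembler {
  bool has_frame = false;
  void EnterFrame() { has_frame = true;  /* emit: push ebp; mov ebp, esp; ... */ }
  void LeaveFrame() { has_frame = false; /* emit: mov esp, ebp; pop ebp */ }
};

enum class FrameType { INTERNAL, MANUAL };

class FrameScope {
 public:
  FrameScope(Assembler* masm, FrameType type) : masm_(masm), type_(type) {
    if (type_ == FrameType::MANUAL) {
      masm_->has_frame = true;   // caller emits the frame instructions itself
    } else {
      masm_->EnterFrame();
    }
  }
  ~FrameScope() {
    if (type_ == FrameType::MANUAL) {
      masm_->has_frame = false;
    } else {
      masm_->LeaveFrame();
    }
  }
 private:
  Assembler* masm_;
  FrameType type_;
};

void GenerateWithFrame(Assembler* masm) {
  FrameScope scope(masm, FrameType::INTERNAL);
  assert(masm->has_frame);
  // ... emit code that may call stubs, which requires a frame ...
}  // the frame is left here on every path

In the real patch, returning from inside the scope is handled explicitly, as with frame_scope.GenerateLeaveFrame() in GenerateLoadInterceptor further down.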
} void LoadWithInterceptor(MacroAssembler* masm, @@ -676,19 +677,21 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register holder, JSObject* holder_obj, Label* interceptor_succeeded) { - __ EnterInternalFrame(); - __ push(holder); // Save the holder. - __ push(name_); // Save the name. - - CompileCallLoadPropertyWithInterceptor(masm, - receiver, - holder, - name_, - holder_obj); - - __ pop(name_); // Restore the name. - __ pop(receiver); // Restore the holder. - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(holder); // Save the holder. + __ push(name_); // Save the name. + + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); + + __ pop(name_); // Restore the name. + __ pop(receiver); // Restore the holder. + // Leave the internal frame. + } __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel()); __ j(not_equal, interceptor_succeeded); @@ -786,8 +789,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. - __ mov(name_reg, Operand(eax)); - __ RecordWrite(receiver_reg, offset, name_reg, scratch); + __ mov(name_reg, eax); + __ RecordWriteField(receiver_reg, + offset, + name_reg, + scratch, + kDontSaveFPRegs); } else { // Write to the properties array. int offset = index * kPointerSize + FixedArray::kHeaderSize; @@ -797,8 +804,12 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. - __ mov(name_reg, Operand(eax)); - __ RecordWrite(scratch, offset, name_reg, receiver_reg); + __ mov(name_reg, eax); + __ RecordWriteField(scratch, + offset, + name_reg, + receiver_reg, + kDontSaveFPRegs); } // Return the value (register eax). @@ -932,7 +943,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object, } else if (heap()->InNewSpace(prototype)) { // Get the map of the current object. __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); - __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map()))); + __ cmp(scratch1, Immediate(Handle<Map>(current->map()))); // Branch on the result of the map check. __ j(not_equal, miss); // Check access rights to the global object. This has to happen @@ -1053,7 +1064,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, __ pop(scratch3); // Get return address to place it below. __ push(receiver); // receiver - __ mov(scratch2, Operand(esp)); + __ mov(scratch2, esp); ASSERT(!scratch2.is(reg)); __ push(reg); // holder // Push data from AccessorInfo. @@ -1084,7 +1095,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, __ PrepareCallApiFunction(kApiArgc); __ mov(ApiParameterOperand(0), ebx); // name. - __ add(Operand(ebx), Immediate(kPointerSize)); + __ add(ebx, Immediate(kPointerSize)); __ mov(ApiParameterOperand(1), ebx); // arguments pointer. // Emitting a stub call may try to allocate (if the code is not @@ -1158,40 +1169,42 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Save necessary data before invoking an interceptor. // Requires a frame to make GC aware of pushed pointers. - __ EnterInternalFrame(); + { + FrameScope frame_scope(masm(), StackFrame::INTERNAL); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - // CALLBACKS case needs a receiver to be passed into C++ callback. 
- __ push(receiver); - } - __ push(holder_reg); - __ push(name_reg); - - // Invoke an interceptor. Note: map checks from receiver to - // interceptor's holder has been compiled before (see a caller - // of this method.) - CompileCallLoadPropertyWithInterceptor(masm(), - receiver, - holder_reg, - name_reg, - interceptor_holder); - - // Check if interceptor provided a value for property. If it's - // the case, return immediately. - Label interceptor_failed; - __ cmp(eax, factory()->no_interceptor_result_sentinel()); - __ j(equal, &interceptor_failed); - __ LeaveInternalFrame(); - __ ret(0); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + // CALLBACKS case needs a receiver to be passed into C++ callback. + __ push(receiver); + } + __ push(holder_reg); + __ push(name_reg); - __ bind(&interceptor_failed); - __ pop(name_reg); - __ pop(holder_reg); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - __ pop(receiver); - } + // Invoke an interceptor. Note: map checks from receiver to + // interceptor's holder has been compiled before (see a caller + // of this method.) + CompileCallLoadPropertyWithInterceptor(masm(), + receiver, + holder_reg, + name_reg, + interceptor_holder); + + // Check if interceptor provided a value for property. If it's + // the case, return immediately. + Label interceptor_failed; + __ cmp(eax, factory()->no_interceptor_result_sentinel()); + __ j(equal, &interceptor_failed); + frame_scope.GenerateLeaveFrame(); + __ ret(0); - __ LeaveInternalFrame(); + __ bind(&interceptor_failed); + __ pop(name_reg); + __ pop(holder_reg); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + __ pop(receiver); + } + + // Leave the internal frame. + } // Check that the maps from interceptor's holder to lookup's holder // haven't changed. And load lookup's holder into holder_reg. @@ -1259,7 +1272,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) { if (kind_ == Code::KEYED_CALL_IC) { - __ cmp(Operand(ecx), Immediate(Handle<String>(name))); + __ cmp(ecx, Immediate(Handle<String>(name))); __ j(not_equal, miss); } } @@ -1316,7 +1329,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell, Immediate(Handle<SharedFunctionInfo>(function->shared()))); __ j(not_equal, miss); } else { - __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function))); + __ cmp(edi, Immediate(Handle<JSFunction>(function))); __ j(not_equal, miss); } } @@ -1441,21 +1454,25 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ j(not_equal, &call_builtin); if (argc == 1) { // Otherwise fall through to call builtin. - Label exit, with_write_barrier, attempt_to_grow_elements; + Label attempt_to_grow_elements, with_write_barrier; // Get the array's length into eax and calculate new length. __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - __ add(Operand(eax), Immediate(Smi::FromInt(argc))); + __ add(eax, Immediate(Smi::FromInt(argc))); // Get the element's length into ecx. __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset)); // Check if we could survive without allocation. - __ cmp(eax, Operand(ecx)); + __ cmp(eax, ecx); __ j(greater, &attempt_to_grow_elements); + // Check if value is a smi. + __ mov(ecx, Operand(esp, argc * kPointerSize)); + __ JumpIfNotSmi(ecx, &with_write_barrier); + // Save new length. 
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax); @@ -1463,20 +1480,27 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ lea(edx, FieldOperand(ebx, eax, times_half_pointer_size, FixedArray::kHeaderSize - argc * kPointerSize)); - __ mov(ecx, Operand(esp, argc * kPointerSize)); __ mov(Operand(edx, 0), ecx); - // Check if value is a smi. - __ JumpIfNotSmi(ecx, &with_write_barrier); - - __ bind(&exit); __ ret((argc + 1) * kPointerSize); __ bind(&with_write_barrier); - __ InNewSpace(ebx, ecx, equal, &exit); + __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); + __ CheckFastObjectElements(edi, &call_builtin); + + // Save new length. + __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax); + + // Push the element. + __ lea(edx, FieldOperand(ebx, + eax, times_half_pointer_size, + FixedArray::kHeaderSize - argc * kPointerSize)); + __ mov(Operand(edx, 0), ecx); + + __ RecordWrite( + ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); - __ RecordWriteHelper(ebx, edx, ecx); __ ret((argc + 1) * kPointerSize); __ bind(&attempt_to_grow_elements); @@ -1484,6 +1508,19 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ jmp(&call_builtin); } + __ mov(edi, Operand(esp, argc * kPointerSize)); + // Growing elements that are SMI-only requires special handling in case + // the new element is non-Smi. For now, delegate to the builtin. + Label no_fast_elements_check; + __ JumpIfSmi(edi, &no_fast_elements_check); + __ mov(esi, FieldOperand(edx, HeapObject::kMapOffset)); + __ CheckFastObjectElements(esi, &call_builtin, Label::kFar); + __ bind(&no_fast_elements_check); + + // We could be lucky and the elements array could be at the top of + // new-space. In this case we can just grow it in place by moving the + // allocation pointer up. + ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(isolate()); ExternalReference new_space_allocation_limit = @@ -1497,33 +1534,43 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ lea(edx, FieldOperand(ebx, eax, times_half_pointer_size, FixedArray::kHeaderSize - argc * kPointerSize)); - __ cmp(edx, Operand(ecx)); + __ cmp(edx, ecx); __ j(not_equal, &call_builtin); - __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize)); + __ add(ecx, Immediate(kAllocationDelta * kPointerSize)); __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit)); __ j(above, &call_builtin); // We fit and could grow elements. __ mov(Operand::StaticVariable(new_space_allocation_top), ecx); - __ mov(ecx, Operand(esp, argc * kPointerSize)); // Push the argument... - __ mov(Operand(edx, 0), ecx); + __ mov(Operand(edx, 0), edi); // ... and fill the rest with holes. for (int i = 1; i < kAllocationDelta; i++) { __ mov(Operand(edx, i * kPointerSize), Immediate(factory()->the_hole_value())); } + // We know the elements array is in new space so we don't need the + // remembered set, but we just pushed a value onto it so we may have to + // tell the incremental marker to rescan the object that we just grew. We + // don't need to worry about the holes because they are in old space and + // already marked black. + __ RecordWrite(ebx, edx, edi, kDontSaveFPRegs, OMIT_REMEMBERED_SET); + // Restore receiver to edx as finish sequence assumes it's here. __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // Increment element's and array's sizes. 
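The attempt_to_grow_elements path above exploits the bump-pointer nature of new space: if the elements backing store ends exactly at the current allocation top and the limit allows it, the array can be grown in place simply by moving the top. A compact C++ sketch of that check; NewSpace, kAllocationDelta and the 4-byte pointer size are assumptions for illustration.

#include <cstddef>
#include <cstdint>

// Toy bump-pointer "new space" for illustration only.
struct NewSpace {
  uintptr_t top;    // next free byte
  uintptr_t limit;  // end of the space
};

const size_t kPointerSize = 4;      // ia32
const size_t kAllocationDelta = 4;  // grow by a handful of slots at a time

// Try to extend an elements array whose storage ends at `array_end` by
// kAllocationDelta slots, without moving it.
bool TryGrowInPlace(NewSpace* space, uintptr_t array_end) {
  if (array_end != space->top) return false;  // something was allocated after it
  uintptr_t new_top = space->top + kAllocationDelta * kPointerSize;
  if (new_top > space->limit) return false;   // would run past the space
  space->top = new_top;                       // the new slots now belong to the array
  return true;                                // caller fills them with the hole value
}

When the check fails, the stub simply falls back to the builtin, which is free to allocate a larger backing store and copy.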
__ add(FieldOperand(ebx, FixedArray::kLengthOffset), Immediate(Smi::FromInt(kAllocationDelta))); + + // NOTE: This only happen in new-space, where we don't + // care about the black-byte-count on pages. Otherwise we should + // update that too if the object is black. + __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax); - // Elements are in new space, so write barrier is not required. __ ret((argc + 1) * kPointerSize); } @@ -1585,7 +1632,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object, // Get the array's length into ecx and calculate new length. __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset)); - __ sub(Operand(ecx), Immediate(Smi::FromInt(1))); + __ sub(ecx, Immediate(Smi::FromInt(1))); __ j(negative, &return_undefined); // Get the last element. @@ -1594,7 +1641,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object, __ mov(eax, FieldOperand(ebx, ecx, times_half_pointer_size, FixedArray::kHeaderSize)); - __ cmp(Operand(eax), Immediate(factory()->the_hole_value())); + __ cmp(eax, Immediate(factory()->the_hole_value())); __ j(equal, &call_builtin); // Set the array's length. @@ -2058,10 +2105,10 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object, __ sar(ebx, kBitsPerInt - 1); // Do bitwise not or do nothing depending on ebx. - __ xor_(eax, Operand(ebx)); + __ xor_(eax, ebx); // Add 1 or do nothing depending on ebx. - __ sub(eax, Operand(ebx)); + __ sub(eax, ebx); // If the result is still negative, go to the slow case. // This only happens for the most negative smi. @@ -2144,7 +2191,7 @@ MaybeObject* CallStubCompiler::CompileFastApiCall( // Allocate space for v8::Arguments implicit values. Must be initialized // before calling any runtime function. - __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize)); + __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize)); // Check that the maps haven't changed and find a Holder as a side effect. CheckPrototypes(JSObject::cast(object), edx, holder, @@ -2160,7 +2207,7 @@ MaybeObject* CallStubCompiler::CompileFastApiCall( if (result->IsFailure()) return result; __ bind(&miss); - __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize)); + __ add(esp, Immediate(kFastApiCallArguments * kPointerSize)); __ bind(&miss_before_stack_reserved); MaybeObject* maybe_result = GenerateMissBranch(); @@ -2599,13 +2646,9 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, Immediate(Handle<Map>(object->map()))); __ j(not_equal, &miss); - // Compute the cell operand to use. - Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell)); - if (Serializer::enabled()) { - __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell))); - cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset); - } + __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell))); + Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset); // Check that the value in the cell is not the hole. If it is, this // cell could have been deleted and reintroducing the global needs @@ -2616,8 +2659,23 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // Store the value in the cell. __ mov(cell_operand, eax); + Label done; + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &done); + + __ mov(ecx, eax); + __ lea(edx, cell_operand); + // Cells are always in the remembered set. + __ RecordWrite(ebx, // Object. + edx, // Address. + ecx, // Value. 
+ kDontSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); // Return the value (register eax). + __ bind(&done); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_store_global_inline(), 1); __ ret(0); @@ -2649,7 +2707,7 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, __ IncrementCounter(counters->keyed_store_field(), 1); // Check that the name has not changed. - __ cmp(Operand(ecx), Immediate(Handle<String>(name))); + __ cmp(ecx, Immediate(Handle<String>(name))); __ j(not_equal, &miss); // Generate store field code. Trashes the name register. @@ -2697,9 +2755,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { } -MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic( +MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic( MapList* receiver_maps, - CodeList* handler_ics) { + CodeList* handler_stubs, + MapList* transitioned_maps) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : key @@ -2707,15 +2766,21 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic( // -- esp[0] : return address // ----------------------------------- Label miss; - __ JumpIfSmi(edx, &miss); - - Register map_reg = ebx; - __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset)); - int receiver_count = receiver_maps->length(); - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map(receiver_maps->at(current)); - __ cmp(map_reg, map); - __ j(equal, Handle<Code>(handler_ics->at(current))); + __ JumpIfSmi(edx, &miss, Label::kNear); + __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); + // ebx: receiver->map(). + for (int i = 0; i < receiver_maps->length(); ++i) { + Handle<Map> map(receiver_maps->at(i)); + __ cmp(edi, map); + if (transitioned_maps->at(i) == NULL) { + __ j(equal, Handle<Code>(handler_stubs->at(i))); + } else { + Label next_map; + __ j(not_equal, &next_map, Label::kNear); + __ mov(ebx, Immediate(Handle<Map>(transitioned_maps->at(i)))); + __ jmp(Handle<Code>(handler_stubs->at(i)), RelocInfo::CODE_TARGET); + __ bind(&next_map); + } } __ bind(&miss); Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss(); @@ -2941,7 +3006,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name, __ IncrementCounter(counters->keyed_load_field(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss); @@ -2971,7 +3036,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback( __ IncrementCounter(counters->keyed_load_callback(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx, @@ -3006,7 +3071,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name, __ IncrementCounter(counters->keyed_load_constant_function(), 1); // Check that the name has not changed. 
- __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi, @@ -3034,7 +3099,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, __ IncrementCounter(counters->keyed_load_interceptor(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); LookupResult lookup; @@ -3070,7 +3135,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { __ IncrementCounter(counters->keyed_load_array_length(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); GenerateLoadArrayLength(masm(), edx, ecx, &miss); @@ -3095,7 +3160,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { __ IncrementCounter(counters->keyed_load_string_length(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true); @@ -3120,7 +3185,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { __ IncrementCounter(counters->keyed_load_function_prototype(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle<String>(name))); + __ cmp(eax, Immediate(Handle<String>(name))); __ j(not_equal, &miss); GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss); @@ -3155,7 +3220,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) { } -MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic( +MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic( MapList* receiver_maps, CodeList* handler_ics) { // ----------- S t a t e ------------- @@ -3298,7 +3363,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { // Move argc to ebx and retrieve and tag the JSObject to return. __ mov(ebx, eax); __ pop(eax); - __ or_(Operand(eax), Immediate(kHeapObjectTag)); + __ or_(eax, Immediate(kHeapObjectTag)); // Remove caller arguments and receiver from the stack and return. __ pop(ecx); @@ -3679,10 +3744,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // If the value is NaN or +/-infinity, the result is 0x80000000, // which is automatically zero when taken mod 2^n, n < 32. 
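The comment just above relies on how x86 truncating float-to-int conversion behaves: NaN and out-of-range inputs (including +/-infinity) yield the "integer indefinite" value, 0x80000000 for a 32-bit result, and since an external-array store of width 2^n bits (n < 32) keeps only the low bits, that value reduces to zero. A tiny check of the mod-2^n claim, assuming ordinary two's-complement masking:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t indefinite = 0x80000000u;  // 32-bit result for NaN / out-of-range input
  for (int n = 8; n < 32; n += 8) {
    uint32_t mask = (1u << n) - 1;    // "mod 2^n" for n < 32
    std::printf("mod 2^%d -> %u\n", n, indefinite & mask);  // always 0
  }
  return 0;
}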
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ sub(Operand(esp), Immediate(2 * kPointerSize)); + __ sub(esp, Immediate(2 * kPointerSize)); __ fisttp_d(Operand(esp, 0)); __ pop(ebx); - __ add(Operand(esp), Immediate(kPointerSize)); + __ add(esp, Immediate(kPointerSize)); } else { ASSERT(CpuFeatures::IsSupported(SSE2)); CpuFeatures::Scope scope(SSE2); @@ -3838,15 +3903,17 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( } -void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, - bool is_js_array) { +void KeyedStoreStubCompiler::GenerateStoreFastElement( + MacroAssembler* masm, + bool is_js_array, + ElementsKind elements_kind) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : key // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - Label miss_force_generic; + Label miss_force_generic, transition_elements_kind; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -3870,11 +3937,28 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, __ j(above_equal, &miss_force_generic); } - // Do the store and update the write barrier. Make sure to preserve - // the value in register eax. - __ mov(edx, Operand(eax)); - __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax); - __ RecordWrite(edi, 0, edx, ecx); + if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + __ JumpIfNotSmi(eax, &transition_elements_kind); + // ecx is a smi, use times_half_pointer_size instead of + // times_pointer_size + __ mov(FieldOperand(edi, + ecx, + times_half_pointer_size, + FixedArray::kHeaderSize), eax); + } else { + ASSERT(elements_kind == FAST_ELEMENTS); + // Do the store and update the write barrier. + // ecx is a smi, use times_half_pointer_size instead of + // times_pointer_size + __ lea(ecx, FieldOperand(edi, + ecx, + times_half_pointer_size, + FixedArray::kHeaderSize)); + __ mov(Operand(ecx, 0), eax); + // Make sure to preserve the value in register eax. + __ mov(edx, eax); + __ RecordWrite(edi, ecx, edx, kDontSaveFPRegs); + } // Done. __ ret(0); @@ -3884,6 +3968,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, Handle<Code> ic_force_generic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ jmp(ic_force_generic, RelocInfo::CODE_TARGET); + + // Handle transition to other elements kinds without using the generic stub. + __ bind(&transition_elements_kind); + Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); + __ jmp(ic_miss, RelocInfo::CODE_TARGET); } @@ -3896,8 +3985,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - Label miss_force_generic, smi_value, is_nan, maybe_nan; - Label have_double_value, not_nan; + Label miss_force_generic, transition_elements_kind; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -3918,59 +4006,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( } __ j(above_equal, &miss_force_generic); - __ JumpIfSmi(eax, &smi_value, Label::kNear); - - __ CheckMap(eax, - masm->isolate()->factory()->heap_number_map(), - &miss_force_generic, - DONT_DO_SMI_CHECK); - - // Double value, canonicalize NaN. 
- uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); - __ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32)); - __ j(greater_equal, &maybe_nan, Label::kNear); - - __ bind(&not_nan); - ExternalReference canonical_nan_reference = - ExternalReference::address_of_canonical_non_hole_nan(); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset)); - __ bind(&have_double_value); - __ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize), - xmm0); - __ ret(0); - } else { - __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ bind(&have_double_value); - __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize)); - __ ret(0); - } - - __ bind(&maybe_nan); - // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise - // it's an Infinity, and the non-NaN code path applies. - __ j(greater, &is_nan, Label::kNear); - __ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0)); - __ j(zero, &not_nan); - __ bind(&is_nan); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference)); - } else { - __ fld_d(Operand::StaticVariable(canonical_nan_reference)); - } - __ jmp(&have_double_value, Label::kNear); - - __ bind(&smi_value); - // Value is a smi. convert to a double and store. - // Preserve original value. - __ mov(edx, eax); - __ SmiUntag(edx); - __ push(edx); - __ fild_s(Operand(esp, 0)); - __ pop(edx); - __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize)); + __ StoreNumberToDoubleElements(eax, + edi, + ecx, + edx, + xmm0, + &transition_elements_kind, + true); + __ ret(0); // Handle store cache miss, replacing the ic with the generic stub. @@ -3978,6 +4020,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Handle<Code> ic_force_generic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ jmp(ic_force_generic, RelocInfo::CODE_TARGET); + + // Handle transition to other elements kinds without using the generic stub. + __ bind(&transition_elements_kind); + Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); + __ jmp(ic_miss, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h index b4f789cb4..498cf3af3 100644 --- a/deps/v8/src/ic-inl.h +++ b/deps/v8/src/ic-inl.h @@ -87,6 +87,8 @@ void IC::SetTargetAtAddress(Address address, Code* target) { } #endif Assembler::set_target_address_at(address, target->instruction_start()); + target->GetHeap()->incremental_marking()->RecordCodeTargetPatch(address, + target); } diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index 0f76a9a06..d5056a9ce 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -167,7 +167,7 @@ static bool HasNormalObjectsInPrototypeChain(Isolate* isolate, LookupResult* lookup, Object* receiver) { Object* end = lookup->IsProperty() - ?
lookup->holder() : Object::cast(isolate->heap()->null_value()); for (Object* current = receiver; current != end; current = current->GetPrototype()) { @@ -1084,14 +1084,22 @@ MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck( } -MaybeObject* KeyedLoadIC::ConstructMegamorphicStub( +MaybeObject* KeyedLoadIC::ComputePolymorphicStub( MapList* receiver_maps, - CodeList* targets, StrictModeFlag strict_mode) { + CodeList handler_ics(receiver_maps->length()); + for (int i = 0; i < receiver_maps->length(); ++i) { + Map* receiver_map(receiver_maps->at(i)); + MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck( + receiver_map, strict_mode); + Code* cached_stub; + if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub; + handler_ics.Add(cached_stub); + } Object* object; KeyedLoadStubCompiler compiler; - MaybeObject* maybe_code = compiler.CompileLoadMegamorphic(receiver_maps, - targets); + MaybeObject* maybe_code = compiler.CompileLoadPolymorphic(receiver_maps, + &handler_ics); if (!maybe_code->ToObject(&object)) return maybe_code; isolate()->counters()->keyed_load_polymorphic_stubs()->Increment(); PROFILE(isolate(), CodeCreateEvent( @@ -1243,7 +1251,7 @@ MaybeObject* KeyedLoadIC::Load(State state, stub = indexed_interceptor_stub(); } else if (key->IsSmi() && (target() != non_strict_arguments_stub())) { MaybeObject* maybe_stub = ComputeStub(receiver, - false, + LOAD, kNonStrictMode, stub); stub = maybe_stub->IsFailure() ? @@ -1351,7 +1359,7 @@ static bool StoreICableLookup(LookupResult* lookup) { } -static bool LookupForWrite(JSReceiver* receiver, +static bool LookupForWrite(JSObject* receiver, String* name, LookupResult* lookup) { receiver->LocalLookup(name, lookup); @@ -1359,12 +1367,10 @@ static bool LookupForWrite(JSReceiver* receiver, return false; } - if (lookup->type() == INTERCEPTOR) { - JSObject* object = JSObject::cast(receiver); - if (object->GetNamedInterceptor()->setter()->IsUndefined()) { - object->LocalLookupRealNamedProperty(name, lookup); - return StoreICableLookup(lookup); - } + if (lookup->type() == INTERCEPTOR && + receiver->GetNamedInterceptor()->setter()->IsUndefined()) { + receiver->LocalLookupRealNamedProperty(name, lookup); + return StoreICableLookup(lookup); } return true; @@ -1376,28 +1382,28 @@ MaybeObject* StoreIC::Store(State state, Handle<Object> object, Handle<String> name, Handle<Object> value) { - // If the object is undefined or null it's illegal to try to set any - // properties on it; throw a TypeError in that case. - if (object->IsUndefined() || object->IsNull()) { - return TypeError("non_object_property_store", object, name); - } + if (!object->IsJSObject()) { + // Handle proxies. + if (object->IsJSProxy()) { + return JSProxy::cast(*object)-> + SetProperty(*name, *value, NONE, strict_mode); + } + + // If the object is undefined or null it's illegal to try to set any + // properties on it; throw a TypeError in that case. + if (object->IsUndefined() || object->IsNull()) { + return TypeError("non_object_property_store", object, name); + } - if (!object->IsJSReceiver()) { // The length property of string values is read-only. Throw in strict mode. if (strict_mode == kStrictMode && object->IsString() && name->Equals(isolate()->heap()->length_symbol())) { return TypeError("strict_read_only_property", object, name); } - // Ignore stores where the receiver is not a JSObject. + // Ignore other stores where the receiver is not a JSObject. return *value; } - // Handle proxies. 
- if (object->IsJSProxy()) { - return JSReceiver::cast(*object)-> - SetProperty(*name, *value, NONE, strict_mode); - } - Handle<JSObject> receiver = Handle<JSObject>::cast(object); // Check if the given name is an array index. @@ -1595,14 +1601,15 @@ void KeyedIC::GetReceiverMapsForStub(Code* stub, MapList* result) { MaybeObject* KeyedIC::ComputeStub(JSObject* receiver, - bool is_store, + StubKind stub_kind, StrictModeFlag strict_mode, Code* generic_stub) { State ic_state = target()->ic_state(); - if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) { + if ((ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) && + !IsTransitionStubKind(stub_kind)) { Code* monomorphic_stub; MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver, - is_store, + stub_kind, strict_mode, generic_stub); if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub; @@ -1621,9 +1628,21 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver, // Determine the list of receiver maps that this call site has seen, // adding the map that was just encountered. MapList target_receiver_maps; - GetReceiverMapsForStub(target(), &target_receiver_maps); - if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map())) { - // If the miss wasn't due to an unseen map, a MEGAMORPHIC stub + if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) { + target_receiver_maps.Add(receiver->map()); + } else { + GetReceiverMapsForStub(target(), &target_receiver_maps); + } + bool map_added = + AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map()); + if (IsTransitionStubKind(stub_kind)) { + MaybeObject* maybe_map = ComputeTransitionedMap(receiver, stub_kind); + Map* new_map = NULL; + if (!maybe_map->To(&new_map)) return maybe_map; + map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, new_map); + } + if (!map_added) { + // If the miss wasn't due to an unseen map, a polymorphic stub // won't help, use the generic stub. return generic_stub; } @@ -1644,21 +1663,9 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver, ASSERT(maybe_cached_stub->IsCode()); return Code::cast(maybe_cached_stub); } - // Collect MONOMORPHIC stubs for all target_receiver_maps. - CodeList handler_ics(target_receiver_maps.length()); - for (int i = 0; i < target_receiver_maps.length(); ++i) { - Map* receiver_map(target_receiver_maps.at(i)); - MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck( - receiver_map, strict_mode); - Code* cached_stub; - if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub; - handler_ics.Add(cached_stub); - } - // Build the MEGAMORPHIC stub. 
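The keyed-store code that follows works over the new elements-kind lattice: FAST_SMI_ONLY_ELEMENTS may transition to FAST_DOUBLE_ELEMENTS or FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS only to FAST_ELEMENTS, and FAST_ELEMENTS is terminal. The GetTransitionedMap helper below picks the most generic transitioned map already seen at the call site; here is a simplified sketch of that choice over kinds instead of Map pointers (the enum and helper name are illustrative assumptions).

#include <algorithm>
#include <vector>

enum ElementsKind {  // from most specific to most generic fast kind
  FAST_SMI_ONLY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS,
  FAST_ELEMENTS
};

// Returns true and sets *out to the most generic kind in `seen` that `kind`
// can transition to; returns false if no transitioned kind has been seen.
bool MostGenericTransitionedKind(ElementsKind kind,
                                 const std::vector<ElementsKind>& seen,
                                 ElementsKind* out) {
  if (kind == FAST_ELEMENTS) return false;  // nothing more generic to go to
  std::vector<ElementsKind> targets;        // candidates, most generic first
  if (kind == FAST_SMI_ONLY_ELEMENTS) {
    targets = {FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS};
  } else {  // FAST_DOUBLE_ELEMENTS
    targets = {FAST_ELEMENTS};
  }
  for (ElementsKind target : targets) {
    if (std::find(seen.begin(), seen.end(), target) != seen.end()) {
      *out = target;
      return true;
    }
  }
  return false;
}

A hit is what lets CompileStorePolymorphic emit a transition-then-store handler for that receiver map instead of a plain monomorphic handler.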
+ MaybeObject* maybe_stub = + ComputePolymorphicStub(&target_receiver_maps, strict_mode); Code* stub; - MaybeObject* maybe_stub = ConstructMegamorphicStub(&target_receiver_maps, - &handler_ics, - strict_mode); if (!maybe_stub->To(&stub)) return maybe_stub; MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub); if (maybe_update->IsFailure()) return maybe_update; @@ -1675,6 +1682,7 @@ MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck( } else { ASSERT(receiver_map->has_dictionary_elements() || receiver_map->has_fast_elements() || + receiver_map->has_fast_smi_only_elements() || receiver_map->has_fast_double_elements() || receiver_map->has_external_array_elements()); bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; @@ -1685,17 +1693,18 @@ MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck( MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver, - bool is_store, + StubKind stub_kind, StrictModeFlag strict_mode, Code* generic_stub) { Code* result = NULL; if (receiver->HasFastElements() || + receiver->HasFastSmiOnlyElements() || receiver->HasExternalArrayElements() || receiver->HasFastDoubleElements() || receiver->HasDictionaryElements()) { MaybeObject* maybe_stub = isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement( - receiver, is_store, strict_mode); + receiver, stub_kind, strict_mode); if (!maybe_stub->To(&result)) return maybe_stub; } else { result = generic_stub; @@ -1704,6 +1713,21 @@ MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver, } +MaybeObject* KeyedIC::ComputeTransitionedMap(JSObject* receiver, + StubKind stub_kind) { + switch (stub_kind) { + case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT: + case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT: + return receiver->GetElementsTransitionMap(FAST_ELEMENTS); + case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE: + return receiver->GetElementsTransitionMap(FAST_DOUBLE_ELEMENTS); + default: + UNREACHABLE(); + return NULL; + } +} + + MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck( bool is_js_array, ElementsKind elements_kind) { @@ -1711,14 +1735,88 @@ MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck( } -MaybeObject* KeyedStoreIC::ConstructMegamorphicStub( +// If |map| is contained in |maps_list|, returns |map|; otherwise returns NULL. +Map* GetMapIfPresent(Map* map, MapList* maps_list) { + for (int i = 0; i < maps_list->length(); ++i) { + if (maps_list->at(i) == map) return map; + } + return NULL; +} + + +// Returns the most generic transitioned map for |map| that's found in +// |maps_list|, or NULL if no transitioned map for |map| is found at all. +Map* GetTransitionedMap(Map* map, MapList* maps_list) { + ElementsKind elements_kind = map->elements_kind(); + if (elements_kind == FAST_ELEMENTS) { + return NULL; + } + if (elements_kind == FAST_DOUBLE_ELEMENTS) { + bool dummy = true; + Map* fast_map = map->LookupElementsTransitionMap(FAST_ELEMENTS, &dummy); + if (fast_map == NULL) return NULL; + return GetMapIfPresent(fast_map, maps_list); + } + if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + bool dummy = true; + Map* double_map = map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, + &dummy); + // In the current implementation, if the DOUBLE map doesn't exist, the + // FAST map can't exist either. + if (double_map == NULL) return NULL; + Map* fast_map = map->LookupElementsTransitionMap(FAST_ELEMENTS, &dummy); + if (fast_map == NULL) { + return GetMapIfPresent(double_map, maps_list); + } + // Both double_map and fast_map are non-NULL. 
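GetTransitionedMap (its tail continues just below) encodes the FAST_SMI_ONLY_ELEMENTS → FAST_DOUBLE_ELEMENTS → FAST_ELEMENTS ordering: it answers with the most generic transitioned map that is actually present in the call site's map list, preferring the FAST map over the DOUBLE map when both are there. A stand-alone sketch of that preference order with an invented Kind enum; unlike the real function, it returns the current kind rather than NULL when no transition target is present:

    #include <vector>

    // Illustrative elements kinds, from most specific to most generic.
    enum class Kind { kSmiOnly, kDouble, kObject };

    bool Contains(const std::vector<Kind>& kinds, Kind k) {
      for (Kind seen : kinds) if (seen == k) return true;
      return false;
    }

    // Most generic transitioned kind for 'current' that appears in 'seen_kinds'.
    Kind MostGenericTransition(Kind current, const std::vector<Kind>& seen_kinds) {
      if (current == Kind::kObject) return current;  // already most generic
      if (Contains(seen_kinds, Kind::kObject)) return Kind::kObject;
      if (current == Kind::kSmiOnly && Contains(seen_kinds, Kind::kDouble))
        return Kind::kDouble;
      return current;
    }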
Return fast_map if it's in + // maps_list, double_map otherwise. + Map* fast_map_present = GetMapIfPresent(fast_map, maps_list); + if (fast_map_present != NULL) return fast_map_present; + return GetMapIfPresent(double_map, maps_list); + } + return NULL; +} + + +MaybeObject* KeyedStoreIC::ComputePolymorphicStub( MapList* receiver_maps, - CodeList* targets, StrictModeFlag strict_mode) { + // TODO(yangguo): <remove> + Code* generic_stub = (strict_mode == kStrictMode) + ? isolate()->builtins()->builtin(Builtins::kKeyedStoreIC_Generic_Strict) + : isolate()->builtins()->builtin(Builtins::kKeyedStoreIC_Generic); + // </remove> + + // Collect MONOMORPHIC stubs for all target_receiver_maps. + CodeList handler_ics(receiver_maps->length()); + MapList transitioned_maps(receiver_maps->length()); + for (int i = 0; i < receiver_maps->length(); ++i) { + Map* receiver_map(receiver_maps->at(i)); + MaybeObject* maybe_cached_stub = NULL; + Map* transitioned_map = GetTransitionedMap(receiver_map, receiver_maps); + if (transitioned_map != NULL) { + // TODO(yangguo): Enable this code! + // maybe_cached_stub = FastElementsConversionStub( + // receiver_map->elements_kind(), // original elements_kind + // transitioned_map->elements_kind(), + // receiver_map->instance_type() == JS_ARRAY_TYPE, // is_js_array + // strict_mode_).TryGetCode(); + // TODO(yangguo): <remove> + maybe_cached_stub = generic_stub; + // </remove> + } else { + maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck( + receiver_map, strict_mode); + } + Code* cached_stub; + if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub; + handler_ics.Add(cached_stub); + transitioned_maps.Add(transitioned_map); + } Object* object; KeyedStoreStubCompiler compiler(strict_mode); - MaybeObject* maybe_code = compiler.CompileStoreMegamorphic(receiver_maps, - targets); + MaybeObject* maybe_code = compiler.CompileStorePolymorphic( + receiver_maps, &handler_ics, &transitioned_maps); if (!maybe_code->ToObject(&object)) return maybe_code; isolate()->counters()->keyed_store_polymorphic_stubs()->Increment(); PROFILE(isolate(), CodeCreateEvent( @@ -1786,9 +1884,21 @@ MaybeObject* KeyedStoreIC::Store(State state, stub = non_strict_arguments_stub(); } else if (!force_generic) { if (key->IsSmi() && (target() != non_strict_arguments_stub())) { + StubKind stub_kind = STORE_NO_TRANSITION; + if (receiver->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) { + if (value->IsHeapNumber()) { + stub_kind = STORE_TRANSITION_SMI_TO_DOUBLE; + } else if (value->IsHeapObject()) { + stub_kind = STORE_TRANSITION_SMI_TO_OBJECT; + } + } else if (receiver->GetElementsKind() == FAST_DOUBLE_ELEMENTS) { + if (!value->IsSmi() && !value->IsHeapNumber()) { + stub_kind = STORE_TRANSITION_DOUBLE_TO_OBJECT; + } + } HandleScope scope(isolate()); MaybeObject* maybe_stub = ComputeStub(receiver, - true, + stub_kind, strict_mode, stub); stub = maybe_stub->IsFailure() ? 
@@ -2402,7 +2512,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) { Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate); bool caught_exception; - Object** builtin_args[] = { right.location() }; + Handle<Object> builtin_args[] = { right }; Handle<Object> result = Execution::Call(builtin_function, left, ARRAY_SIZE(builtin_args), diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h index ece5be9f0..ca8447eb8 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -342,6 +342,13 @@ class LoadIC: public IC { class KeyedIC: public IC { public: + enum StubKind { + LOAD, + STORE_NO_TRANSITION, + STORE_TRANSITION_SMI_TO_OBJECT, + STORE_TRANSITION_SMI_TO_DOUBLE, + STORE_TRANSITION_DOUBLE_TO_OBJECT + }; explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {} virtual ~KeyedIC() {} @@ -357,26 +364,30 @@ class KeyedIC: public IC { virtual Code::Kind kind() const = 0; MaybeObject* ComputeStub(JSObject* receiver, - bool is_store, + StubKind stub_kind, StrictModeFlag strict_mode, Code* default_stub); - virtual MaybeObject* ConstructMegamorphicStub( - MapList* receiver_maps, - CodeList* targets, - StrictModeFlag strict_mode) = 0; - - private: - void GetReceiverMapsForStub(Code* stub, MapList* result); + virtual MaybeObject* ComputePolymorphicStub(MapList* receiver_maps, + StrictModeFlag strict_mode) = 0; MaybeObject* ComputeMonomorphicStubWithoutMapCheck( Map* receiver_map, StrictModeFlag strict_mode); + private: + void GetReceiverMapsForStub(Code* stub, MapList* result); + MaybeObject* ComputeMonomorphicStub(JSObject* receiver, - bool is_store, + StubKind stub_kind, StrictModeFlag strict_mode, Code* default_stub); + + MaybeObject* ComputeTransitionedMap(JSObject* receiver, StubKind stub_kind); + + static bool IsTransitionStubKind(StubKind stub_kind) { + return stub_kind > STORE_NO_TRANSITION; + } }; @@ -419,9 +430,8 @@ class KeyedLoadIC: public KeyedIC { protected: virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; } - virtual MaybeObject* ConstructMegamorphicStub( + virtual MaybeObject* ComputePolymorphicStub( MapList* receiver_maps, - CodeList* targets, StrictModeFlag strict_mode); virtual Code* string_stub() { @@ -570,9 +580,8 @@ class KeyedStoreIC: public KeyedIC { protected: virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; } - virtual MaybeObject* ConstructMegamorphicStub( + virtual MaybeObject* ComputePolymorphicStub( MapList* receiver_maps, - CodeList* targets, StrictModeFlag strict_mode); private: diff --git a/deps/v8/src/incremental-marking-inl.h b/deps/v8/src/incremental-marking-inl.h new file mode 100644 index 000000000..43fe0f553 --- /dev/null +++ b/deps/v8/src/incremental-marking-inl.h @@ -0,0 +1,155 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_INCREMENTAL_MARKING_INL_H_ +#define V8_INCREMENTAL_MARKING_INL_H_ + +#include "incremental-marking.h" + +namespace v8 { +namespace internal { + + +bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, + Object** slot, + Object* value) { + if (IsMarking() && value->IsHeapObject()) { + MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value)); + if (Marking::IsWhite(value_bit)) { + MarkBit obj_bit = Marking::MarkBitFrom(obj); + if (Marking::IsBlack(obj_bit)) { + BlackToGreyAndUnshift(obj, obj_bit); + RestartIfNotMarking(); + } + + // Object is either grey or white it will be scanned if survives. + return false; + } + return true; + } + return false; +} + + +void IncrementalMarking::RecordWrite(HeapObject* obj, + Object** slot, + Object* value) { + if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) { + MarkBit obj_bit = Marking::MarkBitFrom(obj); + if (Marking::IsBlack(obj_bit)) { + // Object is not going to be rescanned we need to record the slot. + heap_->mark_compact_collector()->RecordSlot( + HeapObject::RawField(obj, 0), slot, value); + } + } +} + + +void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, + RelocInfo* rinfo, + Object* value) { + if (IsMarking() && value->IsHeapObject()) { + MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value)); + if (Marking::IsWhite(value_bit)) { + MarkBit obj_bit = Marking::MarkBitFrom(obj); + if (Marking::IsBlack(obj_bit)) { + BlackToGreyAndUnshift(obj, obj_bit); + RestartIfNotMarking(); + } + + // Object is either grey or white it will be scanned if survives. + return; + } + + if (is_compacting_) { + MarkBit obj_bit = Marking::MarkBitFrom(obj); + if (Marking::IsBlack(obj_bit)) { + // Object is not going to be rescanned we need to record the slot. 
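RecordWrite above is the incremental write barrier: storing a still-white value into an already-black object pushes the black object back to grey so it will be rescanned, and, when compaction is running, slots inside black objects are recorded so they can be updated after objects move. A self-contained tri-colour sketch of that decision; the types and the two global buffers are invented stand-ins for HeapObject, the marking deque and the slot buffer:

    #include <vector>

    enum class Color { kWhite, kGrey, kBlack };

    struct Obj { Color color = Color::kWhite; };

    std::vector<Obj*> grey_worklist;    // stand-in for the marking deque
    std::vector<Obj**> recorded_slots;  // stand-in for the slot buffer

    // Barrier run for "obj->*slot = value" while incremental marking is active.
    void RecordWrite(Obj* obj, Obj** slot, Obj* value, bool is_compacting) {
      if (value == nullptr) return;
      if (value->color == Color::kWhite) {
        // The store could hide a white object behind an already-scanned (black)
        // one, so turn the black object grey again and requeue it.
        if (obj->color == Color::kBlack) {
          obj->color = Color::kGrey;
          grey_worklist.push_back(obj);
        }
        return;
      }
      // The value is already marked; if it may move later, remember the slot.
      if (is_compacting && obj->color == Color::kBlack) {
        recorded_slots.push_back(slot);
      }
    }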
+ heap_->mark_compact_collector()->RecordRelocSlot(rinfo, + Code::cast(value)); + } + } + } +} + + +void IncrementalMarking::RecordWrites(HeapObject* obj) { + if (IsMarking()) { + MarkBit obj_bit = Marking::MarkBitFrom(obj); + if (Marking::IsBlack(obj_bit)) { + BlackToGreyAndUnshift(obj, obj_bit); + RestartIfNotMarking(); + } + } +} + + +void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj, + MarkBit mark_bit) { + ASSERT(Marking::MarkBitFrom(obj) == mark_bit); + ASSERT(obj->Size() >= 2*kPointerSize); + ASSERT(IsMarking()); + Marking::BlackToGrey(mark_bit); + int obj_size = obj->Size(); + MemoryChunk::IncrementLiveBytes(obj->address(), -obj_size); + int64_t old_bytes_rescanned = bytes_rescanned_; + bytes_rescanned_ = old_bytes_rescanned + obj_size; + if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) { + if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) { + // If we have queued twice the heap size for rescanning then we are + // going around in circles, scanning the same objects again and again + // as the program mutates the heap faster than we can incrementally + // trace it. In this case we switch to non-incremental marking in + // order to finish off this marking phase. + if (FLAG_trace_gc) { + PrintF("Hurrying incremental marking because of lack of progress\n"); + } + allocation_marking_factor_ = kMaxAllocationMarkingFactor; + } + } + + marking_deque_.UnshiftGrey(obj); +} + + +void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) { + WhiteToGrey(obj, mark_bit); + marking_deque_.PushGrey(obj); +} + + +void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) { + ASSERT(Marking::MarkBitFrom(obj) == mark_bit); + ASSERT(obj->Size() >= 2*kPointerSize); + ASSERT(IsMarking()); + Marking::WhiteToGrey(mark_bit); +} + + +} } // namespace v8::internal + +#endif // V8_INCREMENTAL_MARKING_INL_H_ diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc new file mode 100644 index 000000000..88ebd783e --- /dev/null +++ b/deps/v8/src/incremental-marking.cc @@ -0,0 +1,818 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "incremental-marking.h" + +#include "code-stubs.h" +#include "compilation-cache.h" +#include "v8conversions.h" + +namespace v8 { +namespace internal { + + +IncrementalMarking::IncrementalMarking(Heap* heap) + : heap_(heap), + state_(STOPPED), + marking_deque_memory_(NULL), + steps_count_(0), + steps_took_(0), + longest_step_(0.0), + old_generation_space_available_at_start_of_incremental_(0), + old_generation_space_used_at_start_of_incremental_(0), + steps_count_since_last_gc_(0), + steps_took_since_last_gc_(0), + should_hurry_(false), + allocation_marking_factor_(0), + allocated_(0) { +} + + +void IncrementalMarking::TearDown() { + delete marking_deque_memory_; +} + + +void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, + Object* value, + Isolate* isolate) { + ASSERT(obj->IsHeapObject()); + + // Fast cases should already be covered by RecordWriteStub. + ASSERT(value->IsHeapObject()); + ASSERT(!value->IsHeapNumber()); + ASSERT(!value->IsString() || + value->IsConsString() || + value->IsSlicedString()); + ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value)))); + + IncrementalMarking* marking = isolate->heap()->incremental_marking(); + ASSERT(!marking->is_compacting_); + marking->RecordWrite(obj, NULL, value); +} + + +void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj, + Object** slot, + Isolate* isolate) { + IncrementalMarking* marking = isolate->heap()->incremental_marking(); + ASSERT(marking->is_compacting_); + marking->RecordWrite(obj, slot, *slot); +} + + +void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) { + if (IsMarking()) { + Code* host = heap_->isolate()->inner_pointer_to_code_cache()-> + GcSafeFindCodeForInnerPointer(pc); + RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); + RecordWriteIntoCode(host, &rinfo, value); + } +} + + +void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, + Object** slot, + Code* value) { + if (BaseRecordWrite(host, slot, value) && is_compacting_) { + ASSERT(slot != NULL); + heap_->mark_compact_collector()-> + RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value); + } +} + + + +class IncrementalMarkingMarkingVisitor : public ObjectVisitor { + public: + IncrementalMarkingMarkingVisitor(Heap* heap, + IncrementalMarking* incremental_marking) + : heap_(heap), + incremental_marking_(incremental_marking) { + } + + void VisitEmbeddedPointer(RelocInfo* rinfo) { + ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); + Object* target = rinfo->target_object(); + if (target->NonFailureIsHeapObject()) { + heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target); + MarkObject(target); + } + } + + void VisitCodeTarget(RelocInfo* rinfo) { + ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); + Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); + heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target)); + MarkObject(target); + } + + void VisitDebugTarget(RelocInfo* 
rinfo) { + ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && + rinfo->IsPatchedReturnSequence()) || + (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && + rinfo->IsPatchedDebugBreakSlotSequence())); + Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); + heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target)); + MarkObject(target); + } + + void VisitCodeEntry(Address entry_address) { + Object* target = Code::GetObjectFromEntryAddress(entry_address); + heap_->mark_compact_collector()-> + RecordCodeEntrySlot(entry_address, Code::cast(target)); + MarkObject(target); + } + + void VisitPointer(Object** p) { + Object* obj = *p; + if (obj->NonFailureIsHeapObject()) { + heap_->mark_compact_collector()->RecordSlot(p, p, obj); + MarkObject(obj); + } + } + + void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) { + Object* obj = *p; + if (obj->NonFailureIsHeapObject()) { + heap_->mark_compact_collector()->RecordSlot(start, p, obj); + MarkObject(obj); + } + } + } + + private: + // Mark object pointed to by p. + INLINE(void MarkObject(Object* obj)) { + HeapObject* heap_object = HeapObject::cast(obj); + MarkBit mark_bit = Marking::MarkBitFrom(heap_object); + if (mark_bit.data_only()) { + if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) { + MemoryChunk::IncrementLiveBytes(heap_object->address(), + heap_object->Size()); + } + } else if (Marking::IsWhite(mark_bit)) { + incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit); + } + } + + Heap* heap_; + IncrementalMarking* incremental_marking_; +}; + + +class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor { + public: + IncrementalMarkingRootMarkingVisitor(Heap* heap, + IncrementalMarking* incremental_marking) + : heap_(heap), + incremental_marking_(incremental_marking) { + } + + void VisitPointer(Object** p) { + MarkObjectByPointer(p); + } + + void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) MarkObjectByPointer(p); + } + + private: + void MarkObjectByPointer(Object** p) { + Object* obj = *p; + if (!obj->IsHeapObject()) return; + + HeapObject* heap_object = HeapObject::cast(obj); + MarkBit mark_bit = Marking::MarkBitFrom(heap_object); + if (mark_bit.data_only()) { + if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) { + MemoryChunk::IncrementLiveBytes(heap_object->address(), + heap_object->Size()); + } + } else { + if (Marking::IsWhite(mark_bit)) { + incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit); + } + } + } + + Heap* heap_; + IncrementalMarking* incremental_marking_; +}; + + +void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk, + bool is_marking, + bool is_compacting) { + if (is_marking) { + chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); + chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); + + // It's difficult to filter out slots recorded for large objects. 
+ if (chunk->owner()->identity() == LO_SPACE && + chunk->size() > static_cast<size_t>(Page::kPageSize) && + is_compacting) { + chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION); + } + } else if (chunk->owner()->identity() == CELL_SPACE || + chunk->scan_on_scavenge()) { + chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); + chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); + } else { + chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); + chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); + } +} + + +void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk, + bool is_marking) { + chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); + if (is_marking) { + chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); + } else { + chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); + } + chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE); +} + + +void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace( + PagedSpace* space) { + PageIterator it(space); + while (it.has_next()) { + Page* p = it.next(); + SetOldSpacePageFlags(p, false, false); + } +} + + +void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace( + NewSpace* space) { + NewSpacePageIterator it(space); + while (it.has_next()) { + NewSpacePage* p = it.next(); + SetNewSpacePageFlags(p, false); + } +} + + +void IncrementalMarking::DeactivateIncrementalWriteBarrier() { + DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space()); + DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space()); + DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space()); + DeactivateIncrementalWriteBarrierForSpace(heap_->map_space()); + DeactivateIncrementalWriteBarrierForSpace(heap_->code_space()); + DeactivateIncrementalWriteBarrierForSpace(heap_->new_space()); + + LargePage* lop = heap_->lo_space()->first_page(); + while (lop->is_valid()) { + SetOldSpacePageFlags(lop, false, false); + lop = lop->next_page(); + } +} + + +void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) { + PageIterator it(space); + while (it.has_next()) { + Page* p = it.next(); + SetOldSpacePageFlags(p, true, is_compacting_); + } +} + + +void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) { + NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd()); + while (it.has_next()) { + NewSpacePage* p = it.next(); + SetNewSpacePageFlags(p, true); + } +} + + +void IncrementalMarking::ActivateIncrementalWriteBarrier() { + ActivateIncrementalWriteBarrier(heap_->old_pointer_space()); + ActivateIncrementalWriteBarrier(heap_->old_data_space()); + ActivateIncrementalWriteBarrier(heap_->cell_space()); + ActivateIncrementalWriteBarrier(heap_->map_space()); + ActivateIncrementalWriteBarrier(heap_->code_space()); + ActivateIncrementalWriteBarrier(heap_->new_space()); + + LargePage* lop = heap_->lo_space()->first_page(); + while (lop->is_valid()) { + SetOldSpacePageFlags(lop, true, is_compacting_); + lop = lop->next_page(); + } +} + + +bool IncrementalMarking::WorthActivating() { +#ifndef DEBUG + static const intptr_t kActivationThreshold = 8 * MB; +#else + // TODO(gc) consider setting this to some low level so that some + // debug tests run with incremental marking and some without. 
+ static const intptr_t kActivationThreshold = 0; +#endif + + return FLAG_incremental_marking && + !Serializer::enabled() && + heap_->PromotedSpaceSize() > kActivationThreshold; +} + + +void IncrementalMarking::ActivateGeneratedStub(Code* stub) { + ASSERT(RecordWriteStub::GetMode(stub) == + RecordWriteStub::STORE_BUFFER_ONLY); + + if (!IsMarking()) { + // Initially stub is generated in STORE_BUFFER_ONLY mode thus + // we don't need to do anything if incremental marking is + // not active. + } else if (IsCompacting()) { + RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION); + } else { + RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL); + } +} + + +static void PatchIncrementalMarkingRecordWriteStubs( + Heap* heap, RecordWriteStub::Mode mode) { + NumberDictionary* stubs = heap->code_stubs(); + + int capacity = stubs->Capacity(); + for (int i = 0; i < capacity; i++) { + Object* k = stubs->KeyAt(i); + if (stubs->IsKey(k)) { + uint32_t key = NumberToUint32(k); + + if (CodeStub::MajorKeyFromKey(key) == + CodeStub::RecordWrite) { + Object* e = stubs->ValueAt(i); + if (e->IsCode()) { + RecordWriteStub::Patch(Code::cast(e), mode); + } + } + } + } +} + + +void IncrementalMarking::EnsureMarkingDequeIsCommitted() { + if (marking_deque_memory_ == NULL) { + marking_deque_memory_ = new VirtualMemory(4 * MB); + marking_deque_memory_->Commit( + reinterpret_cast<Address>(marking_deque_memory_->address()), + marking_deque_memory_->size(), + false); // Not executable. + } +} + + +void IncrementalMarking::Start() { + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Start\n"); + } + ASSERT(FLAG_incremental_marking); + ASSERT(state_ == STOPPED); + + ResetStepCounters(); + + if (heap_->old_pointer_space()->IsSweepingComplete() && + heap_->old_data_space()->IsSweepingComplete()) { + StartMarking(ALLOW_COMPACTION); + } else { + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Start sweeping.\n"); + } + state_ = SWEEPING; + } + + heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold); +} + + +static void MarkObjectGreyDoNotEnqueue(Object* obj) { + if (obj->IsHeapObject()) { + HeapObject* heap_obj = HeapObject::cast(obj); + MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj)); + if (Marking::IsBlack(mark_bit)) { + MemoryChunk::IncrementLiveBytes(heap_obj->address(), + -heap_obj->Size()); + } + Marking::AnyToGrey(mark_bit); + } +} + + +void IncrementalMarking::StartMarking(CompactionFlag flag) { + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Start marking\n"); + } + + is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) && + heap_->mark_compact_collector()->StartCompaction(); + + state_ = MARKING; + + RecordWriteStub::Mode mode = is_compacting_ ? + RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL; + + PatchIncrementalMarkingRecordWriteStubs(heap_, mode); + + EnsureMarkingDequeIsCommitted(); + + // Initialize marking stack. + Address addr = static_cast<Address>(marking_deque_memory_->address()); + size_t size = marking_deque_memory_->size(); + if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; + marking_deque_.Initialize(addr, addr + size); + + ActivateIncrementalWriteBarrier(); + +#ifdef DEBUG + // Marking bits are cleared by the sweeper. 
+ heap_->mark_compact_collector()->VerifyMarkbitsAreClean(); +#endif + + heap_->CompletelyClearInstanceofCache(); + heap_->isolate()->compilation_cache()->MarkCompactPrologue(); + + if (FLAG_cleanup_code_caches_at_gc) { + // We will mark cache black with a separate pass + // when we finish marking. + MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache()); + } + + // Mark strong roots grey. + IncrementalMarkingRootMarkingVisitor visitor(heap_, this); + heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); + + // Ready to start incremental marking. + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Running\n"); + } +} + + +void IncrementalMarking::PrepareForScavenge() { + if (!IsMarking()) return; + NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(), + heap_->new_space()->FromSpaceEnd()); + while (it.has_next()) { + Bitmap::Clear(it.next()); + } +} + + +void IncrementalMarking::UpdateMarkingDequeAfterScavenge() { + if (!IsMarking()) return; + + int current = marking_deque_.bottom(); + int mask = marking_deque_.mask(); + int limit = marking_deque_.top(); + HeapObject** array = marking_deque_.array(); + int new_top = current; + + Map* filler_map = heap_->one_pointer_filler_map(); + + while (current != limit) { + HeapObject* obj = array[current]; + ASSERT(obj->IsHeapObject()); + current = ((current + 1) & mask); + if (heap_->InNewSpace(obj)) { + MapWord map_word = obj->map_word(); + if (map_word.IsForwardingAddress()) { + HeapObject* dest = map_word.ToForwardingAddress(); + array[new_top] = dest; + new_top = ((new_top + 1) & mask); + ASSERT(new_top != marking_deque_.bottom()); +#ifdef DEBUG + MarkBit mark_bit = Marking::MarkBitFrom(obj); + ASSERT(Marking::IsGrey(mark_bit) || + (obj->IsFiller() && Marking::IsWhite(mark_bit))); +#endif + } + } else if (obj->map() != filler_map) { + // Skip one word filler objects that appear on the + // stack when we perform in place array shift. + array[new_top] = obj; + new_top = ((new_top + 1) & mask); + ASSERT(new_top != marking_deque_.bottom()); +#ifdef DEBUG + MarkBit mark_bit = Marking::MarkBitFrom(obj); + ASSERT(Marking::IsGrey(mark_bit) || + (obj->IsFiller() && Marking::IsWhite(mark_bit))); +#endif + } + } + marking_deque_.set_top(new_top); + + steps_took_since_last_gc_ = 0; + steps_count_since_last_gc_ = 0; + longest_step_ = 0.0; +} + + +void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) { + v->VisitPointers( + HeapObject::RawField( + ctx, Context::MarkCompactBodyDescriptor::kStartOffset), + HeapObject::RawField( + ctx, Context::MarkCompactBodyDescriptor::kEndOffset)); + + MarkCompactCollector* collector = heap_->mark_compact_collector(); + for (int idx = Context::FIRST_WEAK_SLOT; + idx < Context::GLOBAL_CONTEXT_SLOTS; + ++idx) { + Object** slot = + HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx)); + collector->RecordSlot(slot, slot, *slot); + } +} + + +void IncrementalMarking::Hurry() { + if (state() == MARKING) { + double start = 0.0; + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Hurry\n"); + start = OS::TimeCurrentMillis(); + } + // TODO(gc) hurry can mark objects it encounters black as mutator + // was stopped. + Map* filler_map = heap_->one_pointer_filler_map(); + Map* global_context_map = heap_->global_context_map(); + IncrementalMarkingMarkingVisitor marking_visitor(heap_, this); + while (!marking_deque_.IsEmpty()) { + HeapObject* obj = marking_deque_.Pop(); + + // Explicitly skip one word fillers. 
Incremental markbit patterns are + // correct only for objects that occupy at least two words. + Map* map = obj->map(); + if (map == filler_map) { + continue; + } else if (map == global_context_map) { + // Global contexts have weak fields. + VisitGlobalContext(Context::cast(obj), &marking_visitor); + } else { + obj->Iterate(&marking_visitor); + } + + MarkBit mark_bit = Marking::MarkBitFrom(obj); + ASSERT(!Marking::IsBlack(mark_bit)); + Marking::MarkBlack(mark_bit); + MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size()); + } + state_ = COMPLETE; + if (FLAG_trace_incremental_marking) { + double end = OS::TimeCurrentMillis(); + PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n", + static_cast<int>(end - start)); + } + } + + if (FLAG_cleanup_code_caches_at_gc) { + PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache(); + Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache)); + MemoryChunk::IncrementLiveBytes(poly_cache->address(), + PolymorphicCodeCache::kSize); + } + + Object* context = heap_->global_contexts_list(); + while (!context->IsUndefined()) { + NormalizedMapCache* cache = Context::cast(context)->normalized_map_cache(); + MarkBit mark_bit = Marking::MarkBitFrom(cache); + if (Marking::IsGrey(mark_bit)) { + Marking::GreyToBlack(mark_bit); + MemoryChunk::IncrementLiveBytes(cache->address(), cache->Size()); + } + context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); + } +} + + +void IncrementalMarking::Abort() { + if (IsStopped()) return; + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Aborting.\n"); + } + heap_->new_space()->LowerInlineAllocationLimit(0); + IncrementalMarking::set_should_hurry(false); + ResetStepCounters(); + if (IsMarking()) { + PatchIncrementalMarkingRecordWriteStubs(heap_, + RecordWriteStub::STORE_BUFFER_ONLY); + DeactivateIncrementalWriteBarrier(); + + if (is_compacting_) { + LargeObjectIterator it(heap_->lo_space()); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + Page* p = Page::FromAddress(obj->address()); + if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { + p->ClearFlag(Page::RESCAN_ON_EVACUATION); + } + } + } + } + heap_->isolate()->stack_guard()->Continue(GC_REQUEST); + state_ = STOPPED; + is_compacting_ = false; +} + + +void IncrementalMarking::Finalize() { + Hurry(); + state_ = STOPPED; + is_compacting_ = false; + heap_->new_space()->LowerInlineAllocationLimit(0); + IncrementalMarking::set_should_hurry(false); + ResetStepCounters(); + PatchIncrementalMarkingRecordWriteStubs(heap_, + RecordWriteStub::STORE_BUFFER_ONLY); + DeactivateIncrementalWriteBarrier(); + ASSERT(marking_deque_.IsEmpty()); + heap_->isolate()->stack_guard()->Continue(GC_REQUEST); +} + + +void IncrementalMarking::MarkingComplete() { + state_ = COMPLETE; + // We will set the stack guard to request a GC now. This will mean the rest + // of the GC gets performed as soon as possible (we can't do a GC here in a + // record-write context). If a few things get allocated between now and then + // that shouldn't make us do a scavenge and keep being incremental, so we set + // the should-hurry flag to indicate that there can't be much work left to do. 
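Hurry above (and Step further down) drain the marking deque, which is a power-of-two ring buffer: indices wrap with `(i + 1) & mask`, PushGrey adds at the top, Pop takes from the top, and UnshiftGrey re-inserts re-greyed objects at the bottom. A minimal stand-alone deque in the same spirit; the real one also handles overflow, which this sketch only asserts about:

    #include <cassert>
    #include <cstddef>

    template <typename T, size_t kCapacity>  // kCapacity must be a power of two
    class RingDeque {
     public:
      bool IsEmpty() const { return top_ == bottom_; }

      void PushGrey(T value) {      // add at the top
        array_[top_] = value;
        top_ = (top_ + 1) & kMask;
        assert(top_ != bottom_);    // overflow is not handled in this sketch
      }

      void UnshiftGrey(T value) {   // add at the bottom (re-greyed objects)
        bottom_ = (bottom_ - 1) & kMask;
        assert(top_ != bottom_);
        array_[bottom_] = value;
      }

      T Pop() {                     // take from the top
        assert(!IsEmpty());
        top_ = (top_ - 1) & kMask;
        return array_[top_];
      }

     private:
      static const size_t kMask = kCapacity - 1;
      T array_[kCapacity];
      size_t bottom_ = 0;
      size_t top_ = 0;
    };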
+ set_should_hurry(true); + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Complete (normal).\n"); + } + heap_->isolate()->stack_guard()->RequestGC(); +} + + +void IncrementalMarking::Step(intptr_t allocated_bytes) { + if (heap_->gc_state() != Heap::NOT_IN_GC || + !FLAG_incremental_marking || + !FLAG_incremental_marking_steps || + (state_ != SWEEPING && state_ != MARKING)) { + return; + } + + allocated_ += allocated_bytes; + + if (allocated_ < kAllocatedThreshold) return; + + intptr_t bytes_to_process = allocated_ * allocation_marking_factor_; + + double start = 0; + + if (FLAG_trace_incremental_marking || FLAG_trace_gc) { + start = OS::TimeCurrentMillis(); + } + + if (state_ == SWEEPING) { + if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) && + heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) { + StartMarking(PREVENT_COMPACTION); + } + } else if (state_ == MARKING) { + Map* filler_map = heap_->one_pointer_filler_map(); + Map* global_context_map = heap_->global_context_map(); + IncrementalMarkingMarkingVisitor marking_visitor(heap_, this); + while (!marking_deque_.IsEmpty() && bytes_to_process > 0) { + HeapObject* obj = marking_deque_.Pop(); + + // Explicitly skip one word fillers. Incremental markbit patterns are + // correct only for objects that occupy at least two words. + Map* map = obj->map(); + if (map == filler_map) continue; + + int size = obj->SizeFromMap(map); + bytes_to_process -= size; + MarkBit map_mark_bit = Marking::MarkBitFrom(map); + if (Marking::IsWhite(map_mark_bit)) { + WhiteToGreyAndPush(map, map_mark_bit); + } + + // TODO(gc) switch to static visitor instead of normal visitor. + if (map == global_context_map) { + // Global contexts have weak fields. + Context* ctx = Context::cast(obj); + + // We will mark cache black with a separate pass + // when we finish marking. + MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache()); + + VisitGlobalContext(ctx, &marking_visitor); + } else { + obj->IterateBody(map->instance_type(), size, &marking_visitor); + } + + MarkBit obj_mark_bit = Marking::MarkBitFrom(obj); + ASSERT(Marking::IsGrey(obj_mark_bit) || + (obj->IsFiller() && Marking::IsWhite(obj_mark_bit))); + Marking::MarkBlack(obj_mark_bit); + MemoryChunk::IncrementLiveBytes(obj->address(), size); + } + if (marking_deque_.IsEmpty()) MarkingComplete(); + } + + allocated_ = 0; + + steps_count_++; + steps_count_since_last_gc_++; + + bool speed_up = false; + + if (old_generation_space_available_at_start_of_incremental_ < 10 * MB || + SpaceLeftInOldSpace() < + old_generation_space_available_at_start_of_incremental_ >> 1) { + // Half of the space that was available is gone while we were + // incrementally marking. + speed_up = true; + old_generation_space_available_at_start_of_incremental_ = + SpaceLeftInOldSpace(); + } + + if (heap_->PromotedTotalSize() > + old_generation_space_used_at_start_of_incremental_ << 1) { + // Size of old space doubled while we were incrementally marking. 
+ speed_up = true; + old_generation_space_used_at_start_of_incremental_ = + heap_->PromotedTotalSize(); + } + + if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0 && + allocation_marking_factor_ < kMaxAllocationMarkingFactor) { + speed_up = true; + } + + if (speed_up && 0) { + allocation_marking_factor_ += kAllocationMarkingFactorSpeedup; + allocation_marking_factor_ = + static_cast<int>(allocation_marking_factor_ * 1.3); + if (FLAG_trace_gc) { + PrintF("Marking speed increased to %d\n", allocation_marking_factor_); + } + } + + if (FLAG_trace_incremental_marking || FLAG_trace_gc) { + double end = OS::TimeCurrentMillis(); + double delta = (end - start); + longest_step_ = Max(longest_step_, delta); + steps_took_ += delta; + steps_took_since_last_gc_ += delta; + } +} + + +void IncrementalMarking::ResetStepCounters() { + steps_count_ = 0; + steps_took_ = 0; + longest_step_ = 0.0; + old_generation_space_available_at_start_of_incremental_ = + SpaceLeftInOldSpace(); + old_generation_space_used_at_start_of_incremental_ = + heap_->PromotedTotalSize(); + steps_count_since_last_gc_ = 0; + steps_took_since_last_gc_ = 0; + bytes_rescanned_ = 0; + allocation_marking_factor_ = kInitialAllocationMarkingFactor; +} + + +int64_t IncrementalMarking::SpaceLeftInOldSpace() { + return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize(); +} + +} } // namespace v8::internal diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h new file mode 100644 index 000000000..d1627bcba --- /dev/null +++ b/deps/v8/src/incremental-marking.h @@ -0,0 +1,256 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
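Step above turns allocation into a marking budget (bytes_to_process = allocated * allocation_marking_factor_) and decides when to speed the factor up: when half of the old-space headroom has been consumed, when the promoted size has doubled, or every kAllocationMarkingFactorSpeedupInterval steps; note that in this revision the actual bump is still disabled behind `if (speed_up && 0)`. A compact stand-alone model of the intended pacing, reusing the constants from the header that follows but collapsing the pressure checks into a single flag:

    #include <cstdint>

    const intptr_t kAllocatedThreshold = 65536;
    const intptr_t kInitialAllocationMarkingFactor = 1;
    const intptr_t kAllocationMarkingFactorSpeedup = 2;
    const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
    const intptr_t kMaxAllocationMarkingFactor = 1000000000;

    struct MarkingPacer {
      intptr_t allocated = 0;
      intptr_t factor = kInitialAllocationMarkingFactor;
      intptr_t steps = 0;

      // Returns the marking budget, in bytes, for this allocation, or 0 if the
      // per-step allocation threshold has not been reached yet.
      intptr_t Step(intptr_t allocated_bytes, bool old_space_pressure) {
        allocated += allocated_bytes;
        if (allocated < kAllocatedThreshold) return 0;

        intptr_t bytes_to_process = allocated * factor;
        allocated = 0;
        ++steps;

        bool speed_up = old_space_pressure ||
            ((steps % kAllocationMarkingFactorSpeedupInterval) == 0 &&
             factor < kMaxAllocationMarkingFactor);
        if (speed_up) {
          factor += kAllocationMarkingFactorSpeedup;
          factor = static_cast<intptr_t>(factor * 1.3);
        }
        return bytes_to_process;
      }
    };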
+ +#ifndef V8_INCREMENTAL_MARKING_H_ +#define V8_INCREMENTAL_MARKING_H_ + + +#include "execution.h" +#include "mark-compact.h" +#include "objects.h" + +namespace v8 { +namespace internal { + + +class IncrementalMarking { + public: + enum State { + STOPPED, + SWEEPING, + MARKING, + COMPLETE + }; + + explicit IncrementalMarking(Heap* heap); + + void TearDown(); + + State state() { + ASSERT(state_ == STOPPED || FLAG_incremental_marking); + return state_; + } + + bool should_hurry() { return should_hurry_; } + + inline bool IsStopped() { return state() == STOPPED; } + + inline bool IsMarking() { return state() >= MARKING; } + + inline bool IsMarkingIncomplete() { return state() == MARKING; } + + bool WorthActivating(); + + void Start(); + + void Stop(); + + void PrepareForScavenge(); + + void UpdateMarkingDequeAfterScavenge(); + + void Hurry(); + + void Finalize(); + + void Abort(); + + void MarkingComplete(); + + // It's hard to know how much work the incremental marker should do to make + // progress in the face of the mutator creating new work for it. We start + // of at a moderate rate of work and gradually increase the speed of the + // incremental marker until it completes. + // Do some marking every time this much memory has been allocated. + static const intptr_t kAllocatedThreshold = 65536; + // Start off by marking this many times more memory than has been allocated. + static const intptr_t kInitialAllocationMarkingFactor = 1; + // But if we are promoting a lot of data we need to mark faster to keep up + // with the data that is entering the old space through promotion. + static const intptr_t kFastMarking = 3; + // After this many steps we increase the marking/allocating factor. + static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024; + // This is how much we increase the marking/allocating factor by. + static const intptr_t kAllocationMarkingFactorSpeedup = 2; + static const intptr_t kMaxAllocationMarkingFactor = 1000000000; + + void OldSpaceStep(intptr_t allocated) { + Step(allocated * kFastMarking / kInitialAllocationMarkingFactor); + } + void Step(intptr_t allocated); + + inline void RestartIfNotMarking() { + if (state_ == COMPLETE) { + state_ = MARKING; + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Restarting (new grey objects)\n"); + } + } + } + + static void RecordWriteFromCode(HeapObject* obj, + Object* value, + Isolate* isolate); + + static void RecordWriteForEvacuationFromCode(HeapObject* obj, + Object** slot, + Isolate* isolate); + + inline bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value); + + + inline void RecordWrite(HeapObject* obj, Object** slot, Object* value); + inline void RecordWriteIntoCode(HeapObject* obj, + RelocInfo* rinfo, + Object* value); + void RecordCodeTargetPatch(Address pc, HeapObject* value); + void RecordWriteOfCodeEntry(JSFunction* host, Object** slot, Code* value); + + inline void RecordWrites(HeapObject* obj); + + inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit); + + inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit); + + inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit); + + // Does white->black or keeps gray or black color. Returns true if converting + // white to black. + inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) { + ASSERT(!Marking::IsImpossible(mark_bit)); + if (mark_bit.Get()) { + // Grey or black: Keep the color. 
+ return false; + } + mark_bit.Set(); + ASSERT(Marking::IsBlack(mark_bit)); + return true; + } + + inline int steps_count() { + return steps_count_; + } + + inline double steps_took() { + return steps_took_; + } + + inline double longest_step() { + return longest_step_; + } + + inline int steps_count_since_last_gc() { + return steps_count_since_last_gc_; + } + + inline double steps_took_since_last_gc() { + return steps_took_since_last_gc_; + } + + inline void SetOldSpacePageFlags(MemoryChunk* chunk) { + SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting()); + } + + inline void SetNewSpacePageFlags(NewSpacePage* chunk) { + SetNewSpacePageFlags(chunk, IsMarking()); + } + + MarkingDeque* marking_deque() { return &marking_deque_; } + + bool IsCompacting() { return IsMarking() && is_compacting_; } + + void ActivateGeneratedStub(Code* stub); + + void NotifyOfHighPromotionRate() { + if (IsMarking()) { + if (allocation_marking_factor_ < kFastMarking) { + if (FLAG_trace_gc) { + PrintF("Increasing marking speed to %d due to high promotion rate\n", + static_cast<int>(kFastMarking)); + } + allocation_marking_factor_ = kFastMarking; + } + } + } + + private: + void set_should_hurry(bool val) { + should_hurry_ = val; + } + + int64_t SpaceLeftInOldSpace(); + + void ResetStepCounters(); + + enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION }; + + void StartMarking(CompactionFlag flag); + + void ActivateIncrementalWriteBarrier(PagedSpace* space); + static void ActivateIncrementalWriteBarrier(NewSpace* space); + void ActivateIncrementalWriteBarrier(); + + static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space); + static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space); + void DeactivateIncrementalWriteBarrier(); + + static void SetOldSpacePageFlags(MemoryChunk* chunk, + bool is_marking, + bool is_compacting); + + static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking); + + void EnsureMarkingDequeIsCommitted(); + + void VisitGlobalContext(Context* ctx, ObjectVisitor* v); + + Heap* heap_; + + State state_; + bool is_compacting_; + + VirtualMemory* marking_deque_memory_; + MarkingDeque marking_deque_; + + int steps_count_; + double steps_took_; + double longest_step_; + int64_t old_generation_space_available_at_start_of_incremental_; + int64_t old_generation_space_used_at_start_of_incremental_; + int steps_count_since_last_gc_; + double steps_took_since_last_gc_; + int64_t bytes_rescanned_; + bool should_hurry_; + int allocation_marking_factor_; + intptr_t allocated_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking); +}; + +} } // namespace v8::internal + +#endif // V8_INCREMENTAL_MARKING_H_ diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h index aa6b5372c..d6e613176 100644 --- a/deps/v8/src/isolate-inl.h +++ b/deps/v8/src/isolate-inl.h @@ -36,6 +36,21 @@ namespace v8 { namespace internal { +SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) { + if (isolate->context() != NULL) { + context_ = Handle<Context>(isolate->context()); +#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300 + dummy_ = Handle<Context>(isolate->context()); +#endif + } + isolate->set_save_context(this); + + // If there is no JS frame under the current C frame, use the value 0. + JavaScriptFrameIterator it(isolate); + js_sp_ = it.done() ? 
0 : it.frame()->sp(); +} + + bool Isolate::DebuggerHasBreakPoints() { #ifdef ENABLE_DEBUGGER_SUPPORT return debug()->has_break_points(); diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc index fd0f673e7..492694e60 100644 --- a/deps/v8/src/isolate.cc +++ b/deps/v8/src/isolate.cc @@ -98,6 +98,14 @@ void ThreadLocalTop::InitializeInternal() { failed_access_check_callback_ = NULL; save_context_ = NULL; catcher_ = NULL; + + // These members are re-initialized later after deserialization + // is complete. + pending_exception_ = NULL; + has_pending_message_ = false; + pending_message_obj_ = NULL; + pending_message_script_ = NULL; + scheduled_exception_ = NULL; } @@ -1284,6 +1292,9 @@ char* Isolate::ArchiveThread(char* to) { memcpy(to, reinterpret_cast<char*>(thread_local_top()), sizeof(ThreadLocalTop)); InitializeThreadLocal(); + clear_pending_exception(); + clear_pending_message(); + clear_scheduled_exception(); return to + sizeof(ThreadLocalTop); } @@ -1403,11 +1414,12 @@ Isolate::Isolate() in_use_list_(0), free_list_(0), preallocated_storage_preallocated_(false), - pc_to_code_cache_(NULL), + inner_pointer_to_code_cache_(NULL), write_input_buffer_(NULL), global_handles_(NULL), context_switcher_(NULL), thread_manager_(NULL), + fp_stubs_generated_(false), string_tracker_(NULL), regexp_stack_(NULL), embedder_data_(NULL) { @@ -1575,8 +1587,8 @@ Isolate::~Isolate() { compilation_cache_ = NULL; delete bootstrapper_; bootstrapper_ = NULL; - delete pc_to_code_cache_; - pc_to_code_cache_ = NULL; + delete inner_pointer_to_code_cache_; + inner_pointer_to_code_cache_ = NULL; delete write_input_buffer_; write_input_buffer_ = NULL; @@ -1610,9 +1622,6 @@ Isolate::~Isolate() { void Isolate::InitializeThreadLocal() { thread_local_top_.isolate_ = this; thread_local_top_.Initialize(); - clear_pending_exception(); - clear_pending_message(); - clear_scheduled_exception(); } @@ -1700,7 +1709,7 @@ bool Isolate::Init(Deserializer* des) { context_slot_cache_ = new ContextSlotCache(); descriptor_lookup_cache_ = new DescriptorLookupCache(); unicode_cache_ = new UnicodeCache(); - pc_to_code_cache_ = new PcToCodeCache(this); + inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this); write_input_buffer_ = new StringInputBuffer(); global_handles_ = new GlobalHandles(this); bootstrapper_ = new Bootstrapper(); @@ -1767,9 +1776,14 @@ bool Isolate::Init(Deserializer* des) { // If we are deserializing, read the state into the now-empty heap. if (des != NULL) { des->Deserialize(); - stub_cache_->Clear(); + stub_cache_->Initialize(true); } + // Finish initialization of ThreadLocal after deserialization is done. + clear_pending_exception(); + clear_pending_message(); + clear_scheduled_exception(); + // Deserializing may put strange things in the root array's copy of the // stack guard. 
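The isolate changes above split thread-local setup into two phases: InitializeInternal only nulls the raw exception fields (the heap may not exist yet), while clear_pending_exception() and friends, which store heap values such as the hole, now run only after deserialization has populated the heap (and again in ArchiveThread). A generic sketch of that two-phase pattern; the types here are invented, not V8's:

    #include <cassert>

    struct Heap {
      bool deserialized = false;
      int* the_hole = nullptr;  // stands in for the heap's "hole" sentinel
    };

    class ThreadLocalState {
     public:
      // Phase 1: safe before the heap exists; raw nulling only, no heap access.
      void InitializeRaw() { pending_exception_ = nullptr; initialized_ = false; }

      // Phase 2: runs after deserialization, once heap roots are valid.
      void FinishInitialization(Heap* heap) {
        assert(heap->deserialized);
        pending_exception_ = heap->the_hole;  // requires a live heap root
        initialized_ = true;
      }

      bool has_pending_exception(const Heap* heap) const {
        assert(initialized_);
        return pending_exception_ != heap->the_hole;
      }

     private:
      int* pending_exception_ = nullptr;
      bool initialized_ = false;
    };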
heap_.SetStackLimits(); diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h index 2582da644..01ab04e60 100644 --- a/deps/v8/src/isolate.h +++ b/deps/v8/src/isolate.h @@ -66,7 +66,7 @@ class HandleScopeImplementer; class HeapProfiler; class InlineRuntimeFunctionsTable; class NoAllocationStringAllocator; -class PcToCodeCache; +class InnerPointerToCodeCache; class PreallocatedMemoryThread; class RegExpStack; class SaveContext; @@ -841,7 +841,9 @@ class Isolate { return unicode_cache_; } - PcToCodeCache* pc_to_code_cache() { return pc_to_code_cache_; } + InnerPointerToCodeCache* inner_pointer_to_code_cache() { + return inner_pointer_to_code_cache_; + } StringInputBuffer* write_input_buffer() { return write_input_buffer_; } @@ -879,6 +881,12 @@ class Isolate { RuntimeState* runtime_state() { return &runtime_state_; } + void set_fp_stubs_generated(bool value) { + fp_stubs_generated_ = value; + } + + bool fp_stubs_generated() { return fp_stubs_generated_; } + StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() { return &compiler_safe_string_input_buffer_; } @@ -1130,12 +1138,13 @@ class Isolate { PreallocatedStorage in_use_list_; PreallocatedStorage free_list_; bool preallocated_storage_preallocated_; - PcToCodeCache* pc_to_code_cache_; + InnerPointerToCodeCache* inner_pointer_to_code_cache_; StringInputBuffer* write_input_buffer_; GlobalHandles* global_handles_; ContextSwitcher* context_switcher_; ThreadManager* thread_manager_; RuntimeState runtime_state_; + bool fp_stubs_generated_; StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_; Builtins builtins_; StringTracker* string_tracker_; @@ -1210,19 +1219,7 @@ class Isolate { // versions of GCC. See V8 issue 122 for details. class SaveContext BASE_EMBEDDED { public: - explicit SaveContext(Isolate* isolate) : prev_(isolate->save_context()) { - if (isolate->context() != NULL) { - context_ = Handle<Context>(isolate->context()); -#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300 - dummy_ = Handle<Context>(isolate->context()); -#endif - } - isolate->set_save_context(this); - - // If there is no JS frame under the current C frame, use the value 0. - JavaScriptFrameIterator it(isolate); - js_sp_ = it.done() ? 0 : it.frame()->sp(); - } + inline explicit SaveContext(Isolate* isolate); ~SaveContext() { if (context_.is_null()) { diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h index 68eab65fd..ca796a699 100644 --- a/deps/v8/src/json-parser.h +++ b/deps/v8/src/json-parser.h @@ -165,7 +165,7 @@ class JsonParser BASE_EMBEDDED { template <bool seq_ascii> Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) { - isolate_ = source->map()->isolate(); + isolate_ = source->map()->GetHeap()->isolate(); FlattenString(source); source_ = source; source_length_ = source_->length(); diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc index 3ebfbdfc9..c1a9e067c 100644 --- a/deps/v8/src/jsregexp.cc +++ b/deps/v8/src/jsregexp.cc @@ -68,9 +68,9 @@ Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor, Handle<String> flags, bool* has_pending_exception) { // Call the construct code with 2 arguments. 
- Object** argv[2] = { Handle<Object>::cast(pattern).location(), - Handle<Object>::cast(flags).location() }; - return Execution::New(constructor, 2, argv, has_pending_exception); + Handle<Object> argv[] = { pattern, flags }; + return Execution::New(constructor, ARRAY_SIZE(argv), argv, + has_pending_exception); } @@ -4723,7 +4723,6 @@ bool OutSet::Get(unsigned value) { const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar; -const DispatchTable::Entry DispatchTable::Config::kNoValue; void DispatchTable::AddRange(CharacterRange full_range, int value) { diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h index 54297a49a..df110d1c2 100644 --- a/deps/v8/src/jsregexp.h +++ b/deps/v8/src/jsregexp.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -29,6 +29,7 @@ #define V8_JSREGEXP_H_ #include "allocation.h" +#include "assembler.h" #include "zone-inl.h" namespace v8 { @@ -388,7 +389,7 @@ class DispatchTable : public ZoneObject { typedef uc16 Key; typedef Entry Value; static const uc16 kNoKey; - static const Entry kNoValue; + static const Entry NoValue() { return Value(); } static inline int Compare(uc16 a, uc16 b) { if (a == b) return 0; diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc index 466110678..35281eb20 100644 --- a/deps/v8/src/lithium-allocator.cc +++ b/deps/v8/src/lithium-allocator.cc @@ -1043,11 +1043,13 @@ void LAllocator::ResolvePhis(HBasicBlock* block) { // it into a location different from the operand of a live range // covering a branch instruction. // Thus we need to manually record a pointer. - if (phi->representation().IsTagged()) { - LInstruction* branch = - InstructionAt(cur_block->last_instruction_index()); - if (branch->HasPointerMap()) { + LInstruction* branch = + InstructionAt(cur_block->last_instruction_index()); + if (branch->HasPointerMap()) { + if (phi->representation().IsTagged()) { branch->pointer_map()->RecordPointer(phi_operand); + } else if (!phi->representation().IsDouble()) { + branch->pointer_map()->RecordUntagged(phi_operand); } } } @@ -1142,10 +1144,13 @@ void LAllocator::ResolveControlFlow(LiveRange* range, // it into a location different from the operand of a live range // covering a branch instruction. // Thus we need to manually record a pointer. - if (HasTaggedValue(range->id())) { - LInstruction* branch = InstructionAt(pred->last_instruction_index()); - if (branch->HasPointerMap()) { + LInstruction* branch = InstructionAt(pred->last_instruction_index()); + if (branch->HasPointerMap()) { + if (HasTaggedValue(range->id())) { branch->pointer_map()->RecordPointer(cur_op); + } else if (!cur_op->IsDoubleStackSlot() && + !cur_op->IsDoubleRegister()) { + branch->pointer_map()->RemovePointer(cur_op); } } } diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc index 5410f6f05..31b16982d 100644 --- a/deps/v8/src/lithium.cc +++ b/deps/v8/src/lithium.cc @@ -156,6 +156,27 @@ void LPointerMap::RecordPointer(LOperand* op) { } +void LPointerMap::RemovePointer(LOperand* op) { + // Do not record arguments as pointers. 
+ if (op->IsStackSlot() && op->index() < 0) return; + ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); + for (int i = 0; i < pointer_operands_.length(); ++i) { + if (pointer_operands_[i]->Equals(op)) { + pointer_operands_.Remove(i); + --i; + } + } +} + + +void LPointerMap::RecordUntagged(LOperand* op) { + // Do not record arguments as pointers. + if (op->IsStackSlot() && op->index() < 0) return; + ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); + untagged_operands_.Add(op); +} + + void LPointerMap::PrintTo(StringStream* stream) { stream->Add("{"); for (int i = 0; i < pointer_operands_.length(); ++i) { @@ -182,6 +203,7 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) { case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: return 3; + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h index 20da21a63..1e90804c3 100644 --- a/deps/v8/src/lithium.h +++ b/deps/v8/src/lithium.h @@ -407,9 +407,18 @@ class LParallelMove : public ZoneObject { class LPointerMap: public ZoneObject { public: explicit LPointerMap(int position) - : pointer_operands_(8), position_(position), lithium_position_(-1) { } - - const ZoneList<LOperand*>* operands() const { return &pointer_operands_; } + : pointer_operands_(8), + untagged_operands_(0), + position_(position), + lithium_position_(-1) { } + + const ZoneList<LOperand*>* GetNormalizedOperands() { + for (int i = 0; i < untagged_operands_.length(); ++i) { + RemovePointer(untagged_operands_[i]); + } + untagged_operands_.Clear(); + return &pointer_operands_; + } int position() const { return position_; } int lithium_position() const { return lithium_position_; } @@ -419,10 +428,13 @@ class LPointerMap: public ZoneObject { } void RecordPointer(LOperand* op); + void RemovePointer(LOperand* op); + void RecordUntagged(LOperand* op); void PrintTo(StringStream* stream); private: ZoneList<LOperand*> pointer_operands_; + ZoneList<LOperand*> untagged_operands_; int position_; int lithium_position_; }; diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc index d44c2fc1c..6107cbf0b 100644 --- a/deps/v8/src/liveedit.cc +++ b/deps/v8/src/liveedit.cc @@ -1000,6 +1000,7 @@ class ReferenceCollectorVisitor : public ObjectVisitor { static void ReplaceCodeObject(Code* original, Code* substitution) { ASSERT(!HEAP->InNewSpace(substitution)); + HeapIterator iterator; AssertNoAllocation no_allocations_please; // A zone scope for ReferenceCollectorVisitor. @@ -1016,7 +1017,6 @@ static void ReplaceCodeObject(Code* original, Code* substitution) { // Now iterate over all pointers of all objects, including code_target // implicit pointers. - HeapIterator iterator; for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { obj->Iterate(&visitor); } @@ -1101,6 +1101,8 @@ MaybeObject* LiveEdit::ReplaceFunctionCode( Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo(); + HEAP->EnsureHeapIsIterable(); + if (IsJSFunctionCode(shared_info->code())) { Handle<Code> code = compile_info_wrapper.GetFunctionCode(); ReplaceCodeObject(shared_info->code(), *code); @@ -1271,7 +1273,8 @@ class RelocInfoBuffer { // Patch positions in code (changes relocation info section) and possibly // returns new instance of code. 
-static Handle<Code> PatchPositionsInCode(Handle<Code> code, +static Handle<Code> PatchPositionsInCode( + Handle<Code> code, Handle<JSArray> position_change_array) { RelocInfoBuffer buffer_writer(code->relocation_size(), @@ -1286,7 +1289,7 @@ static Handle<Code> PatchPositionsInCode(Handle<Code> code, int new_position = TranslatePosition(position, position_change_array); if (position != new_position) { - RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position); + RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position, NULL); buffer_writer.Write(&info_copy); continue; } @@ -1333,6 +1336,8 @@ MaybeObject* LiveEdit::PatchFunctionPositions( info->set_end_position(new_function_end); info->set_function_token_position(new_function_token_pos); + HEAP->EnsureHeapIsIterable(); + if (IsJSFunctionCode(info->code())) { // Patch relocation info section of the code. Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()), diff --git a/deps/v8/src/liveobjectlist.cc b/deps/v8/src/liveobjectlist.cc index 957c0515d..d62c4d176 100644 --- a/deps/v8/src/liveobjectlist.cc +++ b/deps/v8/src/liveobjectlist.cc @@ -1336,7 +1336,9 @@ MaybeObject* LiveObjectList::DumpPrivate(DumpWriter* writer, // Allocate the JSArray of the elements. Handle<JSObject> elements = factory->NewJSObject(isolate->array_function()); if (elements->IsFailure()) return Object::cast(*elements); - Handle<JSArray>::cast(elements)->SetContent(*elements_arr); + + maybe_result = Handle<JSArray>::cast(elements)->SetContent(*elements_arr); + if (maybe_result->IsFailure()) return maybe_result; // Set body.elements. Handle<String> elements_sym = factory->LookupAsciiSymbol("elements"); @@ -1462,7 +1464,9 @@ MaybeObject* LiveObjectList::SummarizePrivate(SummaryWriter* writer, Handle<JSObject> summary_obj = factory->NewJSObject(isolate->array_function()); if (summary_obj->IsFailure()) return Object::cast(*summary_obj); - Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr); + + maybe_result = Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr); + if (maybe_result->IsFailure()) return maybe_result; // Create the body object. Handle<JSObject> body = factory->NewJSObject(isolate->object_function()); @@ -1589,7 +1593,9 @@ MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) { // Return the result as a JS array. 
Handle<JSObject> lols = factory->NewJSObject(isolate->array_function()); - Handle<JSArray>::cast(lols)->SetContent(*list); + + maybe_result = Handle<JSArray>::cast(lols)->SetContent(*list); + if (maybe_result->IsFailure()) return maybe_result; Handle<JSObject> result = factory->NewJSObject(isolate->object_function()); if (result->IsFailure()) return Object::cast(*result); @@ -2613,7 +2619,7 @@ void LiveObjectList::VerifyNotInFromSpace() { HeapObject* heap_obj = it.Obj(); if (heap->InFromSpace(heap_obj)) { OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n", - i++, heap_obj, heap->new_space()->FromSpaceLow()); + i++, heap_obj, Heap::new_space()->FromSpaceStart()); } } } diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index 3d66b5fb1..bad5fdc93 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -1356,12 +1356,12 @@ class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor { static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis, Handle<Code>* code_objects) { + HeapIterator iterator; AssertNoAllocation no_alloc; int compiled_funcs_count = 0; // Iterate the heap to find shared function info objects and record // the unoptimized code for them. - HeapIterator iterator; for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (!obj->IsSharedFunctionInfo()) continue; SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj); @@ -1519,8 +1519,9 @@ void Logger::LowLevelLogWriteBytes(const char* bytes, int size) { void Logger::LogCodeObjects() { - AssertNoAllocation no_alloc; + HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask); HeapIterator iterator; + AssertNoAllocation no_alloc; for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (obj->IsCode()) LogCodeObject(obj); } @@ -1573,6 +1574,7 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared, void Logger::LogCompiledFunctions() { + HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask); HandleScope scope; const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL); ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count); @@ -1591,9 +1593,9 @@ void Logger::LogCompiledFunctions() { void Logger::LogAccessorCallbacks() { - AssertNoAllocation no_alloc; + HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask); HeapIterator iterator; - i::Isolate* isolate = ISOLATE; + AssertNoAllocation no_alloc; for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (!obj->IsAccessorInfo()) continue; AccessorInfo* ai = AccessorInfo::cast(obj); @@ -1601,11 +1603,11 @@ void Logger::LogAccessorCallbacks() { String* name = String::cast(ai->name()); Address getter_entry = v8::ToCData<Address>(ai->getter()); if (getter_entry != 0) { - PROFILE(isolate, GetterCallbackEvent(name, getter_entry)); + PROFILE(ISOLATE, GetterCallbackEvent(name, getter_entry)); } Address setter_entry = v8::ToCData<Address>(ai->setter()); if (setter_entry != 0) { - PROFILE(isolate, SetterCallbackEvent(name, setter_entry)); + PROFILE(ISOLATE, SetterCallbackEvent(name, setter_entry)); } } } diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h index fe19810a2..4d76fc820 100644 --- a/deps/v8/src/log.h +++ b/deps/v8/src/log.h @@ -29,6 +29,7 @@ #define V8_LOG_H_ #include "allocation.h" +#include "objects.h" #include "platform.h" #include "log-utils.h" diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h index 30838bd76..364fdb627 100644 --- a/deps/v8/src/macro-assembler.h +++ b/deps/v8/src/macro-assembler.h @@ 
-1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -93,6 +93,63 @@ const int kInvalidProtoDepth = -1; namespace v8 { namespace internal { +class FrameScope { + public: + explicit FrameScope(MacroAssembler* masm, StackFrame::Type type) + : masm_(masm), type_(type), old_has_frame_(masm->has_frame()) { + masm->set_has_frame(true); + if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) { + masm->EnterFrame(type); + } + } + + ~FrameScope() { + if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) { + masm_->LeaveFrame(type_); + } + masm_->set_has_frame(old_has_frame_); + } + + // Normally we generate the leave-frame code when this object goes + // out of scope. Sometimes we may need to generate the code somewhere else + // in addition. Calling this will achieve that, but the object stays in + // scope, the MacroAssembler is still marked as being in a frame scope, and + // the code will be generated again when it goes out of scope. + void GenerateLeaveFrame() { + masm_->LeaveFrame(type_); + } + + private: + MacroAssembler* masm_; + StackFrame::Type type_; + bool old_has_frame_; +}; + + +class AllowExternalCallThatCantCauseGC: public FrameScope { + public: + explicit AllowExternalCallThatCantCauseGC(MacroAssembler* masm) + : FrameScope(masm, StackFrame::NONE) { } +}; + + +class NoCurrentFrameScope { + public: + explicit NoCurrentFrameScope(MacroAssembler* masm) + : masm_(masm), saved_(masm->has_frame()) { + masm->set_has_frame(false); + } + + ~NoCurrentFrameScope() { + masm_->set_has_frame(saved_); + } + + private: + MacroAssembler* masm_; + bool saved_; +}; + + // Support for "structured" code comments. #ifdef DEBUG diff --git a/deps/v8/src/mark-compact-inl.h b/deps/v8/src/mark-compact-inl.h new file mode 100644 index 000000000..20f11a78a --- /dev/null +++ b/deps/v8/src/mark-compact-inl.h @@ -0,0 +1,101 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_MARK_COMPACT_INL_H_ +#define V8_MARK_COMPACT_INL_H_ + +#include "isolate.h" +#include "memory.h" +#include "mark-compact.h" + + +namespace v8 { +namespace internal { + + +MarkBit Marking::MarkBitFrom(Address addr) { + MemoryChunk *p = MemoryChunk::FromAddress(addr); + return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr), + p->ContainsOnlyData()); +} + + +void MarkCompactCollector::SetFlags(int flags) { + sweep_precisely_ = ((flags & Heap::kMakeHeapIterableMask) != 0); +} + + +void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) { + ASSERT(Marking::MarkBitFrom(obj) == mark_bit); + if (!mark_bit.Get()) { + mark_bit.Set(); + MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size()); +#ifdef DEBUG + UpdateLiveObjectCount(obj); +#endif + ProcessNewlyMarkedObject(obj); + } +} + + +void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) { + ASSERT(!mark_bit.Get()); + ASSERT(Marking::MarkBitFrom(obj) == mark_bit); + mark_bit.Set(); + MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size()); +#ifdef DEBUG + UpdateLiveObjectCount(obj); +#endif +} + + +bool MarkCompactCollector::IsMarked(Object* obj) { + ASSERT(obj->IsHeapObject()); + HeapObject* heap_object = HeapObject::cast(obj); + return Marking::MarkBitFrom(heap_object).Get(); +} + + +void MarkCompactCollector::RecordSlot(Object** anchor_slot, + Object** slot, + Object* object) { + Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object)); + if (object_page->IsEvacuationCandidate() && + !ShouldSkipEvacuationSlotRecording(anchor_slot)) { + if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, + object_page->slots_buffer_address(), + slot, + SlotsBuffer::FAIL_ON_OVERFLOW)) { + EvictEvacuationCandidate(object_page); + } + } +} + + +} } // namespace v8::internal + +#endif // V8_MARK_COMPACT_INL_H_ diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 3e4a617b7..9fa79ca74 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -27,20 +27,31 @@ #include "v8.h" +#include "code-stubs.h" #include "compilation-cache.h" +#include "deoptimizer.h" #include "execution.h" -#include "heap-profiler.h" #include "gdb-jit.h" #include "global-handles.h" +#include "heap-profiler.h" #include "ic-inl.h" +#include "incremental-marking.h" #include "liveobjectlist-inl.h" #include "mark-compact.h" #include "objects-visiting.h" +#include "objects-visiting-inl.h" #include "stub-cache.h" namespace v8 { namespace internal { + +const char* Marking::kWhiteBitPattern = "00"; +const char* Marking::kBlackBitPattern = "10"; +const char* Marking::kGreyBitPattern = "11"; +const char* Marking::kImpossibleBitPattern = "01"; + + // ------------------------------------------------------------------------- // MarkCompactCollector @@ -48,11 +59,12 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT #ifdef DEBUG state_(IDLE), #endif - force_compaction_(false), - compacting_collection_(false), - compact_on_next_gc_(false), - 
previous_marked_count_(0), + sweep_precisely_(false), + compacting_(false), + was_marked_incrementally_(false), + collect_maps_(FLAG_collect_maps), tracer_(NULL), + migration_slots_buffer_(NULL), #ifdef DEBUG live_young_objects_size_(0), live_old_pointer_objects_size_(0), @@ -68,50 +80,408 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT encountered_weak_maps_(NULL) { } +#ifdef DEBUG +class VerifyMarkingVisitor: public ObjectVisitor { + public: + void VisitPointers(Object** start, Object** end) { + for (Object** current = start; current < end; current++) { + if ((*current)->IsHeapObject()) { + HeapObject* object = HeapObject::cast(*current); + ASSERT(HEAP->mark_compact_collector()->IsMarked(object)); + } + } + } +}; + + +static void VerifyMarking(Address bottom, Address top) { + VerifyMarkingVisitor visitor; + HeapObject* object; + Address next_object_must_be_here_or_later = bottom; + + for (Address current = bottom; + current < top; + current += kPointerSize) { + object = HeapObject::FromAddress(current); + if (MarkCompactCollector::IsMarked(object)) { + ASSERT(current >= next_object_must_be_here_or_later); + object->Iterate(&visitor); + next_object_must_be_here_or_later = current + object->Size(); + } + } +} + + +static void VerifyMarking(NewSpace* space) { + Address end = space->top(); + NewSpacePageIterator it(space->bottom(), end); + // The bottom position is at the start of its page. Allows us to use + // page->body() as start of range on all pages. + ASSERT_EQ(space->bottom(), + NewSpacePage::FromAddress(space->bottom())->body()); + while (it.has_next()) { + NewSpacePage* page = it.next(); + Address limit = it.has_next() ? page->body_limit() : end; + ASSERT(limit == end || !page->Contains(end)); + VerifyMarking(page->body(), limit); + } +} + + +static void VerifyMarking(PagedSpace* space) { + PageIterator it(space); + + while (it.has_next()) { + Page* p = it.next(); + VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd()); + } +} + + +static void VerifyMarking(Heap* heap) { + VerifyMarking(heap->old_pointer_space()); + VerifyMarking(heap->old_data_space()); + VerifyMarking(heap->code_space()); + VerifyMarking(heap->cell_space()); + VerifyMarking(heap->map_space()); + VerifyMarking(heap->new_space()); + + VerifyMarkingVisitor visitor; + + LargeObjectIterator it(heap->lo_space()); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + if (MarkCompactCollector::IsMarked(obj)) { + obj->Iterate(&visitor); + } + } + + heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); +} + + +class VerifyEvacuationVisitor: public ObjectVisitor { + public: + void VisitPointers(Object** start, Object** end) { + for (Object** current = start; current < end; current++) { + if ((*current)->IsHeapObject()) { + HeapObject* object = HeapObject::cast(*current); + CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object)); + } + } + } +}; + + +static void VerifyEvacuation(Address bottom, Address top) { + VerifyEvacuationVisitor visitor; + HeapObject* object; + Address next_object_must_be_here_or_later = bottom; + + for (Address current = bottom; + current < top; + current += kPointerSize) { + object = HeapObject::FromAddress(current); + if (MarkCompactCollector::IsMarked(object)) { + ASSERT(current >= next_object_must_be_here_or_later); + object->Iterate(&visitor); + next_object_must_be_here_or_later = current + object->Size(); + } + } +} + + +static void VerifyEvacuation(NewSpace* space) { + NewSpacePageIterator it(space->bottom(), space->top()); + VerifyEvacuationVisitor visitor; 
+ + while (it.has_next()) { + NewSpacePage* page = it.next(); + Address current = page->body(); + Address limit = it.has_next() ? page->body_limit() : space->top(); + ASSERT(limit == space->top() || !page->Contains(space->top())); + while (current < limit) { + HeapObject* object = HeapObject::FromAddress(current); + object->Iterate(&visitor); + current += object->Size(); + } + } +} + + +static void VerifyEvacuation(PagedSpace* space) { + PageIterator it(space); + + while (it.has_next()) { + Page* p = it.next(); + if (p->IsEvacuationCandidate()) continue; + VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd()); + } +} + + +static void VerifyEvacuation(Heap* heap) { + VerifyEvacuation(heap->old_pointer_space()); + VerifyEvacuation(heap->old_data_space()); + VerifyEvacuation(heap->code_space()); + VerifyEvacuation(heap->cell_space()); + VerifyEvacuation(heap->map_space()); + VerifyEvacuation(heap->new_space()); + + VerifyEvacuationVisitor visitor; + heap->IterateStrongRoots(&visitor, VISIT_ALL); +} +#endif + + +void MarkCompactCollector::AddEvacuationCandidate(Page* p) { + p->MarkEvacuationCandidate(); + evacuation_candidates_.Add(p); +} + + +bool MarkCompactCollector::StartCompaction() { + if (!compacting_) { + ASSERT(evacuation_candidates_.length() == 0); + + CollectEvacuationCandidates(heap()->old_pointer_space()); + CollectEvacuationCandidates(heap()->old_data_space()); + + if (FLAG_compact_code_space) { + CollectEvacuationCandidates(heap()->code_space()); + } + + heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists(); + heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); + heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); + + compacting_ = evacuation_candidates_.length() > 0; + } + + return compacting_; +} + + void MarkCompactCollector::CollectGarbage() { // Make sure that Prepare() has been called. The individual steps below will // update the state as they proceed. ASSERT(state_ == PREPARE_GC); ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); - // Prepare has selected whether to compact the old generation or not. - // Tell the tracer. - if (IsCompacting()) tracer_->set_is_compacting(); - MarkLiveObjects(); + ASSERT(heap_->incremental_marking()->IsStopped()); - if (FLAG_collect_maps) ClearNonLiveTransitions(); + if (collect_maps_) ClearNonLiveTransitions(); ClearWeakMaps(); - SweepLargeObjectSpace(); +#ifdef DEBUG + if (FLAG_verify_heap) { + VerifyMarking(heap_); + } +#endif - if (IsCompacting()) { - GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT); - EncodeForwardingAddresses(); + SweepSpaces(); - heap()->MarkMapPointersAsEncoded(true); - UpdatePointers(); - heap()->MarkMapPointersAsEncoded(false); - heap()->isolate()->pc_to_code_cache()->Flush(); + if (!collect_maps_) ReattachInitialMaps(); - RelocateObjects(); - } else { - SweepSpaces(); - heap()->isolate()->pc_to_code_cache()->Flush(); - } + heap_->isolate()->inner_pointer_to_code_cache()->Flush(); Finish(); - // Save the count of marked objects remaining after the collection and - // null out the GC tracer. 
- previous_marked_count_ = tracer_->marked_count(); - ASSERT(previous_marked_count_ == 0); tracer_ = NULL; } +#ifdef DEBUG +void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { + PageIterator it(space); + + while (it.has_next()) { + Page* p = it.next(); + CHECK(p->markbits()->IsClean()); + CHECK_EQ(0, p->LiveBytes()); + } +} + +void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) { + NewSpacePageIterator it(space->bottom(), space->top()); + + while (it.has_next()) { + NewSpacePage* p = it.next(); + CHECK(p->markbits()->IsClean()); + CHECK_EQ(0, p->LiveBytes()); + } +} + +void MarkCompactCollector::VerifyMarkbitsAreClean() { + VerifyMarkbitsAreClean(heap_->old_pointer_space()); + VerifyMarkbitsAreClean(heap_->old_data_space()); + VerifyMarkbitsAreClean(heap_->code_space()); + VerifyMarkbitsAreClean(heap_->cell_space()); + VerifyMarkbitsAreClean(heap_->map_space()); + VerifyMarkbitsAreClean(heap_->new_space()); + + LargeObjectIterator it(heap_->lo_space()); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + MarkBit mark_bit = Marking::MarkBitFrom(obj); + ASSERT(Marking::IsWhite(mark_bit)); + } +} +#endif + + +static void ClearMarkbits(PagedSpace* space) { + PageIterator it(space); + + while (it.has_next()) { + Bitmap::Clear(it.next()); + } +} + + +static void ClearMarkbits(NewSpace* space) { + NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd()); + + while (it.has_next()) { + Bitmap::Clear(it.next()); + } +} + + +static void ClearMarkbits(Heap* heap) { + ClearMarkbits(heap->code_space()); + ClearMarkbits(heap->map_space()); + ClearMarkbits(heap->old_pointer_space()); + ClearMarkbits(heap->old_data_space()); + ClearMarkbits(heap->cell_space()); + ClearMarkbits(heap->new_space()); + + LargeObjectIterator it(heap->lo_space()); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + MarkBit mark_bit = Marking::MarkBitFrom(obj); + mark_bit.Clear(); + mark_bit.Next().Clear(); + } +} + + +bool Marking::TransferMark(Address old_start, Address new_start) { + // This is only used when resizing an object. + ASSERT(MemoryChunk::FromAddress(old_start) == + MemoryChunk::FromAddress(new_start)); + + // If the mark doesn't move, we don't check the color of the object. + // It doesn't matter whether the object is black, since it hasn't changed + // size, so the adjustment to the live data count will be zero anyway. 
+ if (old_start == new_start) return false; + + MarkBit new_mark_bit = MarkBitFrom(new_start); + MarkBit old_mark_bit = MarkBitFrom(old_start); + +#ifdef DEBUG + ObjectColor old_color = Color(old_mark_bit); +#endif + + if (Marking::IsBlack(old_mark_bit)) { + old_mark_bit.Clear(); + ASSERT(IsWhite(old_mark_bit)); + Marking::MarkBlack(new_mark_bit); + return true; + } else if (Marking::IsGrey(old_mark_bit)) { + ASSERT(heap_->incremental_marking()->IsMarking()); + old_mark_bit.Clear(); + old_mark_bit.Next().Clear(); + ASSERT(IsWhite(old_mark_bit)); + heap_->incremental_marking()->WhiteToGreyAndPush( + HeapObject::FromAddress(new_start), new_mark_bit); + heap_->incremental_marking()->RestartIfNotMarking(); + } + +#ifdef DEBUG + ObjectColor new_color = Color(new_mark_bit); + ASSERT(new_color == old_color); +#endif + + return false; +} + + +const char* AllocationSpaceName(AllocationSpace space) { + switch (space) { + case NEW_SPACE: return "NEW_SPACE"; + case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE"; + case OLD_DATA_SPACE: return "OLD_DATA_SPACE"; + case CODE_SPACE: return "CODE_SPACE"; + case MAP_SPACE: return "MAP_SPACE"; + case CELL_SPACE: return "CELL_SPACE"; + case LO_SPACE: return "LO_SPACE"; + default: + UNREACHABLE(); + } + + return NULL; +} + + +void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { + ASSERT(space->identity() == OLD_POINTER_SPACE || + space->identity() == OLD_DATA_SPACE || + space->identity() == CODE_SPACE); + + PageIterator it(space); + int count = 0; + if (it.has_next()) it.next(); // Never compact the first page. + while (it.has_next()) { + Page* p = it.next(); + bool evacuate = false; + if (FLAG_stress_compaction) { + int counter = space->heap()->ms_count(); + uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits; + if ((counter & 1) == (page_number & 1)) evacuate = true; + } else { + if (space->IsFragmented(p)) evacuate = true; + } + if (evacuate) { + AddEvacuationCandidate(p); + count++; + } else { + p->ClearEvacuationCandidate(); + } + } + + if (count > 0 && FLAG_trace_fragmentation) { + PrintF("Collected %d evacuation candidates for space %s\n", + count, + AllocationSpaceName(space->identity())); + } +} + + +void MarkCompactCollector::AbortCompaction() { + if (compacting_) { + int npages = evacuation_candidates_.length(); + for (int i = 0; i < npages; i++) { + Page* p = evacuation_candidates_[i]; + slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); + p->ClearEvacuationCandidate(); + p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); + } + compacting_ = false; + evacuation_candidates_.Rewind(0); + invalidated_code_.Rewind(0); + } + ASSERT_EQ(0, evacuation_candidates_.length()); +} + + void MarkCompactCollector::Prepare(GCTracer* tracer) { + was_marked_incrementally_ = heap()->incremental_marking()->IsMarking(); + + // Disable collection of maps if incremental marking is enabled. + // Map collection algorithm relies on a special map transition tree traversal + // order which is not implemented for incremental marking. + collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_; + // Rather than passing the tracer around we stash it in a static member // variable. 
tracer_ = tracer; @@ -120,16 +490,10 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) { ASSERT(state_ == IDLE); state_ = PREPARE_GC; #endif - ASSERT(!FLAG_always_compact || !FLAG_never_compact); - compacting_collection_ = - FLAG_always_compact || force_compaction_ || compact_on_next_gc_; - compact_on_next_gc_ = false; + ASSERT(!FLAG_never_compact || !FLAG_always_compact); - if (FLAG_never_compact) compacting_collection_ = false; - if (!heap()->map_space()->MapPointersEncodable()) - compacting_collection_ = false; - if (FLAG_collect_maps) CreateBackPointers(); + if (collect_maps_) CreateBackPointers(); #ifdef ENABLE_GDB_JIT_INTERFACE if (FLAG_gdbjit) { // If GDBJIT interface is active disable compaction. @@ -137,11 +501,32 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) { } #endif + // Clear marking bits for precise sweeping to collect all garbage. + if (was_marked_incrementally_ && PreciseSweepingRequired()) { + heap()->incremental_marking()->Abort(); + ClearMarkbits(heap_); + AbortCompaction(); + was_marked_incrementally_ = false; + } + + // Don't start compaction if we are in the middle of incremental + // marking cycle. We did not collect any slots. + if (!FLAG_never_compact && !was_marked_incrementally_) { + StartCompaction(); + } + PagedSpaces spaces; for (PagedSpace* space = spaces.next(); - space != NULL; space = spaces.next()) { - space->PrepareForMarkCompact(compacting_collection_); + space != NULL; + space = spaces.next()) { + space->PrepareForMarkCompact(); + } + +#ifdef DEBUG + if (!was_marked_incrementally_) { + VerifyMarkbitsAreClean(); } +#endif #ifdef DEBUG live_bytes_ = 0; @@ -168,31 +553,6 @@ void MarkCompactCollector::Finish() { heap()->isolate()->stub_cache()->Clear(); heap()->external_string_table_.CleanUp(); - - // If we've just compacted old space there's no reason to check the - // fragmentation limit. Just return. - if (HasCompacted()) return; - - // We compact the old generation on the next GC if it has gotten too - // fragmented (ie, we could recover an expected amount of space by - // reclaiming the waste and free list blocks). - static const int kFragmentationLimit = 15; // Percent. - static const int kFragmentationAllowed = 1 * MB; // Absolute. - intptr_t old_gen_recoverable = 0; - intptr_t old_gen_used = 0; - - OldSpaces spaces; - for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) { - old_gen_recoverable += space->Waste() + space->AvailableFree(); - old_gen_used += space->Size(); - } - - int old_gen_fragmentation = - static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used); - if (old_gen_fragmentation > kFragmentationLimit && - old_gen_recoverable > kFragmentationAllowed) { - compact_on_next_gc_ = true; - } } @@ -261,13 +621,21 @@ class CodeFlusher { SharedFunctionInfo* shared = candidate->unchecked_shared(); Code* code = shared->unchecked_code(); - if (!code->IsMarked()) { + MarkBit code_mark = Marking::MarkBitFrom(code); + if (!code_mark.Get()) { shared->set_code(lazy_compile); candidate->set_code(lazy_compile); } else { candidate->set_code(shared->unchecked_code()); } + // We are in the middle of a GC cycle so the write barrier in the code + // setter did not record the slot update and we have to do that manually. 
+ Address slot = candidate->address() + JSFunction::kCodeEntryOffset; + Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot)); + isolate_->heap()->mark_compact_collector()-> + RecordCodeEntrySlot(slot, target); + candidate = next_candidate; } @@ -285,7 +653,8 @@ class CodeFlusher { SetNextCandidate(candidate, NULL); Code* code = candidate->unchecked_code(); - if (!code->IsMarked()) { + MarkBit code_mark = Marking::MarkBitFrom(code); + if (!code_mark.Get()) { candidate->set_code(lazy_compile); } @@ -355,14 +724,14 @@ static inline HeapObject* ShortCircuitConsString(Object** p) { // except the maps for the object and its possible substrings might be // marked. HeapObject* object = HeapObject::cast(*p); - MapWord map_word = object->map_word(); - map_word.ClearMark(); - InstanceType type = map_word.ToMap()->instance_type(); + if (!FLAG_clever_optimizations) return object; + Map* map = object->map(); + InstanceType type = map->instance_type(); if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second(); - Heap* heap = map_word.ToMap()->heap(); - if (second != heap->raw_unchecked_empty_string()) { + Heap* heap = map->GetHeap(); + if (second != heap->empty_string()) { return object; } @@ -404,14 +773,12 @@ class StaticMarkingVisitor : public StaticVisitorBase { FixedArray::BodyDescriptor, void>::Visit); - table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit); + table_.Register(kVisitGlobalContext, &VisitGlobalContext); - table_.Register(kVisitGlobalContext, - &FixedBodyVisitor<StaticMarkingVisitor, - Context::MarkCompactBodyDescriptor, - void>::Visit); + table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit); table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); + table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit); table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit); table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); @@ -456,7 +823,7 @@ class StaticMarkingVisitor : public StaticVisitorBase { } INLINE(static void VisitPointer(Heap* heap, Object** p)) { - MarkObjectByPointer(heap, p); + MarkObjectByPointer(heap->mark_compact_collector(), p, p); } INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { @@ -466,29 +833,49 @@ class StaticMarkingVisitor : public StaticVisitorBase { if (VisitUnmarkedObjects(heap, start, end)) return; // We are close to a stack overflow, so just mark the objects. } - for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p); + MarkCompactCollector* collector = heap->mark_compact_collector(); + for (Object** p = start; p < end; p++) { + MarkObjectByPointer(collector, start, p); + } + } + + static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) { + ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); + JSGlobalPropertyCell* cell = + JSGlobalPropertyCell::cast(rinfo->target_cell()); + MarkBit mark = Marking::MarkBitFrom(cell); + heap->mark_compact_collector()->MarkObject(cell, mark); + } + + static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) { + ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); + // TODO(mstarzinger): We do not short-circuit cons strings here, verify + // that there can be no such embedded pointers and add assertion here. 
+ HeapObject* object = HeapObject::cast(rinfo->target_object()); + heap->mark_compact_collector()->RecordRelocSlot(rinfo, object); + MarkBit mark = Marking::MarkBitFrom(object); + heap->mark_compact_collector()->MarkObject(object, mark); } static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) { ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); - Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address()); - if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) { + Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); + if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) { IC::Clear(rinfo->pc()); // Please note targets for cleared inline cached do not have to be // marked since they are contained in HEAP->non_monomorphic_cache(). + target = Code::GetCodeFromTargetAddress(rinfo->target_address()); } else { - heap->mark_compact_collector()->MarkObject(code); - } - } - - static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) { - ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); - Object* cell = rinfo->target_cell(); - Object* old_cell = cell; - VisitPointer(heap, &cell); - if (cell != old_cell) { - rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell)); + if (FLAG_cleanup_code_caches_at_gc && + target->kind() == Code::STUB && + target->major_key() == CodeStub::CallFunction && + target->has_function_cache()) { + CallFunctionStub::Clear(heap, rinfo->pc()); + } + MarkBit code_mark = Marking::MarkBitFrom(target); + heap->mark_compact_collector()->MarkObject(target, code_mark); } + heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); } static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) { @@ -496,17 +883,21 @@ class StaticMarkingVisitor : public StaticVisitorBase { rinfo->IsPatchedReturnSequence()) || (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && rinfo->IsPatchedDebugBreakSlotSequence())); - HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address()); - heap->mark_compact_collector()->MarkObject(code); + Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); + MarkBit code_mark = Marking::MarkBitFrom(target); + heap->mark_compact_collector()->MarkObject(target, code_mark); + heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); } // Mark object pointed to by p. - INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) { + INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector, + Object** anchor_slot, + Object** p)) { if (!(*p)->IsHeapObject()) return; HeapObject* object = ShortCircuitConsString(p); - if (!object->IsMarked()) { - heap->mark_compact_collector()->MarkUnmarkedObject(object); - } + collector->RecordSlot(anchor_slot, p, object); + MarkBit mark = Marking::MarkBitFrom(object); + collector->MarkObject(object, mark); } @@ -515,12 +906,15 @@ class StaticMarkingVisitor : public StaticVisitorBase { HeapObject* obj)) { #ifdef DEBUG ASSERT(Isolate::Current()->heap()->Contains(obj)); - ASSERT(!obj->IsMarked()); + ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj)); #endif Map* map = obj->map(); - collector->SetMark(obj); + Heap* heap = obj->GetHeap(); + MarkBit mark = Marking::MarkBitFrom(obj); + heap->mark_compact_collector()->SetMark(obj, mark); // Mark the map pointer and the body. 
- if (!map->IsMarked()) collector->MarkUnmarkedObject(map); + MarkBit map_mark = Marking::MarkBitFrom(map); + heap->mark_compact_collector()->MarkObject(map, map_mark); IterateBody(map, obj); } @@ -536,9 +930,12 @@ class StaticMarkingVisitor : public StaticVisitorBase { MarkCompactCollector* collector = heap->mark_compact_collector(); // Visit the unmarked objects. for (Object** p = start; p < end; p++) { - if (!(*p)->IsHeapObject()) continue; - HeapObject* obj = HeapObject::cast(*p); - if (obj->IsMarked()) continue; + Object* o = *p; + if (!o->IsHeapObject()) continue; + collector->RecordSlot(start, p, o); + HeapObject* obj = HeapObject::cast(o); + MarkBit mark = Marking::MarkBitFrom(obj); + if (mark.Get()) continue; VisitUnmarkedObject(collector, obj); } return true; @@ -567,7 +964,7 @@ class StaticMarkingVisitor : public StaticVisitorBase { void> StructObjectVisitor; static void VisitJSWeakMap(Map* map, HeapObject* object) { - MarkCompactCollector* collector = map->heap()->mark_compact_collector(); + MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object); // Enqueue weak map in linked list of encountered weak maps. @@ -578,25 +975,28 @@ class StaticMarkingVisitor : public StaticVisitorBase { // Skip visiting the backing hash table containing the mappings. int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object); BodyVisitorBase<StaticMarkingVisitor>::IteratePointers( - map->heap(), + map->GetHeap(), object, JSWeakMap::BodyDescriptor::kStartOffset, JSWeakMap::kTableOffset); BodyVisitorBase<StaticMarkingVisitor>::IteratePointers( - map->heap(), + map->GetHeap(), object, JSWeakMap::kTableOffset + kPointerSize, object_size); // Mark the backing hash table without pushing it on the marking stack. - ASSERT(!weak_map->unchecked_table()->IsMarked()); - ASSERT(weak_map->unchecked_table()->map()->IsMarked()); - collector->SetMark(weak_map->unchecked_table()); + ASSERT(!MarkCompactCollector::IsMarked(weak_map->unchecked_table())); + ASSERT(MarkCompactCollector::IsMarked(weak_map->unchecked_table()->map())); + + HeapObject* unchecked_table = weak_map->unchecked_table(); + MarkBit mark_bit = Marking::MarkBitFrom(unchecked_table); + collector->SetMark(unchecked_table, mark_bit); } static void VisitCode(Map* map, HeapObject* object) { reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>( - map->heap()); + map->GetHeap()); } // Code flushing support. @@ -608,7 +1008,7 @@ class StaticMarkingVisitor : public StaticVisitorBase { static const int kRegExpCodeThreshold = 5; inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) { - Object* undefined = heap->raw_unchecked_undefined_value(); + Object* undefined = heap->undefined_value(); return (info->script() != undefined) && (reinterpret_cast<Script*>(info->script())->source() != undefined); } @@ -629,7 +1029,9 @@ class StaticMarkingVisitor : public StaticVisitorBase { // Code is either on stack, in compilation cache or referenced // by optimized version of function. - if (function->unchecked_code()->IsMarked()) { + MarkBit code_mark = + Marking::MarkBitFrom(function->unchecked_code()); + if (code_mark.Get()) { shared_info->set_code_age(0); return false; } @@ -645,7 +1047,9 @@ class StaticMarkingVisitor : public StaticVisitorBase { inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) { // Code is either on stack, in compilation cache or referenced // by optimized version of function. 
- if (shared_info->unchecked_code()->IsMarked()) { + MarkBit code_mark = + Marking::MarkBitFrom(shared_info->unchecked_code()); + if (code_mark.Get()) { shared_info->set_code_age(0); return false; } @@ -658,11 +1062,7 @@ class StaticMarkingVisitor : public StaticVisitorBase { // We never flush code for Api functions. Object* function_data = shared_info->function_data(); - if (function_data->IsHeapObject() && - (SafeMap(function_data)->instance_type() == - FUNCTION_TEMPLATE_INFO_TYPE)) { - return false; - } + if (function_data->IsFunctionTemplateInfo()) return false; // Only flush code for functions. if (shared_info->code()->kind() != Code::FUNCTION) return false; @@ -695,40 +1095,9 @@ class StaticMarkingVisitor : public StaticVisitorBase { return true; } - - static inline Map* SafeMap(Object* obj) { - MapWord map_word = HeapObject::cast(obj)->map_word(); - map_word.ClearMark(); - map_word.ClearOverflow(); - return map_word.ToMap(); - } - - - static inline bool IsJSBuiltinsObject(Object* obj) { - return obj->IsHeapObject() && - (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE); - } - - static inline bool IsValidNotBuiltinContext(Object* ctx) { - if (!ctx->IsHeapObject()) return false; - - Map* map = SafeMap(ctx); - Heap* heap = map->heap(); - if (!(map == heap->raw_unchecked_function_context_map() || - map == heap->raw_unchecked_catch_context_map() || - map == heap->raw_unchecked_with_context_map() || - map == heap->raw_unchecked_global_context_map())) { - return false; - } - - Context* context = reinterpret_cast<Context*>(ctx); - - if (IsJSBuiltinsObject(context->global())) { - return false; - } - - return true; + return ctx->IsContext() && + !Context::cast(ctx)->global()->IsJSBuiltinsObject(); } @@ -748,13 +1117,15 @@ class StaticMarkingVisitor : public StaticVisitorBase { bool is_ascii) { // Make sure that the fixed array is in fact initialized on the RegExp. // We could potentially trigger a GC when initializing the RegExp. - if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return; + if (HeapObject::cast(re->data())->map()->instance_type() != + FIXED_ARRAY_TYPE) return; // Make sure this is a RegExp that actually contains code. if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return; Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii)); - if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) { + if (!code->IsSmi() && + HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) { // Save a copy that can be reinstated if we need the code again. re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii), code, @@ -790,7 +1161,7 @@ class StaticMarkingVisitor : public StaticVisitorBase { // If we did not use the code for kRegExpCodeThreshold mark sweep GCs // we flush the code. 
static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) { - Heap* heap = map->heap(); + Heap* heap = map->GetHeap(); MarkCompactCollector* collector = heap->mark_compact_collector(); if (!collector->is_code_flushing_enabled()) { VisitJSRegExpFields(map, object); @@ -807,7 +1178,7 @@ class StaticMarkingVisitor : public StaticVisitorBase { static void VisitSharedFunctionInfoAndFlushCode(Map* map, HeapObject* object) { - MarkCompactCollector* collector = map->heap()->mark_compact_collector(); + MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); if (!collector->is_code_flushing_enabled()) { VisitSharedFunctionInfoGeneric(map, object); return; @@ -818,7 +1189,7 @@ class StaticMarkingVisitor : public StaticVisitorBase { static void VisitSharedFunctionInfoAndFlushCodeGeneric( Map* map, HeapObject* object, bool known_flush_code_candidate) { - Heap* heap = map->heap(); + Heap* heap = map->GetHeap(); SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap(); @@ -835,18 +1206,30 @@ class StaticMarkingVisitor : public StaticVisitorBase { static void VisitCodeEntry(Heap* heap, Address entry_address) { - Object* code = Code::GetObjectFromEntryAddress(entry_address); - Object* old_code = code; - VisitPointer(heap, &code); - if (code != old_code) { - Memory::Address_at(entry_address) = - reinterpret_cast<Code*>(code)->entry(); - } + Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address)); + MarkBit mark = Marking::MarkBitFrom(code); + heap->mark_compact_collector()->MarkObject(code, mark); + heap->mark_compact_collector()-> + RecordCodeEntrySlot(entry_address, code); } + static void VisitGlobalContext(Map* map, HeapObject* object) { + FixedBodyVisitor<StaticMarkingVisitor, + Context::MarkCompactBodyDescriptor, + void>::Visit(map, object); + + MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); + for (int idx = Context::FIRST_WEAK_SLOT; + idx < Context::GLOBAL_CONTEXT_SLOTS; + ++idx) { + Object** slot = + HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx)); + collector->RecordSlot(slot, slot, *slot); + } + } static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) { - Heap* heap = map->heap(); + Heap* heap = map->GetHeap(); MarkCompactCollector* collector = heap->mark_compact_collector(); if (!collector->is_code_flushing_enabled()) { VisitJSFunction(map, object); @@ -861,7 +1244,9 @@ class StaticMarkingVisitor : public StaticVisitorBase { } if (!flush_code_candidate) { - collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code()); + Code* code = jsfunction->unchecked_shared()->unchecked_code(); + MarkBit code_mark = Marking::MarkBitFrom(code); + heap->mark_compact_collector()->MarkObject(code, code_mark); if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) { // For optimized functions we should retain both non-optimized version @@ -877,7 +1262,11 @@ class StaticMarkingVisitor : public StaticVisitorBase { i < count; i++) { JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i)); - collector->MarkObject(inlined->unchecked_shared()->unchecked_code()); + Code* inlined_code = inlined->unchecked_shared()->unchecked_code(); + MarkBit inlined_code_mark = + Marking::MarkBitFrom(inlined_code); + heap->mark_compact_collector()->MarkObject( + inlined_code, inlined_code_mark); } } } @@ -902,12 +1291,11 @@ class StaticMarkingVisitor : public StaticVisitorBase { static inline void 
VisitJSFunctionFields(Map* map, JSFunction* object, bool flush_code_candidate) { - Heap* heap = map->heap(); - MarkCompactCollector* collector = heap->mark_compact_collector(); + Heap* heap = map->GetHeap(); VisitPointers(heap, - SLOT_ADDR(object, JSFunction::kPropertiesOffset), - SLOT_ADDR(object, JSFunction::kCodeEntryOffset)); + HeapObject::RawField(object, JSFunction::kPropertiesOffset), + HeapObject::RawField(object, JSFunction::kCodeEntryOffset)); if (!flush_code_candidate) { VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); @@ -917,29 +1305,39 @@ class StaticMarkingVisitor : public StaticVisitorBase { // Visit shared function info to avoid double checking of it's // flushability. SharedFunctionInfo* shared_info = object->unchecked_shared(); - if (!shared_info->IsMarked()) { + MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info); + if (!shared_info_mark.Get()) { Map* shared_info_map = shared_info->map(); - collector->SetMark(shared_info); - collector->MarkObject(shared_info_map); + MarkBit shared_info_map_mark = + Marking::MarkBitFrom(shared_info_map); + heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark); + heap->mark_compact_collector()->MarkObject(shared_info_map, + shared_info_map_mark); VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map, shared_info, true); } } - VisitPointers(heap, - SLOT_ADDR(object, - JSFunction::kCodeEntryOffset + kPointerSize), - SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset)); + VisitPointers( + heap, + HeapObject::RawField(object, + JSFunction::kCodeEntryOffset + kPointerSize), + HeapObject::RawField(object, + JSFunction::kNonWeakFieldsEndOffset)); // Don't visit the next function list field as it is a weak reference. + Object** next_function = + HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset); + heap->mark_compact_collector()->RecordSlot( + next_function, next_function, *next_function); } static inline void VisitJSRegExpFields(Map* map, HeapObject* object) { int last_property_offset = JSRegExp::kSize + kPointerSize * map->inobject_properties(); - VisitPointers(map->heap(), + VisitPointers(map->GetHeap(), SLOT_ADDR(object, JSRegExp::kPropertiesOffset), SLOT_ADDR(object, last_property_offset)); } @@ -995,7 +1393,9 @@ class CodeMarkingVisitor : public ThreadVisitor { void VisitThread(Isolate* isolate, ThreadLocalTop* top) { for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) { - collector_->MarkObject(it.frame()->unchecked_code()); + Code* code = it.frame()->unchecked_code(); + MarkBit code_bit = Marking::MarkBitFrom(code); + collector_->MarkObject(it.frame()->unchecked_code(), code_bit); } } @@ -1017,8 +1417,10 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { Object* obj = *slot; if (obj->IsSharedFunctionInfo()) { SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); - collector_->MarkObject(shared->unchecked_code()); - collector_->MarkObject(shared); + MarkBit shared_mark = Marking::MarkBitFrom(shared); + MarkBit code_mark = Marking::MarkBitFrom(shared->unchecked_code()); + collector_->MarkObject(shared->unchecked_code(), code_mark); + collector_->MarkObject(shared, shared_mark); } } @@ -1030,7 +1432,8 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { void MarkCompactCollector::PrepareForCodeFlushing() { ASSERT(heap() == Isolate::Current()->heap()); - if (!FLAG_flush_code) { + // TODO(1609) Currently incremental marker does not support code flushing. 
+ if (!FLAG_flush_code || was_marked_incrementally_) { EnableCodeFlushing(false); return; } @@ -1042,16 +1445,21 @@ void MarkCompactCollector::PrepareForCodeFlushing() { return; } #endif + EnableCodeFlushing(true); // Ensure that empty descriptor array is marked. Method MarkDescriptorArray // relies on it being marked before any other descriptor array. - MarkObject(heap()->raw_unchecked_empty_descriptor_array()); + HeapObject* descriptor_array = heap()->empty_descriptor_array(); + MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array); + MarkObject(descriptor_array, descriptor_array_mark); // Make sure we are not referencing the code from the stack. ASSERT(this == heap()->mark_compact_collector()); for (StackFrameIterator it; !it.done(); it.Advance()) { - MarkObject(it.frame()->unchecked_code()); + Code* code = it.frame()->unchecked_code(); + MarkBit code_mark = Marking::MarkBitFrom(code); + MarkObject(code, code_mark); } // Iterate the archived stacks in all threads to check if @@ -1064,7 +1472,7 @@ void MarkCompactCollector::PrepareForCodeFlushing() { heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); - ProcessMarkingStack(); + ProcessMarkingDeque(); } @@ -1088,19 +1496,21 @@ class RootMarkingVisitor : public ObjectVisitor { // Replace flat cons strings in place. HeapObject* object = ShortCircuitConsString(p); - if (object->IsMarked()) return; + MarkBit mark_bit = Marking::MarkBitFrom(object); + if (mark_bit.Get()) return; Map* map = object->map(); // Mark the object. - collector_->SetMark(object); + collector_->SetMark(object, mark_bit); // Mark the map pointer and body, and push them on the marking stack. - collector_->MarkObject(map); + MarkBit map_mark = Marking::MarkBitFrom(map); + collector_->MarkObject(map, map_mark); StaticMarkingVisitor::IterateBody(map, object); // Mark all the objects reachable from the map and body. May leave // overflowed objects in the heap. - collector_->EmptyMarkingStack(); + collector_->EmptyMarkingDeque(); } MarkCompactCollector* collector_; @@ -1116,17 +1526,19 @@ class SymbolTableCleaner : public ObjectVisitor { virtual void VisitPointers(Object** start, Object** end) { // Visit all HeapObject pointers in [start, end). for (Object** p = start; p < end; p++) { - if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) { + Object* o = *p; + if (o->IsHeapObject() && + !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) { // Check if the symbol being pruned is an external symbol. We need to // delete the associated external data as this symbol is going away. // Since no objects have yet been moved we can safely access the map of // the object. - if ((*p)->IsExternalString()) { + if (o->IsExternalString()) { heap_->FinalizeExternalString(String::cast(*p)); } // Set the entry to null_value (as deleted). 
- *p = heap_->raw_unchecked_null_value(); + *p = heap_->null_value(); pointers_removed_++; } } @@ -1147,8 +1559,7 @@ class SymbolTableCleaner : public ObjectVisitor { class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { public: virtual Object* RetainAs(Object* object) { - MapWord first_word = HeapObject::cast(object)->map_word(); - if (first_word.IsMarked()) { + if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) { return object; } else { return NULL; @@ -1157,28 +1568,26 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { }; -void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) { - ASSERT(!object->IsMarked()); +void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) { + ASSERT(IsMarked(object)); ASSERT(HEAP->Contains(object)); if (object->IsMap()) { Map* map = Map::cast(object); if (FLAG_cleanup_code_caches_at_gc) { map->ClearCodeCache(heap()); } - SetMark(map); // When map collection is enabled we have to mark through map's transitions // in a special way to make transition links weak. // Only maps for subclasses of JSReceiver can have transitions. STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); - if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { + if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { MarkMapContents(map); } else { - marking_stack_.Push(map); + marking_deque_.PushBlack(map); } } else { - SetMark(object); - marking_stack_.Push(object); + marking_deque_.PushBlack(object); } } @@ -1187,12 +1596,17 @@ void MarkCompactCollector::MarkMapContents(Map* map) { // Mark prototype transitions array but don't push it into marking stack. // This will make references from it weak. We will clean dead prototype // transitions in ClearNonLiveTransitions. - FixedArray* prototype_transitions = map->unchecked_prototype_transitions(); - if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions); + FixedArray* prototype_transitions = map->prototype_transitions(); + MarkBit mark = Marking::MarkBitFrom(prototype_transitions); + if (!mark.Get()) { + mark.Set(); + MemoryChunk::IncrementLiveBytes(prototype_transitions->address(), + prototype_transitions->Size()); + } - Object* raw_descriptor_array = - *HeapObject::RawField(map, - Map::kInstanceDescriptorsOrBitField3Offset); + Object** raw_descriptor_array_slot = + HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset); + Object* raw_descriptor_array = *raw_descriptor_array_slot; if (!raw_descriptor_array->IsSmi()) { MarkDescriptorArray( reinterpret_cast<DescriptorArray*>(raw_descriptor_array)); @@ -1206,24 +1620,26 @@ void MarkCompactCollector::MarkMapContents(Map* map) { Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); - StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot); + StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot); } void MarkCompactCollector::MarkDescriptorArray( DescriptorArray* descriptors) { - if (descriptors->IsMarked()) return; + MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors); + if (descriptors_mark.Get()) return; // Empty descriptor array is marked as a root before any maps are marked. 
- ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array()); - SetMark(descriptors); + ASSERT(descriptors != heap()->empty_descriptor_array()); + SetMark(descriptors, descriptors_mark); FixedArray* contents = reinterpret_cast<FixedArray*>( descriptors->get(DescriptorArray::kContentArrayIndex)); ASSERT(contents->IsHeapObject()); - ASSERT(!contents->IsMarked()); + ASSERT(!IsMarked(contents)); ASSERT(contents->IsFixedArray()); ASSERT(contents->length() >= 2); - SetMark(contents); + MarkBit contents_mark = Marking::MarkBitFrom(contents); + SetMark(contents, contents_mark); // Contents contains (value, details) pairs. If the details say that the type // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as @@ -1233,27 +1649,45 @@ void MarkCompactCollector::MarkDescriptorArray( // If the pair (value, details) at index i, i+1 is not // a transition or null descriptor, mark the value. PropertyDetails details(Smi::cast(contents->get(i + 1))); - if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) { - HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i)); - if (object->IsHeapObject() && !object->IsMarked()) { - SetMark(object); - marking_stack_.Push(object); + + Object** slot = contents->data_start() + i; + Object* value = *slot; + if (!value->IsHeapObject()) continue; + + RecordSlot(slot, slot, *slot); + + PropertyType type = details.type(); + if (type < FIRST_PHANTOM_PROPERTY_TYPE) { + HeapObject* object = HeapObject::cast(value); + MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object)); + if (!mark.Get()) { + SetMark(HeapObject::cast(object), mark); + marking_deque_.PushBlack(object); + } + } else if (type == ELEMENTS_TRANSITION && value->IsFixedArray()) { + // For maps with multiple elements transitions, the transition maps are + // stored in a FixedArray. Keep the fixed array alive but not the maps + // that it refers to. + HeapObject* object = HeapObject::cast(value); + MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object)); + if (!mark.Get()) { + SetMark(HeapObject::cast(object), mark); } } } // The DescriptorArray descriptors contains a pointer to its contents array, // but the contents array is already marked. - marking_stack_.Push(descriptors); + marking_deque_.PushBlack(descriptors); } void MarkCompactCollector::CreateBackPointers() { HeapObjectIterator iterator(heap()->map_space()); - for (HeapObject* next_object = iterator.next(); - next_object != NULL; next_object = iterator.next()) { - if (next_object->IsMap()) { // Could also be ByteArray on free list. + for (HeapObject* next_object = iterator.Next(); + next_object != NULL; next_object = iterator.Next()) { + if (next_object->IsMap()) { // Could also be FreeSpace object on free list. Map* map = Map::cast(next_object); - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { map->CreateBackPointers(); } else { @@ -1264,54 +1698,123 @@ void MarkCompactCollector::CreateBackPointers() { } -static int OverflowObjectSize(HeapObject* obj) { - // Recover the normal map pointer, it might be marked as live and - // overflowed. - MapWord map_word = obj->map_word(); - map_word.ClearMark(); - map_word.ClearOverflow(); - return obj->SizeFromMap(map_word.ToMap()); +// Fill the marking stack with overflowed objects returned by the given +// iterator. 
Stop when the marking stack is filled or the end of the space +// is reached, whichever comes first. +template<class T> +static void DiscoverGreyObjectsWithIterator(Heap* heap, + MarkingDeque* marking_deque, + T* it) { + // The caller should ensure that the marking stack is initially not full, + // so that we don't waste effort pointlessly scanning for objects. + ASSERT(!marking_deque->IsFull()); + + Map* filler_map = heap->one_pointer_filler_map(); + for (HeapObject* object = it->Next(); + object != NULL; + object = it->Next()) { + MarkBit markbit = Marking::MarkBitFrom(object); + if ((object->map() != filler_map) && Marking::IsGrey(markbit)) { + Marking::GreyToBlack(markbit); + MemoryChunk::IncrementLiveBytes(object->address(), object->Size()); + marking_deque->PushBlack(object); + if (marking_deque->IsFull()) return; + } + } } -class OverflowedObjectsScanner : public AllStatic { - public: - // Fill the marking stack with overflowed objects returned by the given - // iterator. Stop when the marking stack is filled or the end of the space - // is reached, whichever comes first. - template<class T> - static inline void ScanOverflowedObjects(MarkCompactCollector* collector, - T* it) { - // The caller should ensure that the marking stack is initially not full, - // so that we don't waste effort pointlessly scanning for objects. - ASSERT(!collector->marking_stack_.is_full()); - - for (HeapObject* object = it->next(); object != NULL; object = it->next()) { - if (object->IsOverflowed()) { - object->ClearOverflow(); - ASSERT(object->IsMarked()); - ASSERT(HEAP->Contains(object)); - collector->marking_stack_.Push(object); - if (collector->marking_stack_.is_full()) return; - } +static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts); + + +static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) { + ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); + ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + + MarkBit::CellType* cells = p->markbits()->cells(); + + int last_cell_index = + Bitmap::IndexToCell( + Bitmap::CellAlignIndex( + p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); + + int cell_index = Page::kFirstUsedCell; + Address cell_base = p->ObjectAreaStart(); + + for (cell_index = Page::kFirstUsedCell; + cell_index < last_cell_index; + cell_index++, cell_base += 32 * kPointerSize) { + ASSERT((unsigned)cell_index == + Bitmap::IndexToCell( + Bitmap::CellAlignIndex( + p->AddressToMarkbitIndex(cell_base)))); + + const MarkBit::CellType current_cell = cells[cell_index]; + if (current_cell == 0) continue; + + const MarkBit::CellType next_cell = cells[cell_index + 1]; + MarkBit::CellType grey_objects = current_cell & + ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1))); + + int offset = 0; + while (grey_objects != 0) { + int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects); + grey_objects >>= trailing_zeros; + offset += trailing_zeros; + MarkBit markbit(&cells[cell_index], 1 << offset, false); + ASSERT(Marking::IsGrey(markbit)); + Marking::GreyToBlack(markbit); + Address addr = cell_base + offset * kPointerSize; + HeapObject* object = HeapObject::FromAddress(addr); + MemoryChunk::IncrementLiveBytes(object->address(), object->Size()); + marking_deque->PushBlack(object); + if (marking_deque->IsFull()) return; + offset += 2; + grey_objects >>= 2; } + + grey_objects >>= (Bitmap::kBitsPerCell - 1); } -}; +} + + 
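DiscoverGreyObjectsOnPage above scans the page's mark bitmap cell by cell. An object's colour is encoded in the bit at its start together with the following bit (white = 00, black = 10, grey = 11, per the ASSERTs), so a position is the start of a grey object exactly when its bit and the next bit are both set, and the next bit can spill into the following 32-bit cell. A self-contained sketch of that extraction, with an illustrative CountTrailingZeros standing in for CompilerIntrinsics::CountTrailingZeros:

#include <cstdint>
#include <cstdio>
#include <vector>

static int CountTrailingZeros(uint32_t x) {
  int n = 0;
  while ((x & 1) == 0) { x >>= 1; ++n; }   // caller guarantees x != 0
  return n;
}

// Returns the bit offsets (within current_cell) of grey object starts.
static std::vector<int> GreyOffsets(uint32_t current_cell, uint32_t next_cell) {
  uint32_t grey = current_cell & ((current_cell >> 1) | (next_cell << 31));
  std::vector<int> offsets;
  int offset = 0;
  while (grey != 0) {
    int tz = CountTrailingZeros(grey);
    grey >>= tz;
    offset += tz;
    offsets.push_back(offset);   // a grey object starts at this mark bit
    grey >>= 2;                  // skip both bits of this object's colour
    offset += 2;
  }
  return offsets;
}

int main() {
  // Bits 2..3 = 11 (grey), bits 5..6 = 10 (black), bit 31 pairs with bit 0
  // of the next cell to form another grey mark.
  uint32_t cell = (3u << 2) | (1u << 5) | (1u << 31);
  uint32_t next = 1u;
  for (int off : GreyOffsets(cell, next)) std::printf("grey at bit %d\n", off);
  return 0;
}

The shift of next_cell by 31 is what lets a grey object whose second mark bit falls into the next cell still be detected while scanning the current one.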
+static void DiscoverGreyObjectsInSpace(Heap* heap, + MarkingDeque* marking_deque, + PagedSpace* space) { + if (!space->was_swept_conservatively()) { + HeapObjectIterator it(space); + DiscoverGreyObjectsWithIterator(heap, marking_deque, &it); + } else { + PageIterator it(space); + while (it.has_next()) { + Page* p = it.next(); + DiscoverGreyObjectsOnPage(marking_deque, p); + if (marking_deque->IsFull()) return; + } + } +} bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { - return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked(); + Object* o = *p; + if (!o->IsHeapObject()) return false; + HeapObject* heap_object = HeapObject::cast(o); + MarkBit mark = Marking::MarkBitFrom(heap_object); + return !mark.Get(); } void MarkCompactCollector::MarkSymbolTable() { - SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table(); + SymbolTable* symbol_table = heap()->symbol_table(); // Mark the symbol table itself. - SetMark(symbol_table); + MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table); + SetMark(symbol_table, symbol_table_mark); // Explicitly mark the prefix. MarkingVisitor marker(heap()); symbol_table->IteratePrefix(&marker); - ProcessMarkingStack(); + ProcessMarkingDeque(); } @@ -1324,9 +1827,9 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { MarkSymbolTable(); // There may be overflowed objects in the heap. Visit them now. - while (marking_stack_.overflowed()) { - RefillMarkingStack(); - EmptyMarkingStack(); + while (marking_deque_.overflowed()) { + RefillMarkingDeque(); + EmptyMarkingDeque(); } } @@ -1344,9 +1847,13 @@ void MarkCompactCollector::MarkObjectGroups() { bool group_marked = false; for (size_t j = 0; j < entry->length_; j++) { Object* object = *objects[j]; - if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) { - group_marked = true; - break; + if (object->IsHeapObject()) { + HeapObject* heap_object = HeapObject::cast(object); + MarkBit mark = Marking::MarkBitFrom(heap_object); + if (mark.Get()) { + group_marked = true; + break; + } } } @@ -1355,17 +1862,21 @@ void MarkCompactCollector::MarkObjectGroups() { continue; } - // An object in the group is marked, so mark all heap objects in - // the group. + // An object in the group is marked, so mark as grey all white heap + // objects in the group. for (size_t j = 0; j < entry->length_; ++j) { - if ((*objects[j])->IsHeapObject()) { - MarkObject(HeapObject::cast(*objects[j])); + Object* object = *objects[j]; + if (object->IsHeapObject()) { + HeapObject* heap_object = HeapObject::cast(object); + MarkBit mark = Marking::MarkBitFrom(heap_object); + MarkObject(heap_object, mark); } } - // Once the entire group has been marked, dispose it because it's - // not needed anymore. + // Once the entire group has been colored grey, set the object group + // to NULL so it won't be processed again. entry->Dispose(); + object_groups->at(i) = NULL; } object_groups->Rewind(last); } @@ -1380,7 +1891,7 @@ void MarkCompactCollector::MarkImplicitRefGroups() { ImplicitRefGroup* entry = ref_groups->at(i); ASSERT(entry != NULL); - if (!(*entry->parent_)->IsMarked()) { + if (!IsMarked(*entry->parent_)) { (*ref_groups)[last++] = entry; continue; } @@ -1389,7 +1900,9 @@ void MarkCompactCollector::MarkImplicitRefGroups() { // A parent object is marked, so mark all child heap objects. 
for (size_t j = 0; j < entry->length_; ++j) { if ((*children[j])->IsHeapObject()) { - MarkObject(HeapObject::cast(*children[j])); + HeapObject* child = HeapObject::cast(*children[j]); + MarkBit mark = Marking::MarkBitFrom(child); + MarkObject(child, mark); } } @@ -1405,21 +1918,17 @@ void MarkCompactCollector::MarkImplicitRefGroups() { // Before: the marking stack contains zero or more heap object pointers. // After: the marking stack is empty, and all objects reachable from the // marking stack have been marked, or are overflowed in the heap. -void MarkCompactCollector::EmptyMarkingStack() { - while (!marking_stack_.is_empty()) { - while (!marking_stack_.is_empty()) { - HeapObject* object = marking_stack_.Pop(); +void MarkCompactCollector::EmptyMarkingDeque() { + while (!marking_deque_.IsEmpty()) { + while (!marking_deque_.IsEmpty()) { + HeapObject* object = marking_deque_.Pop(); ASSERT(object->IsHeapObject()); ASSERT(heap()->Contains(object)); - ASSERT(object->IsMarked()); - ASSERT(!object->IsOverflowed()); + ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); - // Because the object is marked, we have to recover the original map - // pointer and use it to mark the object's body. - MapWord map_word = object->map_word(); - map_word.ClearMark(); - Map* map = map_word.ToMap(); - MarkObject(map); + Map* map = object->map(); + MarkBit map_mark = Marking::MarkBitFrom(map); + MarkObject(map, map_mark); StaticMarkingVisitor::IterateBody(map, object); } @@ -1436,39 +1945,45 @@ void MarkCompactCollector::EmptyMarkingStack() { // before sweeping completes. If sweeping completes, there are no remaining // overflowed objects in the heap so the overflow flag on the markings stack // is cleared. -void MarkCompactCollector::RefillMarkingStack() { - ASSERT(marking_stack_.overflowed()); +void MarkCompactCollector::RefillMarkingDeque() { + ASSERT(marking_deque_.overflowed()); - SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize); - OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it); - if (marking_stack_.is_full()) return; + SemiSpaceIterator new_it(heap()->new_space()); + DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it); + if (marking_deque_.IsFull()) return; - HeapObjectIterator old_pointer_it(heap()->old_pointer_space(), - &OverflowObjectSize); - OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it); - if (marking_stack_.is_full()) return; + DiscoverGreyObjectsInSpace(heap(), + &marking_deque_, + heap()->old_pointer_space()); + if (marking_deque_.IsFull()) return; - HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize); - OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it); - if (marking_stack_.is_full()) return; + DiscoverGreyObjectsInSpace(heap(), + &marking_deque_, + heap()->old_data_space()); + if (marking_deque_.IsFull()) return; - HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize); - OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it); - if (marking_stack_.is_full()) return; + DiscoverGreyObjectsInSpace(heap(), + &marking_deque_, + heap()->code_space()); + if (marking_deque_.IsFull()) return; - HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize); - OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it); - if (marking_stack_.is_full()) return; + DiscoverGreyObjectsInSpace(heap(), + &marking_deque_, + heap()->map_space()); + if (marking_deque_.IsFull()) return; - HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize); - 
OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it); - if (marking_stack_.is_full()) return; + DiscoverGreyObjectsInSpace(heap(), + &marking_deque_, + heap()->cell_space()); + if (marking_deque_.IsFull()) return; - LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize); - OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it); - if (marking_stack_.is_full()) return; + LargeObjectIterator lo_it(heap()->lo_space()); + DiscoverGreyObjectsWithIterator(heap(), + &marking_deque_, + &lo_it); + if (marking_deque_.IsFull()) return; - marking_stack_.clear_overflowed(); + marking_deque_.ClearOverflowed(); } @@ -1476,23 +1991,23 @@ void MarkCompactCollector::RefillMarkingStack() { // stack. Before: the marking stack contains zero or more heap object // pointers. After: the marking stack is empty and there are no overflowed // objects in the heap. -void MarkCompactCollector::ProcessMarkingStack() { - EmptyMarkingStack(); - while (marking_stack_.overflowed()) { - RefillMarkingStack(); - EmptyMarkingStack(); +void MarkCompactCollector::ProcessMarkingDeque() { + EmptyMarkingDeque(); + while (marking_deque_.overflowed()) { + RefillMarkingDeque(); + EmptyMarkingDeque(); } } void MarkCompactCollector::ProcessExternalMarking() { bool work_to_do = true; - ASSERT(marking_stack_.is_empty()); + ASSERT(marking_deque_.IsEmpty()); while (work_to_do) { MarkObjectGroups(); MarkImplicitRefGroups(); - work_to_do = !marking_stack_.is_empty(); - ProcessMarkingStack(); + work_to_do = !marking_deque_.IsEmpty(); + ProcessMarkingDeque(); } } @@ -1504,16 +2019,43 @@ void MarkCompactCollector::MarkLiveObjects() { // with the C stack limit check. PostponeInterruptsScope postpone(heap()->isolate()); + bool incremental_marking_overflowed = false; + IncrementalMarking* incremental_marking = heap_->incremental_marking(); + if (was_marked_incrementally_) { + // Finalize the incremental marking and check whether we had an overflow. + // Both markers use grey color to mark overflowed objects so + // non-incremental marker can deal with them as if overflow + // occured during normal marking. + // But incremental marker uses a separate marking deque + // so we have to explicitly copy it's overflow state. + incremental_marking->Finalize(); + incremental_marking_overflowed = + incremental_marking->marking_deque()->overflowed(); + incremental_marking->marking_deque()->ClearOverflowed(); + } else { + // Abort any pending incremental activities e.g. incremental sweeping. + incremental_marking->Abort(); + } + #ifdef DEBUG ASSERT(state_ == PREPARE_GC); state_ = MARK_LIVE_OBJECTS; #endif - // The to space contains live objects, the from space is used as a marking - // stack. - marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(), - heap()->new_space()->FromSpaceHigh()); + // The to space contains live objects, a page in from space is used as a + // marking stack. + Address marking_deque_start = heap()->new_space()->FromSpacePageLow(); + Address marking_deque_end = heap()->new_space()->FromSpacePageHigh(); + if (FLAG_force_marking_deque_overflows) { + marking_deque_end = marking_deque_start + 64 * kPointerSize; + } + marking_deque_.Initialize(marking_deque_start, + marking_deque_end); + ASSERT(!marking_deque_.overflowed()); - ASSERT(!marking_stack_.overflowed()); + if (incremental_marking_overflowed) { + // There are overflowed objects left in the heap after incremental marking. 
+ marking_deque_.SetOverflowed(); + } PrepareForCodeFlushing(); @@ -1535,15 +2077,20 @@ void MarkCompactCollector::MarkLiveObjects() { &IsUnmarkedHeapObject); // Then we mark the objects and process the transitive closure. heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor); - while (marking_stack_.overflowed()) { - RefillMarkingStack(); - EmptyMarkingStack(); + while (marking_deque_.overflowed()) { + RefillMarkingDeque(); + EmptyMarkingDeque(); } // Repeat host application specific marking to mark unmarked objects // reachable from the weak roots. ProcessExternalMarking(); + AfterMarking(); +} + + +void MarkCompactCollector::AfterMarking() { // Object literal map caches reference symbols (cache keys) and maps // (cache values). At this point still useful maps have already been // marked. Mark the keys for the alive values before we process the @@ -1553,7 +2100,7 @@ void MarkCompactCollector::MarkLiveObjects() { // Prune the symbol table removing all symbols only pointed to by the // symbol table. Cannot use symbol_table() here because the symbol // table is marked. - SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table(); + SymbolTable* symbol_table = heap()->symbol_table(); SymbolTableCleaner v(heap()); symbol_table->IterateElements(&v); symbol_table->ElementsRemoved(v.PointersRemoved()); @@ -1582,13 +2129,13 @@ void MarkCompactCollector::ProcessMapCaches() { Object* raw_context = heap()->global_contexts_list_; while (raw_context != heap()->undefined_value()) { Context* context = reinterpret_cast<Context*>(raw_context); - if (context->IsMarked()) { + if (IsMarked(context)) { HeapObject* raw_map_cache = HeapObject::cast(context->get(Context::MAP_CACHE_INDEX)); // A map cache may be reachable from the stack. In this case // it's already transitively marked and it's too late to clean // up its parts. - if (!raw_map_cache->IsMarked() && + if (!IsMarked(raw_map_cache) && raw_map_cache != heap()->undefined_value()) { MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache); int existing_elements = map_cache->NumberOfElements(); @@ -1601,8 +2148,7 @@ void MarkCompactCollector::ProcessMapCaches() { raw_key == heap()->null_value()) continue; STATIC_ASSERT(MapCache::kEntrySize == 2); Object* raw_map = map_cache->get(i + 1); - if (raw_map->IsHeapObject() && - HeapObject::cast(raw_map)->IsMarked()) { + if (raw_map->IsHeapObject() && IsMarked(raw_map)) { ++used_elements; } else { // Delete useless entries with unmarked maps. @@ -1618,14 +2164,15 @@ void MarkCompactCollector::ProcessMapCaches() { // extra complexity during GC. We rely on subsequent cache // usages (EnsureCapacity) to do this. map_cache->ElementsRemoved(existing_elements - used_elements); - MarkObject(map_cache); + MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache); + MarkObject(map_cache, map_cache_markbit); } } } // Move to next element in the list. raw_context = context->get(Context::NEXT_CONTEXT_LINK); } - ProcessMarkingStack(); + ProcessMarkingDeque(); } @@ -1655,27 +2202,26 @@ void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) { #endif // DEBUG -void MarkCompactCollector::SweepLargeObjectSpace() { -#ifdef DEBUG - ASSERT(state_ == MARK_LIVE_OBJECTS); - state_ = - compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES; -#endif - // Deallocate unmarked objects and clear marked bits for marked objects. 
- heap()->lo_space()->FreeUnmarkedObjects(); -} +void MarkCompactCollector::ReattachInitialMaps() { + HeapObjectIterator map_iterator(heap()->map_space()); + for (HeapObject* obj = map_iterator.Next(); + obj != NULL; + obj = map_iterator.Next()) { + if (obj->IsFreeSpace()) continue; + Map* map = Map::cast(obj); + STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); + if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue; -// Safe to use during marking phase only. -bool MarkCompactCollector::SafeIsMap(HeapObject* object) { - MapWord metamap = object->map_word(); - metamap.ClearMark(); - return metamap.ToMap()->instance_type() == MAP_TYPE; + if (map->attached_to_shared_function_info()) { + JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map); + } + } } void MarkCompactCollector::ClearNonLiveTransitions() { - HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject); + HeapObjectIterator map_iterator(heap()->map_space()); // Iterate over the map space, setting map transitions that go from // a marked map to an unmarked map to null transitions. At the same time, // set all the prototype fields of maps back to their original value, @@ -1686,17 +2232,19 @@ void MarkCompactCollector::ClearNonLiveTransitions() { // scan the descriptor arrays of those maps, not all maps. // All of these actions are carried out only on maps of JSObjects // and related subtypes. - for (HeapObject* obj = map_iterator.next(); - obj != NULL; obj = map_iterator.next()) { + for (HeapObject* obj = map_iterator.Next(); + obj != NULL; obj = map_iterator.Next()) { Map* map = reinterpret_cast<Map*>(obj); - if (!map->IsMarked() && map->IsByteArray()) continue; + MarkBit map_mark = Marking::MarkBitFrom(map); + if (map->IsFreeSpace()) continue; - ASSERT(SafeIsMap(map)); + ASSERT(map->IsMap()); // Only JSObject and subtypes have map transitions and back pointers. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue; + STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE); + if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue; - if (map->IsMarked() && map->attached_to_shared_function_info()) { + if (map_mark.Get() && + map->attached_to_shared_function_info()) { // This map is used for inobject slack tracking and has been detached // from SharedFunctionInfo during the mark phase. // Since it survived the GC, reattach it now. @@ -1705,52 +2253,55 @@ void MarkCompactCollector::ClearNonLiveTransitions() { // Clear dead prototype transitions. 
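The hunk that follows rewrites the pruning of dead prototype transitions: surviving (prototype, cached map) pairs are slid to the front of the transitions array and the freed tail is filled with undefined. A standalone sketch of the same compaction idea, with a toy liveness predicate and a sentinel entry standing in for the mark bits and undefined_value():

#include <cstdio>
#include <vector>

struct Entry {
  int prototype;   // stand-in for the prototype object
  int cached_map;  // stand-in for the cached transition map
};

static bool IsMarked(int object) { return object % 2 == 1; }  // toy liveness

static int PruneDeadTransitions(std::vector<Entry>* transitions) {
  const Entry kUndefined = { -1, -1 };  // stand-in for undefined_value()
  int new_number_of_transitions = 0;
  for (size_t i = 0; i < transitions->size(); i++) {
    const Entry& e = (*transitions)[i];
    if (IsMarked(e.prototype) && IsMarked(e.cached_map)) {
      // Slide the surviving entry down if earlier entries were dropped.
      (*transitions)[new_number_of_transitions++] = e;
    }
  }
  // Fill the slots that became free with the sentinel.
  for (size_t i = new_number_of_transitions; i < transitions->size(); i++) {
    (*transitions)[i] = kUndefined;
  }
  return new_number_of_transitions;
}

int main() {
  std::vector<Entry> t = { {1, 3}, {2, 5}, {7, 9}, {4, 4} };
  int live = PruneDeadTransitions(&t);
  std::printf("%d live transitions\n", live);  // prints 2: {1,3} and {7,9}
  return 0;
}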
int number_of_transitions = map->NumberOfProtoTransitions(); - if (number_of_transitions > 0) { - FixedArray* prototype_transitions = - map->unchecked_prototype_transitions(); - int new_number_of_transitions = 0; - const int header = Map::kProtoTransitionHeaderSize; - const int proto_offset = - header + Map::kProtoTransitionPrototypeOffset; - const int map_offset = header + Map::kProtoTransitionMapOffset; - const int step = Map::kProtoTransitionElementsPerEntry; - for (int i = 0; i < number_of_transitions; i++) { - Object* prototype = prototype_transitions->get(proto_offset + i * step); - Object* cached_map = prototype_transitions->get(map_offset + i * step); - if (HeapObject::cast(prototype)->IsMarked() && - HeapObject::cast(cached_map)->IsMarked()) { - if (new_number_of_transitions != i) { - prototype_transitions->set_unchecked( - heap_, - proto_offset + new_number_of_transitions * step, - prototype, - UPDATE_WRITE_BARRIER); - prototype_transitions->set_unchecked( - heap_, - map_offset + new_number_of_transitions * step, - cached_map, - SKIP_WRITE_BARRIER); - } - new_number_of_transitions++; + FixedArray* prototype_transitions = map->prototype_transitions(); + + int new_number_of_transitions = 0; + const int header = Map::kProtoTransitionHeaderSize; + const int proto_offset = + header + Map::kProtoTransitionPrototypeOffset; + const int map_offset = header + Map::kProtoTransitionMapOffset; + const int step = Map::kProtoTransitionElementsPerEntry; + for (int i = 0; i < number_of_transitions; i++) { + Object* prototype = prototype_transitions->get(proto_offset + i * step); + Object* cached_map = prototype_transitions->get(map_offset + i * step); + if (IsMarked(prototype) && IsMarked(cached_map)) { + if (new_number_of_transitions != i) { + prototype_transitions->set_unchecked( + heap_, + proto_offset + new_number_of_transitions * step, + prototype, + UPDATE_WRITE_BARRIER); + prototype_transitions->set_unchecked( + heap_, + map_offset + new_number_of_transitions * step, + cached_map, + SKIP_WRITE_BARRIER); } } // Fill slots that became free with undefined value. - Object* undefined = heap()->raw_unchecked_undefined_value(); + Object* undefined = heap()->undefined_value(); for (int i = new_number_of_transitions * step; i < number_of_transitions * step; i++) { + // The undefined object is on a page that is never compacted and never + // in new space so it is OK to skip the write barrier. Also it's a + // root. prototype_transitions->set_unchecked(heap_, header + i, undefined, SKIP_WRITE_BARRIER); + + Object** undefined_slot = + prototype_transitions->data_start() + i; + RecordSlot(undefined_slot, undefined_slot, undefined); } map->SetNumberOfProtoTransitions(new_number_of_transitions); } // Follow the chain of back pointers to find the prototype. Map* current = map; - while (SafeIsMap(current)) { + while (current->IsMap()) { current = reinterpret_cast<Map*>(current->prototype()); ASSERT(current->IsHeapObject()); } @@ -1759,21 +2310,28 @@ void MarkCompactCollector::ClearNonLiveTransitions() { // Follow back pointers, setting them to prototype, // clearing map transitions when necessary. current = map; - bool on_dead_path = !current->IsMarked(); + bool on_dead_path = !map_mark.Get(); Object* next; - while (SafeIsMap(current)) { + while (current->IsMap()) { next = current->prototype(); // There should never be a dead map above a live map. 
- ASSERT(on_dead_path || current->IsMarked()); + MarkBit current_mark = Marking::MarkBitFrom(current); + bool is_alive = current_mark.Get(); + ASSERT(on_dead_path || is_alive); // A live map above a dead map indicates a dead transition. // This test will always be false on the first iteration. - if (on_dead_path && current->IsMarked()) { + if (on_dead_path && is_alive) { on_dead_path = false; current->ClearNonLiveTransitions(heap(), real_prototype); } *HeapObject::RawField(current, Map::kPrototypeOffset) = real_prototype; + + if (is_alive) { + Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset); + RecordSlot(slot, slot, real_prototype); + } current = reinterpret_cast<Map*>(next); } } @@ -1783,13 +2341,13 @@ void MarkCompactCollector::ClearNonLiveTransitions() { void MarkCompactCollector::ProcessWeakMaps() { Object* weak_map_obj = encountered_weak_maps(); while (weak_map_obj != Smi::FromInt(0)) { - ASSERT(HeapObject::cast(weak_map_obj)->IsMarked()); + ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj))); JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); ObjectHashTable* table = weak_map->unchecked_table(); for (int i = 0; i < table->Capacity(); i++) { - if (HeapObject::cast(table->KeyAt(i))->IsMarked()) { + if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { Object* value = table->get(table->EntryToValueIndex(i)); - StaticMarkingVisitor::MarkObjectByPointer(heap(), &value); + StaticMarkingVisitor::VisitPointer(heap(), &value); table->set_unchecked(heap(), table->EntryToValueIndex(i), value, @@ -1804,11 +2362,11 @@ void MarkCompactCollector::ProcessWeakMaps() { void MarkCompactCollector::ClearWeakMaps() { Object* weak_map_obj = encountered_weak_maps(); while (weak_map_obj != Smi::FromInt(0)) { - ASSERT(HeapObject::cast(weak_map_obj)->IsMarked()); + ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj))); JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); ObjectHashTable* table = weak_map->unchecked_table(); for (int i = 0; i < table->Capacity(); i++) { - if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) { + if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { table->RemoveEntry(i, heap()); } } @@ -1818,316 +2376,97 @@ void MarkCompactCollector::ClearWeakMaps() { set_encountered_weak_maps(Smi::FromInt(0)); } -// ------------------------------------------------------------------------- -// Phase 2: Encode forwarding addresses. -// When compacting, forwarding addresses for objects in old space and map -// space are encoded in their map pointer word (along with an encoding of -// their map pointers). -// -// The excact encoding is described in the comments for class MapWord in -// objects.h. + +// We scavange new space simultaneously with sweeping. This is done in two +// passes. // -// An address range [start, end) can have both live and non-live objects. -// Maximal non-live regions are marked so they can be skipped on subsequent -// sweeps of the heap. A distinguished map-pointer encoding is used to mark -// free regions of one-word size (in which case the next word is the start -// of a live object). A second distinguished map-pointer encoding is used -// to mark free regions larger than one word, and the size of the free -// region (including the first word) is written to the second word of the -// region. +// The first pass migrates all alive objects from one semispace to another or +// promotes them to old space. 
Forwarding address is written directly into +// first word of object without any encoding. If object is dead we write +// NULL as a forwarding address. // -// Any valid map page offset must lie in the object area of the page, so map -// page offsets less than Page::kObjectStartOffset are invalid. We use a -// pair of distinguished invalid map encodings (for single word and multiple -// words) to indicate free regions in the page found during computation of -// forwarding addresses and skipped over in subsequent sweeps. - - -// Encode a free region, defined by the given start address and size, in the -// first word or two of the region. -void EncodeFreeRegion(Address free_start, int free_size) { - ASSERT(free_size >= kIntSize); - if (free_size == kIntSize) { - Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding; - } else { - ASSERT(free_size >= 2 * kIntSize); - Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding; - Memory::int_at(free_start + kIntSize) = free_size; - } +// The second pass updates pointers to new space in all spaces. It is possible +// to encounter pointers to dead new space objects during traversal of pointers +// to new space. We should clear them to avoid encountering them during next +// pointer iteration. This is an issue if the store buffer overflows and we +// have to scan the entire old space, including dead objects, looking for +// pointers to new space. +void MarkCompactCollector::MigrateObject(Address dst, + Address src, + int size, + AllocationSpace dest) { + HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst)); + if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) { + Address src_slot = src; + Address dst_slot = dst; + ASSERT(IsAligned(size, kPointerSize)); + + for (int remaining = size / kPointerSize; remaining > 0; remaining--) { + Object* value = Memory::Object_at(src_slot); + + Memory::Object_at(dst_slot) = value; + + if (heap_->InNewSpace(value)) { + heap_->store_buffer()->Mark(dst_slot); + } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { + SlotsBuffer::AddTo(&slots_buffer_allocator_, + &migration_slots_buffer_, + reinterpret_cast<Object**>(dst_slot), + SlotsBuffer::IGNORE_OVERFLOW); + } -#ifdef DEBUG - // Zap the body of the free region. - if (FLAG_enable_slow_asserts) { - for (int offset = 2 * kIntSize; - offset < free_size; - offset += kPointerSize) { - Memory::Address_at(free_start + offset) = kZapValue; + src_slot += kPointerSize; + dst_slot += kPointerSize; } - } -#endif -} + if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) { + Address code_entry_slot = dst + JSFunction::kCodeEntryOffset; + Address code_entry = Memory::Address_at(code_entry_slot); -// Try to promote all objects in new space. Heap numbers and sequential -// strings are promoted to the code space, large objects to large object space, -// and all others to the old space. 
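MigrateObject in the hunk above copies the object word by word and records interesting destination slots as it goes: slots holding new-space pointers are marked in the store buffer, and slots pointing into evacuation candidates are appended to the migration slots buffer. A simplified standalone model of that recording, with plain vectors and toy address predicates standing in for StoreBuffer and SlotsBuffer:

#include <cstdio>
#include <cstdint>
#include <vector>

typedef uintptr_t Word;

struct Buffers {
  std::vector<Word*> store_buffer;   // slots that point into new space
  std::vector<Word*> slots_buffer;   // slots that point into evacuation candidates
};

static bool InNewSpace(Word value) { return (value & 0xF000) == 0x1000; }          // toy predicate
static bool OnEvacuationCandidate(Word value) { return (value & 0xF000) == 0x2000; }  // toy predicate

static void MigrateWords(Word* dst, const Word* src, int word_count, Buffers* b) {
  for (int i = 0; i < word_count; i++) {
    Word value = src[i];
    dst[i] = value;
    if (InNewSpace(value)) {
      b->store_buffer.push_back(&dst[i]);   // heap_->store_buffer()->Mark(dst_slot)
    } else if (OnEvacuationCandidate(value)) {
      b->slots_buffer.push_back(&dst[i]);   // SlotsBuffer::AddTo(...)
    }
  }
}

int main() {
  Word src[4] = { 0x1004, 0x2008, 0x300C, 0x1010 };
  Word dst[4] = { 0, 0, 0, 0 };
  Buffers b;
  MigrateWords(dst, src, 4, &b);
  std::printf("store buffer: %zu slots, slots buffer: %zu slots\n",
              b.store_buffer.size(), b.slots_buffer.size());  // 2 and 1
  return 0;
}

Recording the addresses of the slots, rather than their values, is what lets the later pointer-update pass rewrite them once the evacuation candidates have actually moved.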
-inline MaybeObject* MCAllocateFromNewSpace(Heap* heap, - HeapObject* object, - int object_size) { - MaybeObject* forwarded; - if (object_size > heap->MaxObjectSizeInPagedSpace()) { - forwarded = Failure::Exception(); - } else { - OldSpace* target_space = heap->TargetSpace(object); - ASSERT(target_space == heap->old_pointer_space() || - target_space == heap->old_data_space()); - forwarded = target_space->MCAllocateRaw(object_size); - } - Object* result; - if (!forwarded->ToObject(&result)) { - result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked(); - } - return result; -} - - -// Allocation functions for the paged spaces call the space's MCAllocateRaw. -MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace( - Heap *heap, - HeapObject* ignore, - int object_size) { - return heap->old_pointer_space()->MCAllocateRaw(object_size); -} - - -MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace( - Heap* heap, - HeapObject* ignore, - int object_size) { - return heap->old_data_space()->MCAllocateRaw(object_size); -} - - -MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace( - Heap* heap, - HeapObject* ignore, - int object_size) { - return heap->code_space()->MCAllocateRaw(object_size); -} - - -MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace( - Heap* heap, - HeapObject* ignore, - int object_size) { - return heap->map_space()->MCAllocateRaw(object_size); -} - - -MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace( - Heap* heap, HeapObject* ignore, int object_size) { - return heap->cell_space()->MCAllocateRaw(object_size); -} - - -// The forwarding address is encoded at the same offset as the current -// to-space object, but in from space. -inline void EncodeForwardingAddressInNewSpace(Heap* heap, - HeapObject* old_object, - int object_size, - Object* new_object, - int* ignored) { - int offset = - heap->new_space()->ToSpaceOffsetForAddress(old_object->address()); - Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) = - HeapObject::cast(new_object)->address(); -} - - -// The forwarding address is encoded in the map pointer of the object as an -// offset (in terms of live bytes) from the address of the first live object -// in the page. -inline void EncodeForwardingAddressInPagedSpace(Heap* heap, - HeapObject* old_object, - int object_size, - Object* new_object, - int* offset) { - // Record the forwarding address of the first live object if necessary. - if (*offset == 0) { - Page::FromAddress(old_object->address())->mc_first_forwarded = - HeapObject::cast(new_object)->address(); - } - - MapWord encoding = - MapWord::EncodeAddress(old_object->map()->address(), *offset); - old_object->set_map_word(encoding); - *offset += object_size; - ASSERT(*offset <= Page::kObjectAreaSize); -} - - -// Most non-live objects are ignored. -inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {} - - -// Function template that, given a range of addresses (eg, a semispace or a -// paged space page), iterates through the objects in the range to clear -// mark bits and compute and encode forwarding addresses. As a side effect, -// maximal free chunks are marked so that they can be skipped on subsequent -// sweeps. -// -// The template parameters are an allocation function, a forwarding address -// encoding function, and a function to process non-live objects. 
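The helpers being deleted here packed a forwarding address into the object's map word as a live-byte offset relative to the first live object on the page (recorded in mc_first_forwarded). The scheme used by the rest of this patch instead writes the destination address directly into the first word and, presumably, tells it apart from a map pointer by its tag, which is what MapWord::IsForwardingAddress checks in PointersUpdatingVisitor further down. A standalone model of that idea; the tag constant and the struct layout are illustrative, not V8's actual MapWord:

#include <cassert>
#include <cstdint>
#include <cstdio>

const uintptr_t kHeapObjectTag = 1;   // low bit set on tagged object pointers

struct MapWord {
  uintptr_t value;
  // A word-aligned raw address has its low bit clear; a map pointer does not.
  bool IsForwardingAddress() const { return (value & kHeapObjectTag) == 0; }
  static MapWord FromMap(uintptr_t tagged_map) { return MapWord{ tagged_map }; }
  static MapWord FromForwardingAddress(uintptr_t addr) {
    assert((addr & kHeapObjectTag) == 0);
    return MapWord{ addr };
  }
  uintptr_t ToForwardingAddress() const { return value; }
};

int main() {
  MapWord as_map = MapWord::FromMap(0x1000 | kHeapObjectTag);
  MapWord as_forward = MapWord::FromForwardingAddress(0x2000);
  std::printf("%d %d\n",
              as_map.IsForwardingAddress(),        // 0
              as_forward.IsForwardingAddress());   // 1
  return 0;
}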
-template<MarkCompactCollector::AllocationFunction Alloc, - MarkCompactCollector::EncodingFunction Encode, - MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive> -inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector, - Address start, - Address end, - int* offset) { - // The start address of the current free region while sweeping the space. - // This address is set when a transition from live to non-live objects is - // encountered. A value (an encoding of the 'next free region' pointer) - // is written to memory at this address when a transition from non-live to - // live objects is encountered. - Address free_start = NULL; - - // A flag giving the state of the previously swept object. Initially true - // to ensure that free_start is initialized to a proper address before - // trying to write to it. - bool is_prev_alive = true; - - int object_size; // Will be set on each iteration of the loop. - for (Address current = start; current < end; current += object_size) { - HeapObject* object = HeapObject::FromAddress(current); - if (object->IsMarked()) { - object->ClearMark(); - collector->tracer()->decrement_marked_count(); - object_size = object->Size(); - - Object* forwarded = - Alloc(collector->heap(), object, object_size)->ToObjectUnchecked(); - Encode(collector->heap(), object, object_size, forwarded, offset); - -#ifdef DEBUG - if (FLAG_gc_verbose) { - PrintF("forward %p -> %p.\n", object->address(), - HeapObject::cast(forwarded)->address()); - } -#endif - if (!is_prev_alive) { // Transition from non-live to live. - EncodeFreeRegion(free_start, static_cast<int>(current - free_start)); - is_prev_alive = true; - } - } else { // Non-live object. - object_size = object->Size(); - ProcessNonLive(object, collector->heap()->isolate()); - if (is_prev_alive) { // Transition from live to non-live. - free_start = current; - is_prev_alive = false; + if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { + SlotsBuffer::AddTo(&slots_buffer_allocator_, + &migration_slots_buffer_, + SlotsBuffer::CODE_ENTRY_SLOT, + code_entry_slot, + SlotsBuffer::IGNORE_OVERFLOW); } - LiveObjectList::ProcessNonLive(object); } - } - - // If we ended on a free region, mark it. - if (!is_prev_alive) { - EncodeFreeRegion(free_start, static_cast<int>(end - free_start)); - } -} - - -// Functions to encode the forwarding pointers in each compactable space. -void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() { - int ignored; - EncodeForwardingAddressesInRange<MCAllocateFromNewSpace, - EncodeForwardingAddressInNewSpace, - IgnoreNonLiveObject>( - this, - heap()->new_space()->bottom(), - heap()->new_space()->top(), - &ignored); -} - - -template<MarkCompactCollector::AllocationFunction Alloc, - MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive> -void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace( - PagedSpace* space) { - PageIterator it(space, PageIterator::PAGES_IN_USE); - while (it.has_next()) { - Page* p = it.next(); - - // The offset of each live object in the page from the first live object - // in the page. - int offset = 0; - EncodeForwardingAddressesInRange<Alloc, - EncodeForwardingAddressInPagedSpace, - ProcessNonLive>( - this, - p->ObjectAreaStart(), - p->AllocationTop(), - &offset); - } -} - - -// We scavange new space simultaneously with sweeping. This is done in two -// passes. -// The first pass migrates all alive objects from one semispace to another or -// promotes them to old space. 
Forwading address is written directly into -// first word of object without any encoding. If object is dead we are writing -// NULL as a forwarding address. -// The second pass updates pointers to new space in all spaces. It is possible -// to encounter pointers to dead objects during traversal of dirty regions we -// should clear them to avoid encountering them during next dirty regions -// iteration. -static void MigrateObject(Heap* heap, - Address dst, - Address src, - int size, - bool to_old_space) { - if (to_old_space) { - heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size); + } else if (dest == CODE_SPACE) { + PROFILE(heap()->isolate(), CodeMoveEvent(src, dst)); + heap()->MoveBlock(dst, src, size); + SlotsBuffer::AddTo(&slots_buffer_allocator_, + &migration_slots_buffer_, + SlotsBuffer::RELOCATED_CODE_OBJECT, + dst, + SlotsBuffer::IGNORE_OVERFLOW); + Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src); } else { - heap->CopyBlock(dst, src, size); + ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE); + heap()->MoveBlock(dst, src, size); } - Memory::Address_at(src) = dst; } -class StaticPointersToNewGenUpdatingVisitor : public - StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> { - public: - static inline void VisitPointer(Heap* heap, Object** p) { - if (!(*p)->IsHeapObject()) return; - - HeapObject* obj = HeapObject::cast(*p); - Address old_addr = obj->address(); - - if (heap->new_space()->Contains(obj)) { - ASSERT(heap->InFromSpace(*p)); - *p = HeapObject::FromAddress(Memory::Address_at(old_addr)); - } - } -}; - - // Visitor for updating pointers from live objects in old spaces to new space. // It does not expect to encounter pointers to dead objects. -class PointersToNewGenUpdatingVisitor: public ObjectVisitor { +class PointersUpdatingVisitor: public ObjectVisitor { public: - explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { } + explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { } void VisitPointer(Object** p) { - StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p); + UpdatePointer(p); } void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) { - StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p); - } + for (Object** p = start; p < end; p++) UpdatePointer(p); + } + + void VisitEmbeddedPointer(RelocInfo* rinfo) { + ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); + Object* target = rinfo->target_object(); + VisitPointer(&target); + rinfo->set_target_object(target); } void VisitCodeTarget(RelocInfo* rinfo) { @@ -2147,68 +2486,96 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor { rinfo->set_call_address(Code::cast(target)->instruction_start()); } + static inline void UpdateSlot(Heap* heap, Object** slot) { + Object* obj = *slot; + + if (!obj->IsHeapObject()) return; + + HeapObject* heap_obj = HeapObject::cast(obj); + + MapWord map_word = heap_obj->map_word(); + if (map_word.IsForwardingAddress()) { + ASSERT(heap->InFromSpace(heap_obj) || + MarkCompactCollector::IsOnEvacuationCandidate(heap_obj)); + HeapObject* target = map_word.ToForwardingAddress(); + *slot = target; + ASSERT(!heap->InFromSpace(target) && + !MarkCompactCollector::IsOnEvacuationCandidate(target)); + } + } + private: + inline void UpdatePointer(Object** p) { + UpdateSlot(heap_, p); + } + Heap* heap_; }; -// Visitor for updating pointers from live objects in old spaces to new space. 
-// It can encounter pointers to dead objects in new space when traversing map -// space (see comment for MigrateObject). -static void UpdatePointerToNewGen(HeapObject** p) { - if (!(*p)->IsHeapObject()) return; +static void UpdatePointer(HeapObject** p, HeapObject* object) { + ASSERT(*p == object); - Address old_addr = (*p)->address(); - ASSERT(HEAP->InFromSpace(*p)); + Address old_addr = object->address(); Address new_addr = Memory::Address_at(old_addr); - if (new_addr == NULL) { - // We encountered pointer to a dead object. Clear it so we will - // not visit it again during next iteration of dirty regions. - *p = NULL; - } else { + // The new space sweep will overwrite the map word of dead objects + // with NULL. In this case we do not need to transfer this entry to + // the store buffer which we are rebuilding. + if (new_addr != NULL) { *p = HeapObject::FromAddress(new_addr); + } else { + // We have to zap this pointer, because the store buffer may overflow later, + // and then we have to scan the entire heap and we don't want to find + // spurious newspace pointers in the old space. + *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0)); } } -static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, - Object** p) { - Address old_addr = HeapObject::cast(*p)->address(); - Address new_addr = Memory::Address_at(old_addr); - return String::cast(HeapObject::FromAddress(new_addr)); +static String* UpdateReferenceInExternalStringTableEntry(Heap* heap, + Object** p) { + MapWord map_word = HeapObject::cast(*p)->map_word(); + + if (map_word.IsForwardingAddress()) { + return String::cast(map_word.ToForwardingAddress()); + } + + return String::cast(*p); } -static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) { +bool MarkCompactCollector::TryPromoteObject(HeapObject* object, + int object_size) { Object* result; - if (object_size > heap->MaxObjectSizeInPagedSpace()) { + if (object_size > heap()->MaxObjectSizeInPagedSpace()) { MaybeObject* maybe_result = - heap->lo_space()->AllocateRawFixedArray(object_size); + heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE); if (maybe_result->ToObject(&result)) { HeapObject* target = HeapObject::cast(result); - MigrateObject(heap, target->address(), object->address(), object_size, - true); - heap->mark_compact_collector()->tracer()-> + MigrateObject(target->address(), + object->address(), + object_size, + LO_SPACE); + heap()->mark_compact_collector()->tracer()-> increment_promoted_objects_size(object_size); return true; } } else { - OldSpace* target_space = heap->TargetSpace(object); + OldSpace* target_space = heap()->TargetSpace(object); - ASSERT(target_space == heap->old_pointer_space() || - target_space == heap->old_data_space()); + ASSERT(target_space == heap()->old_pointer_space() || + target_space == heap()->old_data_space()); MaybeObject* maybe_result = target_space->AllocateRaw(object_size); if (maybe_result->ToObject(&result)) { HeapObject* target = HeapObject::cast(result); - MigrateObject(heap, - target->address(), + MigrateObject(target->address(), object->address(), object_size, - target_space == heap->old_pointer_space()); - heap->mark_compact_collector()->tracer()-> + target_space->identity()); + heap()->mark_compact_collector()->tracer()-> increment_promoted_objects_size(object_size); return true; } @@ -2218,1145 +2585,1247 @@ static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) { } -static void SweepNewSpace(Heap* heap, NewSpace* space) { - 
heap->CheckNewSpaceExpansionCriteria(); +void MarkCompactCollector::EvacuateNewSpace() { + heap()->CheckNewSpaceExpansionCriteria(); + + NewSpace* new_space = heap()->new_space(); - Address from_bottom = space->bottom(); - Address from_top = space->top(); + // Store allocation range before flipping semispaces. + Address from_bottom = new_space->bottom(); + Address from_top = new_space->top(); // Flip the semispaces. After flipping, to space is empty, from space has // live objects. - space->Flip(); - space->ResetAllocationInfo(); + new_space->Flip(); + new_space->ResetAllocationInfo(); - int size = 0; int survivors_size = 0; // First pass: traverse all objects in inactive semispace, remove marks, - // migrate live objects and write forwarding addresses. - for (Address current = from_bottom; current < from_top; current += size) { - HeapObject* object = HeapObject::FromAddress(current); - - if (object->IsMarked()) { - object->ClearMark(); - heap->mark_compact_collector()->tracer()->decrement_marked_count(); - - size = object->Size(); + // migrate live objects and write forwarding addresses. This stage puts + // new entries in the store buffer and may cause some pages to be marked + // scan-on-scavenge. + SemiSpaceIterator from_it(from_bottom, from_top); + for (HeapObject* object = from_it.Next(); + object != NULL; + object = from_it.Next()) { + MarkBit mark_bit = Marking::MarkBitFrom(object); + if (mark_bit.Get()) { + mark_bit.Clear(); + // Don't bother decrementing live bytes count. We'll discard the + // entire page at the end. + int size = object->Size(); survivors_size += size; // Aggressively promote young survivors to the old space. - if (TryPromoteObject(heap, object, size)) { + if (TryPromoteObject(object, size)) { continue; } // Promotion failed. Just migrate object to another semispace. - // Allocation cannot fail at this point: semispaces are of equal size. - Object* target = space->AllocateRaw(size)->ToObjectUnchecked(); + MaybeObject* allocation = new_space->AllocateRaw(size); + if (allocation->IsFailure()) { + if (!new_space->AddFreshPage()) { + // Shouldn't happen. We are sweeping linearly, and to-space + // has the same number of pages as from-space, so there is + // always room. + UNREACHABLE(); + } + allocation = new_space->AllocateRaw(size); + ASSERT(!allocation->IsFailure()); + } + Object* target = allocation->ToObjectUnchecked(); - MigrateObject(heap, - HeapObject::cast(target)->address(), - current, + MigrateObject(HeapObject::cast(target)->address(), + object->address(), size, - false); + NEW_SPACE); } else { // Process the dead object before we write a NULL into its header. LiveObjectList::ProcessNonLive(object); - size = object->Size(); - Memory::Address_at(current) = NULL; + // Mark dead objects in the new space with null in their map field. + Memory::Address_at(object->address()) = NULL; } } - // Second pass: find pointers to new space and update them. - PointersToNewGenUpdatingVisitor updating_visitor(heap); - - // Update pointers in to space. - Address current = space->bottom(); - while (current < space->top()) { - HeapObject* object = HeapObject::FromAddress(current); - current += - StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(), - object); - } - - // Update roots. - heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); - LiveObjectList::IterateElements(&updating_visitor); - - // Update pointers in old spaces. 
- heap->IterateDirtyRegions(heap->old_pointer_space(), - &Heap::IteratePointersInDirtyRegion, - &UpdatePointerToNewGen, - heap->WATERMARK_SHOULD_BE_VALID); - - heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen); - - // Update pointers from cells. - HeapObjectIterator cell_iterator(heap->cell_space()); - for (HeapObject* cell = cell_iterator.next(); - cell != NULL; - cell = cell_iterator.next()) { - if (cell->IsJSGlobalPropertyCell()) { - Address value_address = - reinterpret_cast<Address>(cell) + - (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); - updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); - } - } - - // Update pointer from the global contexts list. - updating_visitor.VisitPointer(heap->global_contexts_list_address()); - - // Update pointers from external string table. - heap->UpdateNewSpaceReferencesInExternalStringTable( - &UpdateNewSpaceReferenceInExternalStringTableEntry); - - // All pointers were updated. Update auxiliary allocation info. - heap->IncrementYoungSurvivorsCounter(survivors_size); - space->set_age_mark(space->top()); - - // Update JSFunction pointers from the runtime profiler. - heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge(); + heap_->IncrementYoungSurvivorsCounter(survivors_size); + new_space->set_age_mark(new_space->top()); } -static void SweepSpace(Heap* heap, PagedSpace* space) { - PageIterator it(space, PageIterator::PAGES_IN_USE); - - // During sweeping of paged space we are trying to find longest sequences - // of pages without live objects and free them (instead of putting them on - // the free list). +void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { + AlwaysAllocateScope always_allocate; + PagedSpace* space = static_cast<PagedSpace*>(p->owner()); + ASSERT(p->IsEvacuationCandidate() && !p->WasSwept()); + MarkBit::CellType* cells = p->markbits()->cells(); + p->MarkSweptPrecisely(); - // Page preceding current. - Page* prev = Page::FromAddress(NULL); + int last_cell_index = + Bitmap::IndexToCell( + Bitmap::CellAlignIndex( + p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); - // First empty page in a sequence. - Page* first_empty_page = Page::FromAddress(NULL); + int cell_index = Page::kFirstUsedCell; + Address cell_base = p->ObjectAreaStart(); + int offsets[16]; - // Page preceding first empty page. - Page* prec_first_empty_page = Page::FromAddress(NULL); - - // If last used page of space ends with a sequence of dead objects - // we can adjust allocation top instead of puting this free area into - // the free list. Thus during sweeping we keep track of such areas - // and defer their deallocation until the sweeping of the next page - // is done: if one of the next pages contains live objects we have - // to put such area into the free list. - Address last_free_start = NULL; - int last_free_size = 0; - - while (it.has_next()) { - Page* p = it.next(); - - bool is_previous_alive = true; - Address free_start = NULL; - HeapObject* object; - - for (Address current = p->ObjectAreaStart(); - current < p->AllocationTop(); - current += object->Size()) { - object = HeapObject::FromAddress(current); - if (object->IsMarked()) { - object->ClearMark(); - heap->mark_compact_collector()->tracer()->decrement_marked_count(); - - if (!is_previous_alive) { // Transition from free to live. 
- space->DeallocateBlock(free_start, - static_cast<int>(current - free_start), - true); - is_previous_alive = true; - } - } else { - heap->mark_compact_collector()->ReportDeleteIfNeeded( - object, heap->isolate()); - if (is_previous_alive) { // Transition from live to free. - free_start = current; - is_previous_alive = false; - } - LiveObjectList::ProcessNonLive(object); - } - // The object is now unmarked for the call to Size() at the top of the - // loop. - } + for (cell_index = Page::kFirstUsedCell; + cell_index < last_cell_index; + cell_index++, cell_base += 32 * kPointerSize) { + ASSERT((unsigned)cell_index == + Bitmap::IndexToCell( + Bitmap::CellAlignIndex( + p->AddressToMarkbitIndex(cell_base)))); + if (cells[cell_index] == 0) continue; - bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop()) - || (!is_previous_alive && free_start == p->ObjectAreaStart()); + int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); + for (int i = 0; i < live_objects; i++) { + Address object_addr = cell_base + offsets[i] * kPointerSize; + HeapObject* object = HeapObject::FromAddress(object_addr); + ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); - if (page_is_empty) { - // This page is empty. Check whether we are in the middle of - // sequence of empty pages and start one if not. - if (!first_empty_page->is_valid()) { - first_empty_page = p; - prec_first_empty_page = prev; - } + int size = object->Size(); - if (!is_previous_alive) { - // There are dead objects on this page. Update space accounting stats - // without putting anything into free list. - int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start); - if (size_in_bytes > 0) { - space->DeallocateBlock(free_start, size_in_bytes, false); - } - } - } else { - // This page is not empty. Sequence of empty pages ended on the previous - // one. - if (first_empty_page->is_valid()) { - space->FreePages(prec_first_empty_page, prev); - prec_first_empty_page = first_empty_page = Page::FromAddress(NULL); + MaybeObject* target = space->AllocateRaw(size); + if (target->IsFailure()) { + // OS refused to give us memory. + V8::FatalProcessOutOfMemory("Evacuation"); + return; } - // If there is a free ending area on one of the previous pages we have - // deallocate that area and put it on the free list. - if (last_free_size > 0) { - Page::FromAddress(last_free_start)-> - SetAllocationWatermark(last_free_start); - space->DeallocateBlock(last_free_start, last_free_size, true); - last_free_start = NULL; - last_free_size = 0; - } + Object* target_object = target->ToObjectUnchecked(); - // If the last region of this page was not live we remember it. - if (!is_previous_alive) { - ASSERT(last_free_size == 0); - last_free_size = static_cast<int>(p->AllocationTop() - free_start); - last_free_start = free_start; - } + MigrateObject(HeapObject::cast(target_object)->address(), + object_addr, + size, + space->identity()); + ASSERT(object->map_word().IsForwardingAddress()); } - prev = p; - } - - // We reached end of space. See if we need to adjust allocation top. - Address new_allocation_top = NULL; - - if (first_empty_page->is_valid()) { - // Last used pages in space are empty. We can move allocation top backwards - // to the beginning of first empty page. - ASSERT(prev == space->AllocationTopPage()); - - new_allocation_top = first_empty_page->ObjectAreaStart(); + // Clear marking bits for current cell. 
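EvacuateLiveObjectsFromPage above relies on MarkWordToObjectStarts, which is only forward-declared in this section. At this point every live object on an evacuation candidate page is black (start bit set, following bit clear), so a plausible decoder simply turns the set bits of one 32-bit bitmap cell into word offsets and from there into addresses. This is a sketch under that assumption, not V8's implementation:

#include <cstdint>
#include <cstdio>

const int kPointerSize = sizeof(void*);

// Writes the word offsets of the set bits of |mark_bits| into |starts| and
// returns how many there are (the calling pattern used by the loop above).
static int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
  int count = 0;
  for (int offset = 0; mark_bits != 0; offset++, mark_bits >>= 1) {
    if (mark_bits & 1) starts[count++] = offset;
  }
  return count;
}

int main() {
  // One bitmap cell covers 32 consecutive words starting at |cell_base|.
  uintptr_t cell_base = 0x10000;
  uint32_t cell = (1u << 0) | (1u << 4) | (1u << 9);   // three black objects
  int offsets[32];
  int live_objects = MarkWordToObjectStarts(cell, offsets);
  for (int i = 0; i < live_objects; i++) {
    uintptr_t object_addr = cell_base + offsets[i] * kPointerSize;
    std::printf("live object at %#lx\n", (unsigned long)object_addr);
  }
  return 0;
}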
+ cells[cell_index] = 0; } + p->ResetLiveBytes(); +} - if (last_free_size > 0) { - // There was a free ending area on the previous page. - // Deallocate it without putting it into freelist and move allocation - // top to the beginning of this free area. - space->DeallocateBlock(last_free_start, last_free_size, false); - new_allocation_top = last_free_start; - } - if (new_allocation_top != NULL) { -#ifdef DEBUG - Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top); - if (!first_empty_page->is_valid()) { - ASSERT(new_allocation_top_page == space->AllocationTopPage()); - } else if (last_free_size > 0) { - ASSERT(new_allocation_top_page == prec_first_empty_page); - } else { - ASSERT(new_allocation_top_page == first_empty_page); +void MarkCompactCollector::EvacuatePages() { + int npages = evacuation_candidates_.length(); + for (int i = 0; i < npages; i++) { + Page* p = evacuation_candidates_[i]; + ASSERT(p->IsEvacuationCandidate() || + p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); + if (p->IsEvacuationCandidate()) { + // During compaction we might have to request a new page. + // Check that space still have room for that. + if (static_cast<PagedSpace*>(p->owner())->CanExpand()) { + EvacuateLiveObjectsFromPage(p); + } else { + // Without room for expansion evacuation is not guaranteed to succeed. + // Pessimistically abandon unevacuated pages. + for (int j = i; j < npages; j++) { + Page* page = evacuation_candidates_[j]; + slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); + page->ClearEvacuationCandidate(); + page->SetFlag(Page::RESCAN_ON_EVACUATION); + } + return; + } } -#endif - - space->SetTop(new_allocation_top); } } -void MarkCompactCollector::EncodeForwardingAddresses() { - ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); - // Objects in the active semispace of the young generation may be - // relocated to the inactive semispace (if not promoted). Set the - // relocation info to the beginning of the inactive semispace. - heap()->new_space()->MCResetRelocationInfo(); - - // Compute the forwarding pointers in each space. - EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace, - ReportDeleteIfNeeded>( - heap()->old_pointer_space()); - - EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace, - IgnoreNonLiveObject>( - heap()->old_data_space()); - - EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace, - ReportDeleteIfNeeded>( - heap()->code_space()); - - EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace, - IgnoreNonLiveObject>( - heap()->cell_space()); - - - // Compute new space next to last after the old and code spaces have been - // compacted. Objects in new space can be promoted to old or code space. - EncodeForwardingAddressesInNewSpace(); - - // Compute map space last because computing forwarding addresses - // overwrites non-live objects. Objects in the other spaces rely on - // non-live map pointers to get the sizes of non-live objects. - EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace, - IgnoreNonLiveObject>( - heap()->map_space()); - - // Write relocation info to the top page, so we can use it later. This is - // done after promoting objects from the new space so we get the correct - // allocation top. 
- heap()->old_pointer_space()->MCWriteRelocationInfoToPage(); - heap()->old_data_space()->MCWriteRelocationInfoToPage(); - heap()->code_space()->MCWriteRelocationInfoToPage(); - heap()->map_space()->MCWriteRelocationInfoToPage(); - heap()->cell_space()->MCWriteRelocationInfoToPage(); -} - - -class MapIterator : public HeapObjectIterator { +class EvacuationWeakObjectRetainer : public WeakObjectRetainer { public: - explicit MapIterator(Heap* heap) - : HeapObjectIterator(heap->map_space(), &SizeCallback) { } - - MapIterator(Heap* heap, Address start) - : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { } - - private: - static int SizeCallback(HeapObject* unused) { - USE(unused); - return Map::kSize; + virtual Object* RetainAs(Object* object) { + if (object->IsHeapObject()) { + HeapObject* heap_object = HeapObject::cast(object); + MapWord map_word = heap_object->map_word(); + if (map_word.IsForwardingAddress()) { + return map_word.ToForwardingAddress(); + } + } + return object; } }; -class MapCompact { - public: - explicit MapCompact(Heap* heap, int live_maps) - : heap_(heap), - live_maps_(live_maps), - to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)), - vacant_map_it_(heap), - map_to_evacuate_it_(heap, to_evacuate_start_), - first_map_to_evacuate_( - reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) { - } - - void CompactMaps() { - // As we know the number of maps to evacuate beforehand, - // we stop then there is no more vacant maps. - for (Map* next_vacant_map = NextVacantMap(); - next_vacant_map; - next_vacant_map = NextVacantMap()) { - EvacuateMap(next_vacant_map, NextMapToEvacuate()); +static inline void UpdateSlot(ObjectVisitor* v, + SlotsBuffer::SlotType slot_type, + Address addr) { + switch (slot_type) { + case SlotsBuffer::CODE_TARGET_SLOT: { + RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL); + rinfo.Visit(v); + break; } - -#ifdef DEBUG - CheckNoMapsToEvacuate(); -#endif - } - - void UpdateMapPointersInRoots() { - MapUpdatingVisitor map_updating_visitor; - heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG); - heap()->isolate()->global_handles()->IterateWeakRoots( - &map_updating_visitor); - LiveObjectList::IterateElements(&map_updating_visitor); - } - - void UpdateMapPointersInPagedSpace(PagedSpace* space) { - ASSERT(space != heap()->map_space()); - - PageIterator it(space, PageIterator::PAGES_IN_USE); - while (it.has_next()) { - Page* p = it.next(); - UpdateMapPointersInRange(heap(), - p->ObjectAreaStart(), - p->AllocationTop()); + case SlotsBuffer::CODE_ENTRY_SLOT: { + v->VisitCodeEntry(addr); + break; } + case SlotsBuffer::RELOCATED_CODE_OBJECT: { + HeapObject* obj = HeapObject::FromAddress(addr); + Code::cast(obj)->CodeIterateBody(v); + break; + } + case SlotsBuffer::DEBUG_TARGET_SLOT: { + RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL); + if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v); + break; + } + case SlotsBuffer::JS_RETURN_SLOT: { + RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL); + if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v); + break; + } + case SlotsBuffer::EMBEDDED_OBJECT_SLOT: { + RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL); + rinfo.Visit(v); + break; + } + default: + UNREACHABLE(); + break; } +} - void UpdateMapPointersInNewSpace() { - NewSpace* space = heap()->new_space(); - UpdateMapPointersInRange(heap(), space->bottom(), space->top()); - } - - void UpdateMapPointersInLargeObjectSpace() { - LargeObjectIterator it(heap()->lo_space()); - for 
(HeapObject* obj = it.next(); obj != NULL; obj = it.next()) - UpdateMapPointersInObject(heap(), obj); - } - - void Finish() { - heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_); - } - - inline Heap* heap() const { return heap_; } - - private: - Heap* heap_; - int live_maps_; - Address to_evacuate_start_; - MapIterator vacant_map_it_; - MapIterator map_to_evacuate_it_; - Map* first_map_to_evacuate_; - - // Helper class for updating map pointers in HeapObjects. - class MapUpdatingVisitor: public ObjectVisitor { - public: - MapUpdatingVisitor() {} - - void VisitPointer(Object** p) { - UpdateMapPointer(p); - } - void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) UpdateMapPointer(p); - } +enum SweepingMode { + SWEEP_ONLY, + SWEEP_AND_VISIT_LIVE_OBJECTS +}; - private: - void UpdateMapPointer(Object** p) { - if (!(*p)->IsHeapObject()) return; - HeapObject* old_map = reinterpret_cast<HeapObject*>(*p); - // Moved maps are tagged with overflowed map word. They are the only - // objects those map word is overflowed as marking is already complete. - MapWord map_word = old_map->map_word(); - if (!map_word.IsOverflowed()) return; +enum SkipListRebuildingMode { + REBUILD_SKIP_LIST, + IGNORE_SKIP_LIST +}; - *p = GetForwardedMap(map_word); - } - }; - static Map* NextMap(MapIterator* it, HeapObject* last, bool live) { - while (true) { - HeapObject* next = it->next(); - ASSERT(next != NULL); - if (next == last) - return NULL; - ASSERT(!next->IsOverflowed()); - ASSERT(!next->IsMarked()); - ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next)); - if (next->IsMap() == live) - return reinterpret_cast<Map*>(next); +// Sweep a space precisely. After this has been done the space can +// be iterated precisely, hitting only the live objects. Code space +// is always swept precisely because we want to be able to iterate +// over it. Map space is swept precisely, because it is not compacted. +// Slots in live objects pointing into evacuation candidates are updated +// if requested. 
+template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode> +static void SweepPrecisely(PagedSpace* space, + Page* p, + ObjectVisitor* v) { + ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); + ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST, + space->identity() == CODE_SPACE); + ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); + + MarkBit::CellType* cells = p->markbits()->cells(); + p->MarkSweptPrecisely(); + + int last_cell_index = + Bitmap::IndexToCell( + Bitmap::CellAlignIndex( + p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); + + int cell_index = Page::kFirstUsedCell; + Address free_start = p->ObjectAreaStart(); + ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); + Address object_address = p->ObjectAreaStart(); + int offsets[16]; + + SkipList* skip_list = p->skip_list(); + int curr_region = -1; + if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { + skip_list->Clear(); + } + + for (cell_index = Page::kFirstUsedCell; + cell_index < last_cell_index; + cell_index++, object_address += 32 * kPointerSize) { + ASSERT((unsigned)cell_index == + Bitmap::IndexToCell( + Bitmap::CellAlignIndex( + p->AddressToMarkbitIndex(object_address)))); + int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); + int live_index = 0; + for ( ; live_objects != 0; live_objects--) { + Address free_end = object_address + offsets[live_index++] * kPointerSize; + if (free_end != free_start) { + space->Free(free_start, static_cast<int>(free_end - free_start)); + } + HeapObject* live_object = HeapObject::FromAddress(free_end); + ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); + Map* map = live_object->map(); + int size = live_object->SizeFromMap(map); + if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { + live_object->IterateBody(map->instance_type(), size, v); + } + if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { + int new_region_start = + SkipList::RegionNumber(free_end); + int new_region_end = + SkipList::RegionNumber(free_end + size - kPointerSize); + if (new_region_start != curr_region || + new_region_end != curr_region) { + skip_list->AddObject(free_end, size); + curr_region = new_region_end; + } + } + free_start = free_end + size; } + // Clear marking bits for current cell. + cells[cell_index] = 0; } - - Map* NextVacantMap() { - Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false); - ASSERT(map == NULL || FreeListNode::IsFreeListNode(map)); - return map; + if (free_start != p->ObjectAreaEnd()) { + space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); } + p->ResetLiveBytes(); +} - Map* NextMapToEvacuate() { - Map* map = NextMap(&map_to_evacuate_it_, NULL, true); - ASSERT(map != NULL); - ASSERT(map->IsMap()); - return map; - } - static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) { - ASSERT(FreeListNode::IsFreeListNode(vacant_map)); - ASSERT(map_to_evacuate->IsMap()); +static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { + Page* p = Page::FromAddress(code->address()); - ASSERT(Map::kSize % 4 == 0); + if (p->IsEvacuationCandidate() || + p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { + return false; + } - map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks( - vacant_map->address(), map_to_evacuate->address(), Map::kSize); + Address code_start = code->address(); + Address code_end = code_start + code->Size(); - ASSERT(vacant_map->IsMap()); // Due to memcpy above. 
+ uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start); + uint32_t end_index = + MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize); - MapWord forwarding_map_word = MapWord::FromMap(vacant_map); - forwarding_map_word.SetOverflow(); - map_to_evacuate->set_map_word(forwarding_map_word); + Bitmap* b = p->markbits(); - ASSERT(map_to_evacuate->map_word().IsOverflowed()); - ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map); - } + MarkBit start_mark_bit = b->MarkBitFromIndex(start_index); + MarkBit end_mark_bit = b->MarkBitFromIndex(end_index); - static Map* GetForwardedMap(MapWord map_word) { - ASSERT(map_word.IsOverflowed()); - map_word.ClearOverflow(); - Map* new_map = map_word.ToMap(); - ASSERT_MAP_ALIGNED(new_map->address()); - return new_map; - } + MarkBit::CellType* start_cell = start_mark_bit.cell(); + MarkBit::CellType* end_cell = end_mark_bit.cell(); - static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) { - ASSERT(!obj->IsMarked()); - Map* map = obj->map(); - ASSERT(heap->map_space()->Contains(map)); - MapWord map_word = map->map_word(); - ASSERT(!map_word.IsMarked()); - if (map_word.IsOverflowed()) { - Map* new_map = GetForwardedMap(map_word); - ASSERT(heap->map_space()->Contains(new_map)); - obj->set_map(new_map); + if (value) { + MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1); + MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1; -#ifdef DEBUG - if (FLAG_gc_verbose) { - PrintF("update %p : %p -> %p\n", - obj->address(), - reinterpret_cast<void*>(map), - reinterpret_cast<void*>(new_map)); + if (start_cell == end_cell) { + *start_cell |= start_mask & end_mask; + } else { + *start_cell |= start_mask; + for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) { + *cell = ~0; } -#endif + *end_cell |= end_mask; } - - int size = obj->SizeFromMap(map); - MapUpdatingVisitor map_updating_visitor; - obj->IterateBody(map->instance_type(), size, &map_updating_visitor); - return size; - } - - static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) { - HeapObject* object; - int size; - for (Address current = start; current < end; current += size) { - object = HeapObject::FromAddress(current); - size = UpdateMapPointersInObject(heap, object); - ASSERT(size > 0); + } else { + for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) { + *cell = 0; } } -#ifdef DEBUG - void CheckNoMapsToEvacuate() { - if (!FLAG_enable_slow_asserts) - return; - - for (HeapObject* obj = map_to_evacuate_it_.next(); - obj != NULL; obj = map_to_evacuate_it_.next()) - ASSERT(FreeListNode::IsFreeListNode(obj)); - } -#endif -}; + return true; +} -void MarkCompactCollector::SweepSpaces() { - GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); +static bool IsOnInvalidatedCodeObject(Address addr) { + // We did not record any slots in large objects thus + // we can safely go to the page from the slot address. + Page* p = Page::FromAddress(addr); - ASSERT(state_ == SWEEP_SPACES); - ASSERT(!IsCompacting()); - // Noncompacting collections simply sweep the spaces to clear the mark - // bits and free the nonlive blocks (for old and map spaces). We sweep - // the map space last because freeing non-live maps overwrites them and - // the other spaces rely on possibly non-live maps to get the sizes for - // non-live objects. 
- SweepSpace(heap(), heap()->old_pointer_space()); - SweepSpace(heap(), heap()->old_data_space()); - SweepSpace(heap(), heap()->code_space()); - SweepSpace(heap(), heap()->cell_space()); - { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); - SweepNewSpace(heap(), heap()->new_space()); - } - SweepSpace(heap(), heap()->map_space()); + // First check owner's identity because old pointer and old data spaces + // are swept lazily and might still have non-zero mark-bits on some + // pages. + if (p->owner()->identity() != CODE_SPACE) return false; - heap()->IterateDirtyRegions(heap()->map_space(), - &heap()->IteratePointersInDirtyMapsRegion, - &UpdatePointerToNewGen, - heap()->WATERMARK_SHOULD_BE_VALID); + // In code space only bits on evacuation candidates (but we don't record + // any slots on them) and under invalidated code objects are non-zero. + MarkBit mark_bit = + p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr)); - intptr_t live_maps_size = heap()->map_space()->Size(); - int live_maps = static_cast<int>(live_maps_size / Map::kSize); - ASSERT(live_map_objects_size_ == live_maps_size); + return mark_bit.Get(); +} - if (heap()->map_space()->NeedsCompaction(live_maps)) { - MapCompact map_compact(heap(), live_maps); - map_compact.CompactMaps(); - map_compact.UpdateMapPointersInRoots(); +void MarkCompactCollector::InvalidateCode(Code* code) { + if (heap_->incremental_marking()->IsCompacting() && + !ShouldSkipEvacuationSlotRecording(code)) { + ASSERT(compacting_); - PagedSpaces spaces; - for (PagedSpace* space = spaces.next(); - space != NULL; space = spaces.next()) { - if (space == heap()->map_space()) continue; - map_compact.UpdateMapPointersInPagedSpace(space); - } - map_compact.UpdateMapPointersInNewSpace(); - map_compact.UpdateMapPointersInLargeObjectSpace(); + // If the object is white than no slots were recorded on it yet. + MarkBit mark_bit = Marking::MarkBitFrom(code); + if (Marking::IsWhite(mark_bit)) return; - map_compact.Finish(); + invalidated_code_.Add(code); } } -// Iterate the live objects in a range of addresses (eg, a page or a -// semispace). The live regions of the range have been linked into a list. -// The first live region is [first_live_start, first_live_end), and the last -// address in the range is top. The callback function is used to get the -// size of each live object. 
-int MarkCompactCollector::IterateLiveObjectsInRange( - Address start, - Address end, - LiveObjectCallback size_func) { - int live_objects_size = 0; - Address current = start; - while (current < end) { - uint32_t encoded_map = Memory::uint32_at(current); - if (encoded_map == kSingleFreeEncoding) { - current += kPointerSize; - } else if (encoded_map == kMultiFreeEncoding) { - current += Memory::int_at(current + kIntSize); - } else { - int size = (this->*size_func)(HeapObject::FromAddress(current)); - current += size; - live_objects_size += size; +bool MarkCompactCollector::MarkInvalidatedCode() { + bool code_marked = false; + + int length = invalidated_code_.length(); + for (int i = 0; i < length; i++) { + Code* code = invalidated_code_[i]; + + if (SetMarkBitsUnderInvalidatedCode(code, true)) { + code_marked = true; } } - return live_objects_size; + + return code_marked; } -int MarkCompactCollector::IterateLiveObjects( - NewSpace* space, LiveObjectCallback size_f) { - ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); - return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f); +void MarkCompactCollector::RemoveDeadInvalidatedCode() { + int length = invalidated_code_.length(); + for (int i = 0; i < length; i++) { + if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL; + } } -int MarkCompactCollector::IterateLiveObjects( - PagedSpace* space, LiveObjectCallback size_f) { - ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); - int total = 0; - PageIterator it(space, PageIterator::PAGES_IN_USE); - while (it.has_next()) { - Page* p = it.next(); - total += IterateLiveObjectsInRange(p->ObjectAreaStart(), - p->AllocationTop(), - size_f); +void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) { + int length = invalidated_code_.length(); + for (int i = 0; i < length; i++) { + Code* code = invalidated_code_[i]; + if (code != NULL) { + code->Iterate(visitor); + SetMarkBitsUnderInvalidatedCode(code, false); + } } - return total; + invalidated_code_.Rewind(0); } -// ------------------------------------------------------------------------- -// Phase 3: Update pointers +void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { + bool code_slots_filtering_required = MarkInvalidatedCode(); -// Helper class for updating pointers in HeapObjects. -class UpdatingVisitor: public ObjectVisitor { - public: - explicit UpdatingVisitor(Heap* heap) : heap_(heap) {} + EvacuateNewSpace(); + EvacuatePages(); - void VisitPointer(Object** p) { - UpdatePointer(p); - } + // Second pass: find pointers to new space and update them. + PointersUpdatingVisitor updating_visitor(heap()); - void VisitPointers(Object** start, Object** end) { - // Mark all HeapObject pointers in [start, end) - for (Object** p = start; p < end; p++) UpdatePointer(p); + // Update pointers in to space. + SemiSpaceIterator to_it(heap()->new_space()->bottom(), + heap()->new_space()->top()); + for (HeapObject* object = to_it.Next(); + object != NULL; + object = to_it.Next()) { + Map* map = object->map(); + object->IterateBody(map->instance_type(), + object->SizeFromMap(map), + &updating_visitor); } - void VisitCodeTarget(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); - Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); - VisitPointer(&target); - rinfo->set_target_address( - reinterpret_cast<Code*>(target)->instruction_start()); - } + // Update roots. 
+ heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); + LiveObjectList::IterateElements(&updating_visitor); - void VisitDebugTarget(RelocInfo* rinfo) { - ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && - rinfo->IsPatchedReturnSequence()) || - (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && - rinfo->IsPatchedDebugBreakSlotSequence())); - Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); - VisitPointer(&target); - rinfo->set_call_address( - reinterpret_cast<Code*>(target)->instruction_start()); + { + StoreBufferRebuildScope scope(heap_, + heap_->store_buffer(), + &Heap::ScavengeStoreBufferCallback); + heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); + } + + SlotsBuffer::UpdateSlotsRecordedIn(heap_, + migration_slots_buffer_, + code_slots_filtering_required); + if (FLAG_trace_fragmentation) { + PrintF(" migration slots buffer: %d\n", + SlotsBuffer::SizeOfChain(migration_slots_buffer_)); + } + + if (compacting_ && was_marked_incrementally_) { + // It's difficult to filter out slots recorded for large objects. + LargeObjectIterator it(heap_->lo_space()); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + // LargeObjectSpace is not swept yet thus we have to skip + // dead objects explicitly. + if (!IsMarked(obj)) continue; + + Page* p = Page::FromAddress(obj->address()); + if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { + obj->Iterate(&updating_visitor); + p->ClearFlag(Page::RESCAN_ON_EVACUATION); + } + } } - inline Heap* heap() const { return heap_; } + int npages = evacuation_candidates_.length(); + for (int i = 0; i < npages; i++) { + Page* p = evacuation_candidates_[i]; + ASSERT(p->IsEvacuationCandidate() || + p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); - private: - void UpdatePointer(Object** p) { - if (!(*p)->IsHeapObject()) return; - - HeapObject* obj = HeapObject::cast(*p); - Address old_addr = obj->address(); - Address new_addr; - ASSERT(!heap()->InFromSpace(obj)); - - if (heap()->new_space()->Contains(obj)) { - Address forwarding_pointer_addr = - heap()->new_space()->FromSpaceLow() + - heap()->new_space()->ToSpaceOffsetForAddress(old_addr); - new_addr = Memory::Address_at(forwarding_pointer_addr); - -#ifdef DEBUG - ASSERT(heap()->old_pointer_space()->Contains(new_addr) || - heap()->old_data_space()->Contains(new_addr) || - heap()->new_space()->FromSpaceContains(new_addr) || - heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr))); - - if (heap()->new_space()->FromSpaceContains(new_addr)) { - ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <= - heap()->new_space()->ToSpaceOffsetForAddress(old_addr)); + if (p->IsEvacuationCandidate()) { + SlotsBuffer::UpdateSlotsRecordedIn(heap_, + p->slots_buffer(), + code_slots_filtering_required); + if (FLAG_trace_fragmentation) { + PrintF(" page %p slots buffer: %d\n", + reinterpret_cast<void*>(p), + SlotsBuffer::SizeOfChain(p->slots_buffer())); } -#endif - - } else if (heap()->lo_space()->Contains(obj)) { - // Don't move objects in the large object space. - return; + // Important: skip list should be cleared only after roots were updated + // because root iteration traverses the stack and might have to find code + // objects from non-updated pc pointing into evacuation candidate. 
+ SkipList* list = p->skip_list(); + if (list != NULL) list->Clear(); } else { -#ifdef DEBUG - PagedSpaces spaces; - PagedSpace* original_space = spaces.next(); - while (original_space != NULL) { - if (original_space->Contains(obj)) break; - original_space = spaces.next(); + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", + reinterpret_cast<intptr_t>(p)); + } + PagedSpace* space = static_cast<PagedSpace*>(p->owner()); + p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); + + switch (space->identity()) { + case OLD_DATA_SPACE: + SweepConservatively(space, p); + break; + case OLD_POINTER_SPACE: + SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>( + space, p, &updating_visitor); + break; + case CODE_SPACE: + SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>( + space, p, &updating_visitor); + break; + default: + UNREACHABLE(); + break; } - ASSERT(original_space != NULL); -#endif - new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj); - ASSERT(original_space->Contains(new_addr)); - ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <= - original_space->MCSpaceOffsetForAddress(old_addr)); } + } - *p = HeapObject::FromAddress(new_addr); - -#ifdef DEBUG - if (FLAG_gc_verbose) { - PrintF("update %p : %p -> %p\n", - reinterpret_cast<Address>(p), old_addr, new_addr); + // Update pointers from cells. + HeapObjectIterator cell_iterator(heap_->cell_space()); + for (HeapObject* cell = cell_iterator.Next(); + cell != NULL; + cell = cell_iterator.Next()) { + if (cell->IsJSGlobalPropertyCell()) { + Address value_address = + reinterpret_cast<Address>(cell) + + (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); + updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); } -#endif } - Heap* heap_; -}; + // Update pointer from the global contexts list. + updating_visitor.VisitPointer(heap_->global_contexts_list_address()); + heap_->symbol_table()->Iterate(&updating_visitor); -void MarkCompactCollector::UpdatePointers() { -#ifdef DEBUG - ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); - state_ = UPDATE_POINTERS; -#endif - UpdatingVisitor updating_visitor(heap()); + // Update pointers from external string table. + heap_->UpdateReferencesInExternalStringTable( + &UpdateReferenceInExternalStringTableEntry); + + // Update JSFunction pointers from the runtime profiler. heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( &updating_visitor); - heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG); - heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor); - // Update the pointer to the head of the weak list of global contexts. - updating_visitor.VisitPointer(&heap()->global_contexts_list_); + EvacuationWeakObjectRetainer evacuation_object_retainer; + heap()->ProcessWeakReferences(&evacuation_object_retainer); - LiveObjectList::IterateElements(&updating_visitor); + // Visit invalidated code (we ignored all slots on it) and clear mark-bits + // under it. 
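  // [Editorial illustration, not part of this patch] MarkInvalidatedCode(),
  // called at the top of this function, set the mark bits under each
  // invalidated code object so that IsOnInvalidatedCodeObject() could filter
  // their recorded slots out of the updates above. ProcessInvalidatedCode()
  // now revisits those code objects with the updating visitor and clears the
  // bits again via SetMarkBitsUnderInvalidatedCode(code, false).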
+ ProcessInvalidatedCode(&updating_visitor); - int live_maps_size = IterateLiveObjects( - heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject); - int live_pointer_olds_size = IterateLiveObjects( - heap()->old_pointer_space(), - &MarkCompactCollector::UpdatePointersInOldObject); - int live_data_olds_size = IterateLiveObjects( - heap()->old_data_space(), - &MarkCompactCollector::UpdatePointersInOldObject); - int live_codes_size = IterateLiveObjects( - heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject); - int live_cells_size = IterateLiveObjects( - heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject); - int live_news_size = IterateLiveObjects( - heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject); - - // Large objects do not move, the map word can be updated directly. - LargeObjectIterator it(heap()->lo_space()); - for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { - UpdatePointersInNewObject(obj); - } - - USE(live_maps_size); - USE(live_pointer_olds_size); - USE(live_data_olds_size); - USE(live_codes_size); - USE(live_cells_size); - USE(live_news_size); - ASSERT(live_maps_size == live_map_objects_size_); - ASSERT(live_data_olds_size == live_old_data_objects_size_); - ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); - ASSERT(live_codes_size == live_code_objects_size_); - ASSERT(live_cells_size == live_cell_objects_size_); - ASSERT(live_news_size == live_young_objects_size_); -} - - -int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) { - // Keep old map pointers - Map* old_map = obj->map(); - ASSERT(old_map->IsHeapObject()); - - Address forwarded = GetForwardingAddressInOldSpace(old_map); - - ASSERT(heap()->map_space()->Contains(old_map)); - ASSERT(heap()->map_space()->Contains(forwarded)); #ifdef DEBUG - if (FLAG_gc_verbose) { - PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(), - forwarded); + if (FLAG_verify_heap) { + VerifyEvacuation(heap_); } #endif - // Update the map pointer. - obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded))); - // We have to compute the object size relying on the old map because - // map objects are not relocated yet. - int obj_size = obj->SizeFromMap(old_map); - - // Update pointers in the object body. - UpdatingVisitor updating_visitor(heap()); - obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor); - return obj_size; + slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); + ASSERT(migration_slots_buffer_ == NULL); + for (int i = 0; i < npages; i++) { + Page* p = evacuation_candidates_[i]; + if (!p->IsEvacuationCandidate()) continue; + PagedSpace* space = static_cast<PagedSpace*>(p->owner()); + space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize); + p->set_scan_on_scavenge(false); + slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); + p->ClearEvacuationCandidate(); + } + evacuation_candidates_.Rewind(0); + compacting_ = false; } -int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) { - // Decode the map pointer. - MapWord encoding = obj->map_word(); - Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); - ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr))); - - // At this point, the first word of map_addr is also encoded, cannot - // cast it to Map* using Map::cast. 
- Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)); - int obj_size = obj->SizeFromMap(map); - InstanceType type = map->instance_type(); - - // Update map pointer. - Address new_map_addr = GetForwardingAddressInOldSpace(map); - int offset = encoding.DecodeOffset(); - obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset)); +static const int kStartTableEntriesPerLine = 5; +static const int kStartTableLines = 171; +static const int kStartTableInvalidLine = 127; +static const int kStartTableUnusedEntry = 126; -#ifdef DEBUG - if (FLAG_gc_verbose) { - PrintF("update %p : %p -> %p\n", obj->address(), - map_addr, new_map_addr); +#define _ kStartTableUnusedEntry +#define X kStartTableInvalidLine +// Mark-bit to object start offset table. +// +// The line is indexed by the mark bits in a byte. The first number on +// the line describes the number of live object starts for the line and the +// other numbers on the line describe the offsets (in words) of the object +// starts. +// +// Since objects are at least 2 words large we don't have entries for two +// consecutive 1 bits. All entries after 170 have at least 2 consecutive bits. +char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = { + 0, _, _, _, _, // 0 + 1, 0, _, _, _, // 1 + 1, 1, _, _, _, // 2 + X, _, _, _, _, // 3 + 1, 2, _, _, _, // 4 + 2, 0, 2, _, _, // 5 + X, _, _, _, _, // 6 + X, _, _, _, _, // 7 + 1, 3, _, _, _, // 8 + 2, 0, 3, _, _, // 9 + 2, 1, 3, _, _, // 10 + X, _, _, _, _, // 11 + X, _, _, _, _, // 12 + X, _, _, _, _, // 13 + X, _, _, _, _, // 14 + X, _, _, _, _, // 15 + 1, 4, _, _, _, // 16 + 2, 0, 4, _, _, // 17 + 2, 1, 4, _, _, // 18 + X, _, _, _, _, // 19 + 2, 2, 4, _, _, // 20 + 3, 0, 2, 4, _, // 21 + X, _, _, _, _, // 22 + X, _, _, _, _, // 23 + X, _, _, _, _, // 24 + X, _, _, _, _, // 25 + X, _, _, _, _, // 26 + X, _, _, _, _, // 27 + X, _, _, _, _, // 28 + X, _, _, _, _, // 29 + X, _, _, _, _, // 30 + X, _, _, _, _, // 31 + 1, 5, _, _, _, // 32 + 2, 0, 5, _, _, // 33 + 2, 1, 5, _, _, // 34 + X, _, _, _, _, // 35 + 2, 2, 5, _, _, // 36 + 3, 0, 2, 5, _, // 37 + X, _, _, _, _, // 38 + X, _, _, _, _, // 39 + 2, 3, 5, _, _, // 40 + 3, 0, 3, 5, _, // 41 + 3, 1, 3, 5, _, // 42 + X, _, _, _, _, // 43 + X, _, _, _, _, // 44 + X, _, _, _, _, // 45 + X, _, _, _, _, // 46 + X, _, _, _, _, // 47 + X, _, _, _, _, // 48 + X, _, _, _, _, // 49 + X, _, _, _, _, // 50 + X, _, _, _, _, // 51 + X, _, _, _, _, // 52 + X, _, _, _, _, // 53 + X, _, _, _, _, // 54 + X, _, _, _, _, // 55 + X, _, _, _, _, // 56 + X, _, _, _, _, // 57 + X, _, _, _, _, // 58 + X, _, _, _, _, // 59 + X, _, _, _, _, // 60 + X, _, _, _, _, // 61 + X, _, _, _, _, // 62 + X, _, _, _, _, // 63 + 1, 6, _, _, _, // 64 + 2, 0, 6, _, _, // 65 + 2, 1, 6, _, _, // 66 + X, _, _, _, _, // 67 + 2, 2, 6, _, _, // 68 + 3, 0, 2, 6, _, // 69 + X, _, _, _, _, // 70 + X, _, _, _, _, // 71 + 2, 3, 6, _, _, // 72 + 3, 0, 3, 6, _, // 73 + 3, 1, 3, 6, _, // 74 + X, _, _, _, _, // 75 + X, _, _, _, _, // 76 + X, _, _, _, _, // 77 + X, _, _, _, _, // 78 + X, _, _, _, _, // 79 + 2, 4, 6, _, _, // 80 + 3, 0, 4, 6, _, // 81 + 3, 1, 4, 6, _, // 82 + X, _, _, _, _, // 83 + 3, 2, 4, 6, _, // 84 + 4, 0, 2, 4, 6, // 85 + X, _, _, _, _, // 86 + X, _, _, _, _, // 87 + X, _, _, _, _, // 88 + X, _, _, _, _, // 89 + X, _, _, _, _, // 90 + X, _, _, _, _, // 91 + X, _, _, _, _, // 92 + X, _, _, _, _, // 93 + X, _, _, _, _, // 94 + X, _, _, _, _, // 95 + X, _, _, _, _, // 96 + X, _, _, _, _, // 97 + X, _, _, _, _, // 98 + X, _, _, _, _, // 99 + X, _, _, _, _, // 100 + X, 
_, _, _, _, // 101 + X, _, _, _, _, // 102 + X, _, _, _, _, // 103 + X, _, _, _, _, // 104 + X, _, _, _, _, // 105 + X, _, _, _, _, // 106 + X, _, _, _, _, // 107 + X, _, _, _, _, // 108 + X, _, _, _, _, // 109 + X, _, _, _, _, // 110 + X, _, _, _, _, // 111 + X, _, _, _, _, // 112 + X, _, _, _, _, // 113 + X, _, _, _, _, // 114 + X, _, _, _, _, // 115 + X, _, _, _, _, // 116 + X, _, _, _, _, // 117 + X, _, _, _, _, // 118 + X, _, _, _, _, // 119 + X, _, _, _, _, // 120 + X, _, _, _, _, // 121 + X, _, _, _, _, // 122 + X, _, _, _, _, // 123 + X, _, _, _, _, // 124 + X, _, _, _, _, // 125 + X, _, _, _, _, // 126 + X, _, _, _, _, // 127 + 1, 7, _, _, _, // 128 + 2, 0, 7, _, _, // 129 + 2, 1, 7, _, _, // 130 + X, _, _, _, _, // 131 + 2, 2, 7, _, _, // 132 + 3, 0, 2, 7, _, // 133 + X, _, _, _, _, // 134 + X, _, _, _, _, // 135 + 2, 3, 7, _, _, // 136 + 3, 0, 3, 7, _, // 137 + 3, 1, 3, 7, _, // 138 + X, _, _, _, _, // 139 + X, _, _, _, _, // 140 + X, _, _, _, _, // 141 + X, _, _, _, _, // 142 + X, _, _, _, _, // 143 + 2, 4, 7, _, _, // 144 + 3, 0, 4, 7, _, // 145 + 3, 1, 4, 7, _, // 146 + X, _, _, _, _, // 147 + 3, 2, 4, 7, _, // 148 + 4, 0, 2, 4, 7, // 149 + X, _, _, _, _, // 150 + X, _, _, _, _, // 151 + X, _, _, _, _, // 152 + X, _, _, _, _, // 153 + X, _, _, _, _, // 154 + X, _, _, _, _, // 155 + X, _, _, _, _, // 156 + X, _, _, _, _, // 157 + X, _, _, _, _, // 158 + X, _, _, _, _, // 159 + 2, 5, 7, _, _, // 160 + 3, 0, 5, 7, _, // 161 + 3, 1, 5, 7, _, // 162 + X, _, _, _, _, // 163 + 3, 2, 5, 7, _, // 164 + 4, 0, 2, 5, 7, // 165 + X, _, _, _, _, // 166 + X, _, _, _, _, // 167 + 3, 3, 5, 7, _, // 168 + 4, 0, 3, 5, 7, // 169 + 4, 1, 3, 5, 7 // 170 +}; +#undef _ +#undef X + + +// Takes a word of mark bits. Returns the number of objects that start in the +// range. Puts the offsets of the words in the supplied array. +static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) { + int objects = 0; + int offset = 0; + + // No consecutive 1 bits. + ASSERT((mark_bits & 0x180) != 0x180); + ASSERT((mark_bits & 0x18000) != 0x18000); + ASSERT((mark_bits & 0x1800000) != 0x1800000); + + while (mark_bits != 0) { + int byte = (mark_bits & 0xff); + mark_bits >>= 8; + if (byte != 0) { + ASSERT(byte < kStartTableLines); // No consecutive 1 bits. + char* table = kStartTable + byte * kStartTableEntriesPerLine; + int objects_in_these_8_words = table[0]; + ASSERT(objects_in_these_8_words != kStartTableInvalidLine); + ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine); + for (int i = 0; i < objects_in_these_8_words; i++) { + starts[objects++] = offset + table[1 + i]; + } + } + offset += 8; } -#endif - - // Update pointers in the object body. - UpdatingVisitor updating_visitor(heap()); - obj->IterateBody(type, obj_size, &updating_visitor); - return obj_size; + return objects; } -Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) { - // Object should either in old or map space. - MapWord encoding = obj->map_word(); - - // Offset to the first live object's forwarding address. - int offset = encoding.DecodeOffset(); - Address obj_addr = obj->address(); - - // Find the first live object's forwarding address. - Page* p = Page::FromAddress(obj_addr); - Address first_forwarded = p->mc_first_forwarded; - - // Page start address of forwarded address. 
- Page* forwarded_page = Page::FromAddress(first_forwarded); - int forwarded_offset = forwarded_page->Offset(first_forwarded); +static inline Address DigestFreeStart(Address approximate_free_start, + uint32_t free_start_cell) { + ASSERT(free_start_cell != 0); - // Find end of allocation in the page of first_forwarded. - int mc_top_offset = forwarded_page->AllocationWatermarkOffset(); + // No consecutive 1 bits. + ASSERT((free_start_cell & (free_start_cell << 1)) == 0); - // Check if current object's forward pointer is in the same page - // as the first live object's forwarding pointer - if (forwarded_offset + offset < mc_top_offset) { - // In the same page. - return first_forwarded + offset; + int offsets[16]; + uint32_t cell = free_start_cell; + int offset_of_last_live; + if ((cell & 0x80000000u) != 0) { + // This case would overflow below. + offset_of_last_live = 31; + } else { + // Remove all but one bit, the most significant. This is an optimization + // that may or may not be worthwhile. + cell |= cell >> 16; + cell |= cell >> 8; + cell |= cell >> 4; + cell |= cell >> 2; + cell |= cell >> 1; + cell = (cell + 1) >> 1; + int live_objects = MarkWordToObjectStarts(cell, offsets); + ASSERT(live_objects == 1); + offset_of_last_live = offsets[live_objects - 1]; + } + Address last_live_start = + approximate_free_start + offset_of_last_live * kPointerSize; + HeapObject* last_live = HeapObject::FromAddress(last_live_start); + Address free_start = last_live_start + last_live->Size(); + return free_start; +} + + +static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { + ASSERT(cell != 0); + + // No consecutive 1 bits. + ASSERT((cell & (cell << 1)) == 0); + + int offsets[16]; + if (cell == 0x80000000u) { // Avoid overflow below. + return block_address + 31 * kPointerSize; + } + uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; + ASSERT((first_set_bit & cell) == first_set_bit); + int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); + ASSERT(live_objects == 1); + USE(live_objects); + return block_address + offsets[0] * kPointerSize; +} + + +// Sweeps a space conservatively. After this has been done the larger free +// spaces have been put on the free list and the smaller ones have been +// ignored and left untouched. A free space is always either ignored or put +// on the free list, never split up into two parts. This is important +// because it means that any FreeSpace maps left actually describe a region of +// memory that can be ignored when scanning. Dead objects other than free +// spaces will not contain the free space map. +intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { + ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); + MarkBit::CellType* cells = p->markbits()->cells(); + p->MarkSweptConservatively(); + + int last_cell_index = + Bitmap::IndexToCell( + Bitmap::CellAlignIndex( + p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); + + int cell_index = Page::kFirstUsedCell; + intptr_t freed_bytes = 0; + + // This is the start of the 32 word block that we are currently looking at. + Address block_address = p->ObjectAreaStart(); + + // Skip over all the dead objects at the start of the page and mark them free. 
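  // [Editorial illustration, not part of this patch] Worked example for the
  // loop below, using hypothetical cell values: if the first three cells
  // examined are zero and the next one is 0x50, the loop exits with
  // block_address advanced 3 * 32 * kPointerSize bytes past ObjectAreaStart().
  // StartOfLiveObject(block_address, 0x50) then isolates the lowest set bit
  // (0x50 -> 0x10, i.e. word offset 4), so everything from ObjectAreaStart()
  // up to block_address + 4 * kPointerSize is released in a single
  // space->Free() call.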
+ for (cell_index = Page::kFirstUsedCell; + cell_index < last_cell_index; + cell_index++, block_address += 32 * kPointerSize) { + if (cells[cell_index] != 0) break; + } + size_t size = block_address - p->ObjectAreaStart(); + if (cell_index == last_cell_index) { + freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(), + static_cast<int>(size))); + ASSERT_EQ(0, p->LiveBytes()); + return freed_bytes; + } + // Grow the size of the start-of-page free space a little to get up to the + // first live object. + Address free_end = StartOfLiveObject(block_address, cells[cell_index]); + // Free the first free space. + size = free_end - p->ObjectAreaStart(); + freed_bytes += space->Free(p->ObjectAreaStart(), + static_cast<int>(size)); + // The start of the current free area is represented in undigested form by + // the address of the last 32-word section that contained a live object and + // the marking bitmap for that cell, which describes where the live object + // started. Unless we find a large free space in the bitmap we will not + // digest this pair into a real address. We start the iteration here at the + // first word in the marking bit map that indicates a live object. + Address free_start = block_address; + uint32_t free_start_cell = cells[cell_index]; + + for ( ; + cell_index < last_cell_index; + cell_index++, block_address += 32 * kPointerSize) { + ASSERT((unsigned)cell_index == + Bitmap::IndexToCell( + Bitmap::CellAlignIndex( + p->AddressToMarkbitIndex(block_address)))); + uint32_t cell = cells[cell_index]; + if (cell != 0) { + // We have a live object. Check approximately whether it is more than 32 + // words since the last live object. + if (block_address - free_start > 32 * kPointerSize) { + free_start = DigestFreeStart(free_start, free_start_cell); + if (block_address - free_start > 32 * kPointerSize) { + // Now that we know the exact start of the free space it still looks + // like we have a large enough free space to be worth bothering with. + // so now we need to find the start of the first live object at the + // end of the free space. + free_end = StartOfLiveObject(block_address, cell); + freed_bytes += space->Free(free_start, + static_cast<int>(free_end - free_start)); + } + } + // Update our undigested record of where the current free area started. + free_start = block_address; + free_start_cell = cell; + // Clear marking bits for current cell. + cells[cell_index] = 0; + } } - // Must be in the next page, NOTE: this may cross chunks. - Page* next_page = forwarded_page->next_page(); - ASSERT(next_page->is_valid()); - - offset -= (mc_top_offset - forwarded_offset); - offset += Page::kObjectStartOffset; - - ASSERT_PAGE_OFFSET(offset); - ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop()); + // Handle the free space at the end of the page. 
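  // [Editorial illustration, not part of this patch] As in the loop above, a
  // trailing free area is only digested and returned to the free list when it
  // spans more than one mark-bit cell (32 words, i.e. 256 bytes on a 64-bit
  // build); anything smaller is simply left in place, which is what makes
  // this sweep "conservative".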
+ if (block_address - free_start > 32 * kPointerSize) { + free_start = DigestFreeStart(free_start, free_start_cell); + freed_bytes += space->Free(free_start, + static_cast<int>(block_address - free_start)); + } - return next_page->OffsetToAddress(offset); + p->ResetLiveBytes(); + return freed_bytes; } -// ------------------------------------------------------------------------- -// Phase 4: Relocate objects +void MarkCompactCollector::SweepSpace(PagedSpace* space, + SweeperType sweeper) { + space->set_was_swept_conservatively(sweeper == CONSERVATIVE || + sweeper == LAZY_CONSERVATIVE); -void MarkCompactCollector::RelocateObjects() { -#ifdef DEBUG - ASSERT(state_ == UPDATE_POINTERS); - state_ = RELOCATE_OBJECTS; -#endif - // Relocates objects, always relocate map objects first. Relocating - // objects in other space relies on map objects to get object size. - int live_maps_size = IterateLiveObjects( - heap()->map_space(), &MarkCompactCollector::RelocateMapObject); - int live_pointer_olds_size = IterateLiveObjects( - heap()->old_pointer_space(), - &MarkCompactCollector::RelocateOldPointerObject); - int live_data_olds_size = IterateLiveObjects( - heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject); - int live_codes_size = IterateLiveObjects( - heap()->code_space(), &MarkCompactCollector::RelocateCodeObject); - int live_cells_size = IterateLiveObjects( - heap()->cell_space(), &MarkCompactCollector::RelocateCellObject); - int live_news_size = IterateLiveObjects( - heap()->new_space(), &MarkCompactCollector::RelocateNewObject); - - USE(live_maps_size); - USE(live_pointer_olds_size); - USE(live_data_olds_size); - USE(live_codes_size); - USE(live_cells_size); - USE(live_news_size); - ASSERT(live_maps_size == live_map_objects_size_); - ASSERT(live_data_olds_size == live_old_data_objects_size_); - ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); - ASSERT(live_codes_size == live_code_objects_size_); - ASSERT(live_cells_size == live_cell_objects_size_); - ASSERT(live_news_size == live_young_objects_size_); - - // Flip from and to spaces - heap()->new_space()->Flip(); - - heap()->new_space()->MCCommitRelocationInfo(); - - // Set age_mark to bottom in to space - Address mark = heap()->new_space()->bottom(); - heap()->new_space()->set_age_mark(mark); + space->ClearStats(); - PagedSpaces spaces; - for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next()) - space->MCCommitRelocationInfo(); + PageIterator it(space); - heap()->CheckNewSpaceExpansionCriteria(); - heap()->IncrementYoungSurvivorsCounter(live_news_size); -} + intptr_t freed_bytes = 0; + intptr_t newspace_size = space->heap()->new_space()->Size(); + bool lazy_sweeping_active = false; + bool unused_page_present = false; + while (it.has_next()) { + Page* p = it.next(); -int MarkCompactCollector::RelocateMapObject(HeapObject* obj) { - // Recover map pointer. - MapWord encoding = obj->map_word(); - Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); - ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr))); + // Clear sweeping flags indicating that marking bits are still intact. + p->ClearSweptPrecisely(); + p->ClearSweptConservatively(); - // Get forwarding address before resetting map pointer - Address new_addr = GetForwardingAddressInOldSpace(obj); + if (p->IsEvacuationCandidate()) { + ASSERT(evacuation_candidates_.length() > 0); + continue; + } - // Reset map pointer. The meta map object may not be copied yet so - // Map::cast does not yet work. 
- obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr))); + if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { + // Will be processed in EvacuateNewSpaceAndCandidates. + continue; + } - Address old_addr = obj->address(); + if (lazy_sweeping_active) { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n", + reinterpret_cast<intptr_t>(p)); + } + continue; + } - if (new_addr != old_addr) { - // Move contents. - heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr, - old_addr, - Map::kSize); - } + // One unused page is kept, all further are released before sweeping them. + if (p->LiveBytes() == 0) { + if (unused_page_present) { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", + reinterpret_cast<intptr_t>(p)); + } + space->ReleasePage(p); + continue; + } + unused_page_present = true; + } -#ifdef DEBUG - if (FLAG_gc_verbose) { - PrintF("relocate %p -> %p\n", old_addr, new_addr); + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n", + reinterpret_cast<intptr_t>(p), + sweeper); + } + + switch (sweeper) { + case CONSERVATIVE: { + SweepConservatively(space, p); + break; + } + case LAZY_CONSERVATIVE: { + freed_bytes += SweepConservatively(space, p); + if (freed_bytes >= newspace_size && p != space->LastPage()) { + space->SetPagesToSweep(p->next_page(), space->anchor()); + lazy_sweeping_active = true; + } + break; + } + case PRECISE: { + if (space->identity() == CODE_SPACE) { + SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL); + } else { + SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL); + } + break; + } + default: { + UNREACHABLE(); + } + } } -#endif - return Map::kSize; + // Give pages that are queued to be freed back to the OS. + heap()->FreeQueuedChunks(); } -static inline int RestoreMap(HeapObject* obj, - PagedSpace* space, - Address new_addr, - Address map_addr) { - // This must be a non-map object, and the function relies on the - // assumption that the Map space is compacted before the other paged - // spaces (see RelocateObjects). - - // Reset map pointer. - obj->set_map(Map::cast(HeapObject::FromAddress(map_addr))); - - int obj_size = obj->Size(); - ASSERT_OBJECT_SIZE(obj_size); - - ASSERT(space->MCSpaceOffsetForAddress(new_addr) <= - space->MCSpaceOffsetForAddress(obj->address())); - +void MarkCompactCollector::SweepSpaces() { + GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); #ifdef DEBUG - if (FLAG_gc_verbose) { - PrintF("relocate %p -> %p\n", obj->address(), new_addr); - } + state_ = SWEEP_SPACES; #endif + SweeperType how_to_sweep = + FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; + if (sweep_precisely_) how_to_sweep = PRECISE; + // Noncompacting collections simply sweep the spaces to clear the mark + // bits and free the nonlive blocks (for old and map spaces). We sweep + // the map space last because freeing non-live maps overwrites them and + // the other spaces rely on possibly non-live maps to get the sizes for + // non-live objects. + SweepSpace(heap()->old_pointer_space(), how_to_sweep); + SweepSpace(heap()->old_data_space(), how_to_sweep); - return obj_size; -} - - -int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj, - PagedSpace* space) { - // Recover map pointer. - MapWord encoding = obj->map_word(); - Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); - ASSERT(heap()->map_space()->Contains(map_addr)); - - // Get forwarding address before resetting map pointer. 
- Address new_addr = GetForwardingAddressInOldSpace(obj); - - // Reset the map pointer. - int obj_size = RestoreMap(obj, space, new_addr, map_addr); + RemoveDeadInvalidatedCode(); + SweepSpace(heap()->code_space(), PRECISE); - Address old_addr = obj->address(); + SweepSpace(heap()->cell_space(), PRECISE); - if (new_addr != old_addr) { - // Move contents. - if (space == heap()->old_data_space()) { - heap()->MoveBlock(new_addr, old_addr, obj_size); - } else { - heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr, - old_addr, - obj_size); - } + { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); + EvacuateNewSpaceAndCandidates(); } - ASSERT(!HeapObject::FromAddress(new_addr)->IsCode()); + // ClearNonLiveTransitions depends on precise sweeping of map space to + // detect whether unmarked map became dead in this collection or in one + // of the previous ones. + SweepSpace(heap()->map_space(), PRECISE); - HeapObject* copied_to = HeapObject::FromAddress(new_addr); - if (copied_to->IsSharedFunctionInfo()) { - PROFILE(heap()->isolate(), - SharedFunctionInfoMoveEvent(old_addr, new_addr)); - } - HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr)); + ASSERT(live_map_objects_size_ <= heap()->map_space()->Size()); - return obj_size; + // Deallocate unmarked objects and clear marked bits for marked objects. + heap_->lo_space()->FreeUnmarkedObjects(); } -int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) { - return RelocateOldNonCodeObject(obj, heap()->old_pointer_space()); +void MarkCompactCollector::EnableCodeFlushing(bool enable) { + if (enable) { + if (code_flusher_ != NULL) return; + code_flusher_ = new CodeFlusher(heap()->isolate()); + } else { + if (code_flusher_ == NULL) return; + delete code_flusher_; + code_flusher_ = NULL; + } } -int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) { - return RelocateOldNonCodeObject(obj, heap()->old_data_space()); +// TODO(1466) ReportDeleteIfNeeded is not called currently. +// Our profiling tools do not expect intersections between +// code objects. We should either reenable it or change our tools. +void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, + Isolate* isolate) { +#ifdef ENABLE_GDB_JIT_INTERFACE + if (obj->IsCode()) { + GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); + } +#endif + if (obj->IsCode()) { + PROFILE(isolate, CodeDeleteEvent(obj->address())); + } } -int MarkCompactCollector::RelocateCellObject(HeapObject* obj) { - return RelocateOldNonCodeObject(obj, heap()->cell_space()); +void MarkCompactCollector::Initialize() { + StaticMarkingVisitor::Initialize(); } -int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) { - // Recover map pointer. - MapWord encoding = obj->map_word(); - Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); - ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr))); +bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) { + return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES; +} - // Get forwarding address before resetting map pointer - Address new_addr = GetForwardingAddressInOldSpace(obj); - // Reset the map pointer. 
- int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr); +bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator, + SlotsBuffer** buffer_address, + SlotType type, + Address addr, + AdditionMode mode) { + SlotsBuffer* buffer = *buffer_address; + if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) { + if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { + allocator->DeallocateChain(buffer_address); + return false; + } + buffer = allocator->AllocateBuffer(buffer); + *buffer_address = buffer; + } + ASSERT(buffer->HasSpaceForTypedSlot()); + buffer->Add(reinterpret_cast<ObjectSlot>(type)); + buffer->Add(reinterpret_cast<ObjectSlot>(addr)); + return true; +} - Address old_addr = obj->address(); - if (new_addr != old_addr) { - // Move contents. - heap()->MoveBlock(new_addr, old_addr, obj_size); +static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) { + if (RelocInfo::IsCodeTarget(rmode)) { + return SlotsBuffer::CODE_TARGET_SLOT; + } else if (RelocInfo::IsEmbeddedObject(rmode)) { + return SlotsBuffer::EMBEDDED_OBJECT_SLOT; + } else if (RelocInfo::IsDebugBreakSlot(rmode)) { + return SlotsBuffer::DEBUG_TARGET_SLOT; + } else if (RelocInfo::IsJSReturn(rmode)) { + return SlotsBuffer::JS_RETURN_SLOT; } + UNREACHABLE(); + return SlotsBuffer::NUMBER_OF_SLOT_TYPES; +} - HeapObject* copied_to = HeapObject::FromAddress(new_addr); - if (copied_to->IsCode()) { - // May also update inline cache target. - Code::cast(copied_to)->Relocate(new_addr - old_addr); - // Notify the logger that compiled code has moved. - PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr)); - } - HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr)); - return obj_size; +void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) { + Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target)); + if (target_page->IsEvacuationCandidate() && + (rinfo->host() == NULL || + !ShouldSkipEvacuationSlotRecording(rinfo->host()))) { + if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, + target_page->slots_buffer_address(), + SlotTypeForRMode(rinfo->rmode()), + rinfo->pc(), + SlotsBuffer::FAIL_ON_OVERFLOW)) { + EvictEvacuationCandidate(target_page); + } + } } -int MarkCompactCollector::RelocateNewObject(HeapObject* obj) { - int obj_size = obj->Size(); - - // Get forwarding address - Address old_addr = obj->address(); - int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr); +void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) { + Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target)); + if (target_page->IsEvacuationCandidate() && + !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) { + if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, + target_page->slots_buffer_address(), + SlotsBuffer::CODE_ENTRY_SLOT, + slot, + SlotsBuffer::FAIL_ON_OVERFLOW)) { + EvictEvacuationCandidate(target_page); + } + } +} - Address new_addr = - Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset); -#ifdef DEBUG - if (heap()->new_space()->FromSpaceContains(new_addr)) { - ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <= - heap()->new_space()->ToSpaceOffsetForAddress(old_addr)); - } else { - ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() || - heap()->TargetSpace(obj) == heap()->old_data_space()); - } -#endif +static inline SlotsBuffer::SlotType DecodeSlotType( + SlotsBuffer::ObjectSlot slot) { + return 
static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot)); +} - // New and old addresses cannot overlap. - if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) { - heap()->CopyBlock(new_addr, old_addr, obj_size); - } else { - heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr, - old_addr, - obj_size); - } -#ifdef DEBUG - if (FLAG_gc_verbose) { - PrintF("relocate %p -> %p\n", old_addr, new_addr); - } -#endif +void SlotsBuffer::UpdateSlots(Heap* heap) { + PointersUpdatingVisitor v(heap); - HeapObject* copied_to = HeapObject::FromAddress(new_addr); - if (copied_to->IsSharedFunctionInfo()) { - PROFILE(heap()->isolate(), - SharedFunctionInfoMoveEvent(old_addr, new_addr)); + for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { + ObjectSlot slot = slots_[slot_idx]; + if (!IsTypedSlot(slot)) { + PointersUpdatingVisitor::UpdateSlot(heap, slot); + } else { + ++slot_idx; + ASSERT(slot_idx < idx_); + UpdateSlot(&v, + DecodeSlotType(slot), + reinterpret_cast<Address>(slots_[slot_idx])); + } } - HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr)); - - return obj_size; } -void MarkCompactCollector::EnableCodeFlushing(bool enable) { - if (enable) { - if (code_flusher_ != NULL) return; - code_flusher_ = new CodeFlusher(heap()->isolate()); - } else { - if (code_flusher_ == NULL) return; - delete code_flusher_; - code_flusher_ = NULL; +void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) { + PointersUpdatingVisitor v(heap); + + for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { + ObjectSlot slot = slots_[slot_idx]; + if (!IsTypedSlot(slot)) { + if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) { + PointersUpdatingVisitor::UpdateSlot(heap, slot); + } + } else { + ++slot_idx; + ASSERT(slot_idx < idx_); + Address pc = reinterpret_cast<Address>(slots_[slot_idx]); + if (!IsOnInvalidatedCodeObject(pc)) { + UpdateSlot(&v, + DecodeSlotType(slot), + reinterpret_cast<Address>(slots_[slot_idx])); + } + } } } -void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, - Isolate* isolate) { -#ifdef ENABLE_GDB_JIT_INTERFACE - if (obj->IsCode()) { - GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); - } -#endif - if (obj->IsCode()) { - PROFILE(isolate, CodeDeleteEvent(obj->address())); - } +SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { + return new SlotsBuffer(next_buffer); } -int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) { - MapWord map_word = obj->map_word(); - map_word.ClearMark(); - return obj->SizeFromMap(map_word.ToMap()); +void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { + delete buffer; } -void MarkCompactCollector::Initialize() { - StaticPointersToNewGenUpdatingVisitor::Initialize(); - StaticMarkingVisitor::Initialize(); +void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { + SlotsBuffer* buffer = *buffer_address; + while (buffer != NULL) { + SlotsBuffer* next_buffer = buffer->next(); + DeallocateBuffer(buffer); + buffer = next_buffer; + } + *buffer_address = NULL; } diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h index 9b67c8aff..d54d82249 100644 --- a/deps/v8/src/mark-compact.h +++ b/deps/v8/src/mark-compact.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -28,6 +28,7 @@ #ifndef V8_MARK_COMPACT_H_ #define V8_MARK_COMPACT_H_ +#include "compiler-intrinsics.h" #include "spaces.h" namespace v8 { @@ -45,62 +46,365 @@ class MarkingVisitor; class RootMarkingVisitor; +class Marking { + public: + explicit Marking(Heap* heap) + : heap_(heap) { + } + + static inline MarkBit MarkBitFrom(Address addr); + + static inline MarkBit MarkBitFrom(HeapObject* obj) { + return MarkBitFrom(reinterpret_cast<Address>(obj)); + } + + // Impossible markbits: 01 + static const char* kImpossibleBitPattern; + static inline bool IsImpossible(MarkBit mark_bit) { + ASSERT(strcmp(kImpossibleBitPattern, "01") == 0); + return !mark_bit.Get() && mark_bit.Next().Get(); + } + + // Black markbits: 10 - this is required by the sweeper. + static const char* kBlackBitPattern; + static inline bool IsBlack(MarkBit mark_bit) { + ASSERT(strcmp(kBlackBitPattern, "10") == 0); + ASSERT(!IsImpossible(mark_bit)); + return mark_bit.Get() && !mark_bit.Next().Get(); + } + + // White markbits: 00 - this is required by the mark bit clearer. + static const char* kWhiteBitPattern; + static inline bool IsWhite(MarkBit mark_bit) { + ASSERT(strcmp(kWhiteBitPattern, "00") == 0); + ASSERT(!IsImpossible(mark_bit)); + return !mark_bit.Get(); + } + + // Grey markbits: 11 + static const char* kGreyBitPattern; + static inline bool IsGrey(MarkBit mark_bit) { + ASSERT(strcmp(kGreyBitPattern, "11") == 0); + ASSERT(!IsImpossible(mark_bit)); + return mark_bit.Get() && mark_bit.Next().Get(); + } + + static inline void MarkBlack(MarkBit mark_bit) { + mark_bit.Set(); + mark_bit.Next().Clear(); + ASSERT(Marking::IsBlack(mark_bit)); + } + + static inline void BlackToGrey(MarkBit markbit) { + ASSERT(IsBlack(markbit)); + markbit.Next().Set(); + ASSERT(IsGrey(markbit)); + } + + static inline void WhiteToGrey(MarkBit markbit) { + ASSERT(IsWhite(markbit)); + markbit.Set(); + markbit.Next().Set(); + ASSERT(IsGrey(markbit)); + } + + static inline void GreyToBlack(MarkBit markbit) { + ASSERT(IsGrey(markbit)); + markbit.Next().Clear(); + ASSERT(IsBlack(markbit)); + } + + static inline void BlackToGrey(HeapObject* obj) { + ASSERT(obj->Size() >= 2 * kPointerSize); + BlackToGrey(MarkBitFrom(obj)); + } + + static inline void AnyToGrey(MarkBit markbit) { + markbit.Set(); + markbit.Next().Set(); + ASSERT(IsGrey(markbit)); + } + + // Returns true if the the object whose mark is transferred is marked black. + bool TransferMark(Address old_start, Address new_start); + +#ifdef DEBUG + enum ObjectColor { + BLACK_OBJECT, + WHITE_OBJECT, + GREY_OBJECT, + IMPOSSIBLE_COLOR + }; + + static const char* ColorName(ObjectColor color) { + switch (color) { + case BLACK_OBJECT: return "black"; + case WHITE_OBJECT: return "white"; + case GREY_OBJECT: return "grey"; + case IMPOSSIBLE_COLOR: return "impossible"; + } + return "error"; + } + + static ObjectColor Color(HeapObject* obj) { + return Color(Marking::MarkBitFrom(obj)); + } + + static ObjectColor Color(MarkBit mark_bit) { + if (IsBlack(mark_bit)) return BLACK_OBJECT; + if (IsWhite(mark_bit)) return WHITE_OBJECT; + if (IsGrey(mark_bit)) return GREY_OBJECT; + UNREACHABLE(); + return IMPOSSIBLE_COLOR; + } +#endif + + // Returns true if the transferred color is black. 
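Each object's liveness is tracked by a pair of adjacent mark bits: 00 is white (unmarked), 11 is grey (marked but not yet scanned), 10 is black (marked and scanned), and 01 never occurs. The sweeper relies on black being exactly 10 and the mark-bit clearer on white being exactly 00, which is why every transition touches the bits in a fixed order. A standalone model of the colors and transitions used above (demotion back to grey is what happens when the marking deque overflows, as in PushBlack further down):

#include <cassert>

// Two mark bits per object: "mark" and the bit that follows it ("next").
// 00 = white, 11 = grey, 10 = black, 01 = impossible (never produced).
struct MarkBits {
  bool mark;
  bool next;
};

static bool IsWhite(MarkBits m) { return !m.mark && !m.next; }
static bool IsGrey(MarkBits m)  { return m.mark && m.next; }
static bool IsBlack(MarkBits m) { return m.mark && !m.next; }

static void WhiteToGrey(MarkBits* m) { m->mark = true; m->next = true; }
static void GreyToBlack(MarkBits* m) { m->next = false; }
static void BlackToGrey(MarkBits* m) { m->next = true; }

int main() {
  MarkBits m = {false, false};   // freshly allocated object: white
  assert(IsWhite(m));
  WhiteToGrey(&m);               // discovered and pushed on the marking deque
  assert(IsGrey(m));
  GreyToBlack(&m);               // popped; its fields have now been visited
  assert(IsBlack(m));
  BlackToGrey(&m);               // deque full: demote so the overflow rescan finds it
  assert(IsGrey(m));
  return 0;
}

TransferColor, declared next, copies both bits so an object keeps its color when its mark position has to move with it.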
+ INLINE(static bool TransferColor(HeapObject* from, + HeapObject* to)) { + MarkBit from_mark_bit = MarkBitFrom(from); + MarkBit to_mark_bit = MarkBitFrom(to); + bool is_black = false; + if (from_mark_bit.Get()) { + to_mark_bit.Set(); + is_black = true; // Looks black so far. + } + if (from_mark_bit.Next().Get()) { + to_mark_bit.Next().Set(); + is_black = false; // Was actually gray. + } + ASSERT(Color(from) == Color(to)); + ASSERT(is_black == (Color(to) == BLACK_OBJECT)); + return is_black; + } + + private: + Heap* heap_; +}; + // ---------------------------------------------------------------------------- -// Marking stack for tracing live objects. +// Marking deque for tracing live objects. -class MarkingStack { +class MarkingDeque { public: - MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { } + MarkingDeque() + : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { } void Initialize(Address low, Address high) { - top_ = low_ = reinterpret_cast<HeapObject**>(low); - high_ = reinterpret_cast<HeapObject**>(high); + HeapObject** obj_low = reinterpret_cast<HeapObject**>(low); + HeapObject** obj_high = reinterpret_cast<HeapObject**>(high); + array_ = obj_low; + mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1; + top_ = bottom_ = 0; overflowed_ = false; } - bool is_full() const { return top_ >= high_; } + inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; } - bool is_empty() const { return top_ <= low_; } + inline bool IsEmpty() { return top_ == bottom_; } bool overflowed() const { return overflowed_; } - void clear_overflowed() { overflowed_ = false; } + void ClearOverflowed() { overflowed_ = false; } + + void SetOverflowed() { overflowed_ = true; } // Push the (marked) object on the marking stack if there is room, // otherwise mark the object as overflowed and wait for a rescan of the // heap. - void Push(HeapObject* object) { - CHECK(object->IsHeapObject()); - if (is_full()) { - object->SetOverflow(); - overflowed_ = true; + inline void PushBlack(HeapObject* object) { + ASSERT(object->IsHeapObject()); + if (IsFull()) { + Marking::BlackToGrey(object); + MemoryChunk::IncrementLiveBytes(object->address(), -object->Size()); + SetOverflowed(); } else { - *(top_++) = object; + array_[top_] = object; + top_ = ((top_ + 1) & mask_); } } - HeapObject* Pop() { - ASSERT(!is_empty()); - HeapObject* object = *(--top_); - CHECK(object->IsHeapObject()); + inline void PushGrey(HeapObject* object) { + ASSERT(object->IsHeapObject()); + if (IsFull()) { + ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object))); + SetOverflowed(); + } else { + array_[top_] = object; + top_ = ((top_ + 1) & mask_); + } + } + + inline HeapObject* Pop() { + ASSERT(!IsEmpty()); + top_ = ((top_ - 1) & mask_); + HeapObject* object = array_[top_]; + ASSERT(object->IsHeapObject()); return object; } + inline void UnshiftGrey(HeapObject* object) { + ASSERT(object->IsHeapObject()); + if (IsFull()) { + ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object))); + SetOverflowed(); + } else { + bottom_ = ((bottom_ - 1) & mask_); + array_[bottom_] = object; + } + } + + HeapObject** array() { return array_; } + int bottom() { return bottom_; } + int top() { return top_; } + int mask() { return mask_; } + void set_top(int top) { top_ = top; } + private: - HeapObject** low_; - HeapObject** top_; - HeapObject** high_; + HeapObject** array_; + // array_[(top - 1) & mask_] is the top element in the deque. The Deque is + // empty when top_ == bottom_. 
It is full when top_ + 1 == bottom + // (mod mask + 1). + int top_; + int bottom_; + int mask_; bool overflowed_; - DISALLOW_COPY_AND_ASSIGN(MarkingStack); + DISALLOW_COPY_AND_ASSIGN(MarkingDeque); }; -// ------------------------------------------------------------------------- -// Mark-Compact collector +class SlotsBufferAllocator { + public: + SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer); + void DeallocateBuffer(SlotsBuffer* buffer); + + void DeallocateChain(SlotsBuffer** buffer_address); +}; + + +// SlotsBuffer records a sequence of slots that has to be updated +// after live objects were relocated from evacuation candidates. +// All slots are either untyped or typed: +// - Untyped slots are expected to contain a tagged object pointer. +// They are recorded by an address. +// - Typed slots are expected to contain an encoded pointer to a heap +// object where the way of encoding depends on the type of the slot. +// They are recorded as a pair (SlotType, slot address). +// We assume that zero-page is never mapped this allows us to distinguish +// untyped slots from typed slots during iteration by a simple comparison: +// if element of slots buffer is less than NUMBER_OF_SLOT_TYPES then it +// is the first element of typed slot's pair. +class SlotsBuffer { + public: + typedef Object** ObjectSlot; + + explicit SlotsBuffer(SlotsBuffer* next_buffer) + : idx_(0), chain_length_(1), next_(next_buffer) { + if (next_ != NULL) { + chain_length_ = next_->chain_length_ + 1; + } + } + + ~SlotsBuffer() { + } + + void Add(ObjectSlot slot) { + ASSERT(0 <= idx_ && idx_ < kNumberOfElements); + slots_[idx_++] = slot; + } + + enum SlotType { + EMBEDDED_OBJECT_SLOT, + RELOCATED_CODE_OBJECT, + CODE_TARGET_SLOT, + CODE_ENTRY_SLOT, + DEBUG_TARGET_SLOT, + JS_RETURN_SLOT, + NUMBER_OF_SLOT_TYPES + }; + + void UpdateSlots(Heap* heap); + + void UpdateSlotsWithFilter(Heap* heap); + + SlotsBuffer* next() { return next_; } + + static int SizeOfChain(SlotsBuffer* buffer) { + if (buffer == NULL) return 0; + return static_cast<int>(buffer->idx_ + + (buffer->chain_length_ - 1) * kNumberOfElements); + } + + inline bool IsFull() { + return idx_ == kNumberOfElements; + } + + inline bool HasSpaceForTypedSlot() { + return idx_ < kNumberOfElements - 1; + } + + static void UpdateSlotsRecordedIn(Heap* heap, + SlotsBuffer* buffer, + bool code_slots_filtering_required) { + while (buffer != NULL) { + if (code_slots_filtering_required) { + buffer->UpdateSlotsWithFilter(heap); + } else { + buffer->UpdateSlots(heap); + } + buffer = buffer->next(); + } + } + + enum AdditionMode { + FAIL_ON_OVERFLOW, + IGNORE_OVERFLOW + }; -class OverflowedObjectsScanner; + static bool ChainLengthThresholdReached(SlotsBuffer* buffer) { + return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold; + } + + static bool AddTo(SlotsBufferAllocator* allocator, + SlotsBuffer** buffer_address, + ObjectSlot slot, + AdditionMode mode) { + SlotsBuffer* buffer = *buffer_address; + if (buffer == NULL || buffer->IsFull()) { + if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { + allocator->DeallocateChain(buffer_address); + return false; + } + buffer = allocator->AllocateBuffer(buffer); + *buffer_address = buffer; + } + buffer->Add(slot); + return true; + } + + static bool IsTypedSlot(ObjectSlot slot); + + static bool AddTo(SlotsBufferAllocator* allocator, + SlotsBuffer** buffer_address, + SlotType type, + Address addr, + AdditionMode mode); + + static const int kNumberOfElements = 1021; + + private: + static const int 
kChainLengthThreshold = 6; + + intptr_t idx_; + intptr_t chain_length_; + SlotsBuffer* next_; + ObjectSlot slots_[kNumberOfElements]; +}; + +// ------------------------------------------------------------------------- +// Mark-Compact collector class MarkCompactCollector { public: // Type of functions to compute forwarding addresses of objects in @@ -134,13 +438,18 @@ class MarkCompactCollector { // Set the global force_compaction flag, it must be called before Prepare // to take effect. - void SetForceCompaction(bool value) { - force_compaction_ = value; - } + inline void SetFlags(int flags); + inline bool PreciseSweepingRequired() { + return sweep_precisely_; + } static void Initialize(); + void CollectEvacuationCandidates(PagedSpace* space); + + void AddEvacuationCandidate(Page* p); + // Prepares for GC by resetting relocation info in old and map spaces and // choosing spaces to compact. void Prepare(GCTracer* tracer); @@ -148,23 +457,9 @@ class MarkCompactCollector { // Performs a global garbage collection. void CollectGarbage(); - // True if the last full GC performed heap compaction. - bool HasCompacted() { return compacting_collection_; } - - // True after the Prepare phase if the compaction is taking place. - bool IsCompacting() { -#ifdef DEBUG - // For the purposes of asserts we don't want this to keep returning true - // after the collection is completed. - return state_ != IDLE && compacting_collection_; -#else - return compacting_collection_; -#endif - } + bool StartCompaction(); - // The count of the number of objects left marked at the end of the last - // completed full GC (expected to be zero). - int previous_marked_count() { return previous_marked_count_; } + void AbortCompaction(); // During a full GC, there is a stack-allocated GCTracer that is used for // bookkeeping information. Return a pointer to that tracer. @@ -179,29 +474,99 @@ class MarkCompactCollector { // Determine type of object and emit deletion log event. static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate); - // Returns size of a possibly marked object. - static int SizeOfMarkedObject(HeapObject* obj); - // Distinguishable invalid map encodings (for single word and multiple words) // that indicate free regions. static const uint32_t kSingleFreeEncoding = 0; static const uint32_t kMultiFreeEncoding = 1; + static inline bool IsMarked(Object* obj); + inline Heap* heap() const { return heap_; } CodeFlusher* code_flusher() { return code_flusher_; } inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; } void EnableCodeFlushing(bool enable); + enum SweeperType { + CONSERVATIVE, + LAZY_CONSERVATIVE, + PRECISE + }; + +#ifdef DEBUG + void VerifyMarkbitsAreClean(); + static void VerifyMarkbitsAreClean(PagedSpace* space); + static void VerifyMarkbitsAreClean(NewSpace* space); +#endif + + // Sweep a single page from the given space conservatively. + // Return a number of reclaimed bytes. 
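A SlotsBuffer holds 1021 entries and full buffers are chained; SizeOfChain combines the index of the head buffer with the length of the chain behind it. AddTo with FAIL_ON_OVERFLOW refuses to grow a chain past kChainLengthThreshold: it frees the whole chain and returns false, and the caller reacts by evicting the overly popular page instead of recording yet more slots for it (IGNORE_OVERFLOW, presumably used for the migration buffer, keeps growing regardless). A small standalone model of that policy, with made-up capacities:

#include <cstddef>

// Chained slot buffers with a bounded chain length; names are illustrative.
struct BufferModel {
  static const int kCapacity = 4;      // SlotsBuffer holds 1021 entries
  static const int kChainLimit = 6;    // mirrors kChainLengthThreshold
  void* slots[kCapacity];
  int idx;
  int chain_length;
  BufferModel* next;
  explicit BufferModel(BufferModel* n)
      : idx(0), chain_length(n == NULL ? 1 : n->chain_length + 1), next(n) {}
};

enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };

// Returns false after freeing the whole chain when a page keeps overflowing;
// the collector then evicts that page from the candidate set instead of
// recording ever more slots for it.
static bool AddTo(BufferModel** head, void* slot, AdditionMode mode) {
  BufferModel* buffer = *head;
  if (buffer == NULL || buffer->idx == BufferModel::kCapacity) {
    if (mode == FAIL_ON_OVERFLOW && buffer != NULL &&
        buffer->chain_length >= BufferModel::kChainLimit) {
      while (buffer != NULL) {
        BufferModel* next = buffer->next;
        delete buffer;
        buffer = next;
      }
      *head = NULL;
      return false;
    }
    buffer = new BufferModel(buffer);
    *head = buffer;
  }
  buffer->slots[buffer->idx++] = slot;
  return true;
}

Conservative sweeping of a single page, whose declaration follows, sits on the reclamation side of the same bookkeeping and reports how many bytes it freed.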
+ static intptr_t SweepConservatively(PagedSpace* space, Page* p); + + INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) { + return Page::FromAddress(reinterpret_cast<Address>(anchor))-> + ShouldSkipEvacuationSlotRecording(); + } + + INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) { + return Page::FromAddress(reinterpret_cast<Address>(host))-> + ShouldSkipEvacuationSlotRecording(); + } + + INLINE(static bool IsOnEvacuationCandidate(Object* obj)) { + return Page::FromAddress(reinterpret_cast<Address>(obj))-> + IsEvacuationCandidate(); + } + + void EvictEvacuationCandidate(Page* page) { + if (FLAG_trace_fragmentation) { + PrintF("Page %p is too popular. Disabling evacuation.\n", + reinterpret_cast<void*>(page)); + } + + // TODO(gc) If all evacuation candidates are too popular we + // should stop slots recording entirely. + page->ClearEvacuationCandidate(); + + // We were not collecting slots on this page that point + // to other evacuation candidates thus we have to + // rescan the page after evacuation to discover and update all + // pointers to evacuated objects. + if (page->owner()->identity() == OLD_DATA_SPACE) { + evacuation_candidates_.RemoveElement(page); + } else { + page->SetFlag(Page::RESCAN_ON_EVACUATION); + } + } + + void RecordRelocSlot(RelocInfo* rinfo, Object* target); + void RecordCodeEntrySlot(Address slot, Code* target); + + INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object)); + + void MigrateObject(Address dst, + Address src, + int size, + AllocationSpace to_old_space); + + bool TryPromoteObject(HeapObject* object, int object_size); + inline Object* encountered_weak_maps() { return encountered_weak_maps_; } inline void set_encountered_weak_maps(Object* weak_map) { encountered_weak_maps_ = weak_map; } + void InvalidateCode(Code* code); + private: MarkCompactCollector(); ~MarkCompactCollector(); + bool MarkInvalidatedCode(); + void RemoveDeadInvalidatedCode(); + void ProcessInvalidatedCode(ObjectVisitor* visitor); + + #ifdef DEBUG enum CollectorState { IDLE, @@ -217,23 +582,26 @@ class MarkCompactCollector { CollectorState state_; #endif - // Global flag that forces a compaction. - bool force_compaction_; + // Global flag that forces sweeping to be precise, so we can traverse the + // heap. + bool sweep_precisely_; - // Global flag indicating whether spaces were compacted on the last GC. - bool compacting_collection_; + // True if we are collecting slots to perform evacuation from evacuation + // candidates. + bool compacting_; - // Global flag indicating whether spaces will be compacted on the next GC. - bool compact_on_next_gc_; + bool was_marked_incrementally_; - // The number of objects left marked at the end of the last completed full - // GC (expected to be zero). - int previous_marked_count_; + bool collect_maps_; // A pointer to the current stack-allocated GC tracer object during a full // collection (NULL before and after). GCTracer* tracer_; + SlotsBufferAllocator slots_buffer_allocator_; + + SlotsBuffer* migration_slots_buffer_; + // Finishes GC, performs heap verification if enabled. void Finish(); @@ -258,13 +626,13 @@ class MarkCompactCollector { // Marking operations for objects reachable from roots. 
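When slot recording for a page hits the chain limit above, EvictEvacuationCandidate gives up on moving that page: the candidate flag is cleared, and because no slots were being collected on the page itself that point at other candidates, the page has to be rescanned after evacuation unless it lives in old data space, which holds no heap pointers and can simply drop out of the candidate list. A standalone sketch of that decision (modelled types, not V8's Page):

#include <algorithm>
#include <vector>

// Eviction decision for a page whose slots-buffer chain grew past the limit.
enum SpaceIdentity { OLD_DATA_SPACE, OLD_POINTER_SPACE };

struct PageModel {
  SpaceIdentity owner;
  bool evacuation_candidate;
  bool rescan_on_evacuation;
};

static void EvictPopularPage(PageModel* page,
                             std::vector<PageModel*>* candidates) {
  page->evacuation_candidate = false;   // stop recording slots into this page
  if (page->owner == OLD_DATA_SPACE) {
    // Data-space objects contain no heap pointers, so the page can simply
    // drop out of the candidate list.
    candidates->erase(
        std::remove(candidates->begin(), candidates->end(), page),
        candidates->end());
  } else {
    // Pointer-space pages may hold unrecorded references to objects that do
    // get evacuated, so they are rescanned wholesale after evacuation.
    page->rescan_on_evacuation = true;
  }
}

Marking proper then proceeds from the roots through the private entry points declared next.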
void MarkLiveObjects(); - void MarkUnmarkedObject(HeapObject* obj); + void AfterMarking(); - inline void MarkObject(HeapObject* obj) { - if (!obj->IsMarked()) MarkUnmarkedObject(obj); - } + INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit)); + + INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit)); - inline void SetMark(HeapObject* obj); + void ProcessNewlyMarkedObject(HeapObject* obj); // Creates back pointers for all map transitions, stores them in // the prototype field. The original prototype pointers are restored @@ -298,18 +666,18 @@ class MarkCompactCollector { // Mark objects reachable (transitively) from objects in the marking stack // or overflowed in the heap. - void ProcessMarkingStack(); + void ProcessMarkingDeque(); // Mark objects reachable (transitively) from objects in the marking // stack. This function empties the marking stack, but may leave // overflowed objects in the heap, in which case the marking stack's // overflow flag will be set. - void EmptyMarkingStack(); + void EmptyMarkingDeque(); // Refill the marking stack with overflowed objects from the heap. This // function either leaves the marking stack full or clears the overflow // flag on the marking stack. - void RefillMarkingStack(); + void RefillMarkingDeque(); // After reachable maps have been marked process per context object // literal map caches removing unmarked entries. @@ -323,17 +691,16 @@ class MarkCompactCollector { void UpdateLiveObjectCount(HeapObject* obj); #endif - // We sweep the large object space in the same way whether we are - // compacting or not, because the large object space is never compacted. - void SweepLargeObjectSpace(); - - // Test whether a (possibly marked) object is a Map. - static inline bool SafeIsMap(HeapObject* object); - // Map transitions from a live map to a dead map must be killed. // We replace them with a null descriptor, with the same key. void ClearNonLiveTransitions(); + // Marking detaches initial maps from SharedFunctionInfo objects + // to make this reference weak. We need to reattach initial maps + // back after collection. This is either done during + // ClearNonLiveTransitions pass or by calling this function. + void ReattachInitialMaps(); + // Mark all values associated with reachable keys in weak maps encountered // so far. This might push new object or even new weak maps onto the // marking stack. @@ -346,133 +713,30 @@ class MarkCompactCollector { // ----------------------------------------------------------------------- // Phase 2: Sweeping to clear mark bits and free non-live objects for - // a non-compacting collection, or else computing and encoding - // forwarding addresses for a compacting collection. + // a non-compacting collection. // // Before: Live objects are marked and non-live objects are unmarked. // - // After: (Non-compacting collection.) Live objects are unmarked, - // non-live regions have been added to their space's free - // list. - // - // After: (Compacting collection.) The forwarding address of live - // objects in the paged spaces is encoded in their map word - // along with their (non-forwarded) map pointer. - // - // The forwarding address of live objects in the new space is - // written to their map word's offset in the inactive - // semispace. - // - // Bookkeeping data is written to the page header of - // eached paged-space page that contains live objects after - // compaction: + // After: Live objects are unmarked, non-live regions have been added to + // their space's free list. 
Active eden semispace is compacted by + // evacuation. // - // The allocation watermark field is used to track the - // relocation top address, the address of the first word - // after the end of the last live object in the page after - // compaction. - // - // The Page::mc_page_index field contains the zero-based index of the - // page in its space. This word is only used for map space pages, in - // order to encode the map addresses in 21 bits to free 11 - // bits per map word for the forwarding address. - // - // The Page::mc_first_forwarded field contains the (nonencoded) - // forwarding address of the first live object in the page. - // - // In both the new space and the paged spaces, a linked list - // of live regions is constructructed (linked through - // pointers in the non-live region immediately following each - // live region) to speed further passes of the collector. - - // Encodes forwarding addresses of objects in compactable parts of the - // heap. - void EncodeForwardingAddresses(); - - // Encodes the forwarding addresses of objects in new space. - void EncodeForwardingAddressesInNewSpace(); - - // Function template to encode the forwarding addresses of objects in - // paged spaces, parameterized by allocation and non-live processing - // functions. - template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive> - void EncodeForwardingAddressesInPagedSpace(PagedSpace* space); - - // Iterates live objects in a space, passes live objects - // to a callback function which returns the heap size of the object. - // Returns the number of live objects iterated. - int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f); - int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f); - - // Iterates the live objects between a range of addresses, returning the - // number of live objects. - int IterateLiveObjectsInRange(Address start, Address end, - LiveObjectCallback size_func); // If we are not compacting the heap, we simply sweep the spaces except // for the large object space, clearing mark bits and adding unmarked // regions to each space's free list. void SweepSpaces(); - // ----------------------------------------------------------------------- - // Phase 3: Updating pointers in live objects. - // - // Before: Same as after phase 2 (compacting collection). - // - // After: All pointers in live objects, including encoded map - // pointers, are updated to point to their target's new - // location. - - friend class UpdatingVisitor; // helper for updating visited objects + void EvacuateNewSpace(); - // Updates pointers in all spaces. - void UpdatePointers(); + void EvacuateLiveObjectsFromPage(Page* p); - // Updates pointers in an object in new space. - // Returns the heap size of the object. - int UpdatePointersInNewObject(HeapObject* obj); + void EvacuatePages(); - // Updates pointers in an object in old spaces. - // Returns the heap size of the object. - int UpdatePointersInOldObject(HeapObject* obj); + void EvacuateNewSpaceAndCandidates(); - // Calculates the forwarding address of an object in an old space. - static Address GetForwardingAddressInOldSpace(HeapObject* obj); + void SweepSpace(PagedSpace* space, SweeperType sweeper); - // ----------------------------------------------------------------------- - // Phase 4: Relocating objects. - // - // Before: Pointers to live objects are updated to point to their - // target's new location. - // - // After: Objects have been moved to their new addresses. - - // Relocates objects in all spaces. 
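The removed comments above describe the old compactor, which encoded forwarding addresses in map words and then ran separate pointer-update and relocate phases. In the new collector those phases collapse into evacuation: live objects are copied out of new space and out of the candidate pages, the slots recorded for them are patched, and everything else is swept in place. A stubbed, editorial outline of the resulting order (as it appears in SweepSpaces earlier in this patch; every phase here is a placeholder, only the sequencing is the point):

#include <cstdio>

static void RemoveDeadInvalidatedCode() { std::puts("drop dead invalidated code"); }
static void SweepPrecisely(const char* space) { std::printf("sweep %s precisely\n", space); }
static void EvacuateNewSpaceAndCandidates() { std::puts("evacuate, then update recorded slots"); }
static void FreeUnmarkedLargeObjects() { std::puts("free unmarked large objects"); }

int main() {
  RemoveDeadInvalidatedCode();
  SweepPrecisely("code space");
  SweepPrecisely("cell space");
  EvacuateNewSpaceAndCandidates();
  SweepPrecisely("map space");    // precise so ClearNonLiveTransitions can spot dead maps
  FreeUnmarkedLargeObjects();     // the large object space is never moved
  return 0;
}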
- void RelocateObjects(); - - // Converts a code object's inline target to addresses, convention from - // address to target happens in the marking phase. - int ConvertCodeICTargetToAddress(HeapObject* obj); - - // Relocate a map object. - int RelocateMapObject(HeapObject* obj); - - // Relocates an old object. - int RelocateOldPointerObject(HeapObject* obj); - int RelocateOldDataObject(HeapObject* obj); - - // Relocate a property cell object. - int RelocateCellObject(HeapObject* obj); - - // Helper function. - inline int RelocateOldNonCodeObject(HeapObject* obj, - PagedSpace* space); - - // Relocates an object in the code space. - int RelocateCodeObject(HeapObject* obj); - - // Copy a new object. - int RelocateNewObject(HeapObject* obj); #ifdef DEBUG // ----------------------------------------------------------------------- @@ -512,15 +776,19 @@ class MarkCompactCollector { #endif Heap* heap_; - MarkingStack marking_stack_; + MarkingDeque marking_deque_; CodeFlusher* code_flusher_; Object* encountered_weak_maps_; + List<Page*> evacuation_candidates_; + List<Code*> invalidated_code_; + friend class Heap; - friend class OverflowedObjectsScanner; }; +const char* AllocationSpaceName(AllocationSpace space); + } } // namespace v8::internal #endif // V8_MARK_COMPACT_H_ diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc index b6ad5ac35..c70463d63 100644 --- a/deps/v8/src/messages.cc +++ b/deps/v8/src/messages.cc @@ -1,5 +1,4 @@ - -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -81,11 +80,11 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject( } Handle<Object> stack_trace_handle = stack_trace.is_null() - ? FACTORY->undefined_value() + ? Handle<Object>::cast(FACTORY->undefined_value()) : Handle<Object>::cast(stack_trace); Handle<Object> stack_frames_handle = stack_frames.is_null() - ? FACTORY->undefined_value() + ? Handle<Object>::cast(FACTORY->undefined_value()) : Handle<Object>::cast(stack_frames); Handle<JSMessageObject> message = @@ -149,12 +148,15 @@ Handle<String> MessageHandler::GetMessage(Handle<Object> data) { JSFunction::cast( Isolate::Current()->js_builtins_object()-> GetPropertyNoExceptionThrown(*fmt_str))); - Object** argv[1] = { data.location() }; + Handle<Object> argv[] = { data }; bool caught_exception; Handle<Object> result = Execution::TryCall(fun, - Isolate::Current()->js_builtins_object(), 1, argv, &caught_exception); + Isolate::Current()->js_builtins_object(), + ARRAY_SIZE(argv), + argv, + &caught_exception); if (caught_exception || !result->IsString()) { return FACTORY->LookupAsciiSymbol("<error>"); diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h index c4c4fd259..553c511c3 100644 --- a/deps/v8/src/mips/assembler-mips-inl.h +++ b/deps/v8/src/mips/assembler-mips-inl.h @@ -78,7 +78,6 @@ bool Operand::is_reg() const { } - // ----------------------------------------------------------------------------- // RelocInfo. 
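The RelocInfo hunks that follow wire the incremental marker into every MIPS setter that writes a pointer into code: set_target_address, set_target_object, set_target_cell and set_call_address all call RecordWrite or RecordWriteIntoCode on the host's heap right after the raw store. The point of such a write barrier is that an already-scanned (black) code object may acquire a pointer to an unmarked object, which incremental marking would otherwise miss. A standalone sketch of the pattern (illustrative names, not V8's API):

#include <vector>

struct ObjModel {
  bool black;
  std::vector<ObjModel*> fields;
};

struct IncrementalMarkerModel {
  std::vector<ObjModel*> worklist;
  void RecordWrite(ObjModel* host, ObjModel* value) {
    // If a black holder now points at an unmarked value, grey the value.
    if (host->black && value != nullptr && !value->black) {
      worklist.push_back(value);
    }
  }
};

static void WriteField(IncrementalMarkerModel* marking,
                       ObjModel* host, size_t index, ObjModel* value) {
  host->fields[index] = value;        // the raw store (set_target_address, etc.)
  marking->RecordWrite(host, value);  // the barrier added in the hunks below
}

int main() {
  ObjModel target = {false, {}};
  ObjModel host = {true, {nullptr}};  // already scanned: black
  IncrementalMarkerModel marking;
  WriteField(&marking, &host, 0, &target);
  return static_cast<int>(marking.worklist.size());  // 1: target was greyed
}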
@@ -120,6 +119,11 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); Assembler::set_target_address_at(pc_, target); + if (host() != NULL && IsCodeTarget(rmode_)) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } @@ -149,6 +153,10 @@ Object** RelocInfo::target_object_address() { void RelocInfo::set_target_object(Object* target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target)); + if (host() != NULL && target->IsHeapObject()) { + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), &Memory::Object_at(pc_), HeapObject::cast(target)); + } } @@ -180,6 +188,12 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) { ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Memory::Address_at(pc_) = address; + if (host() != NULL) { + // TODO(1550) We are passing NULL as a slot because cell can never be on + // evacuation candidate. + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), NULL, cell); + } } @@ -200,6 +214,11 @@ void RelocInfo::set_call_address(Address target) { // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot(). Assembler::set_target_address_at(pc_, target); + if (host() != NULL) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } @@ -242,12 +261,7 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() { void RelocInfo::Visit(ObjectVisitor* visitor) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - Object** p = target_object_address(); - Object* orig = *p; - visitor->VisitPointer(p); - if (*p != orig) { - set_target_object(*p); - } + visitor->VisitEmbeddedPointer(this); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { @@ -257,9 +271,9 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { #ifdef ENABLE_DEBUGGER_SUPPORT // TODO(isolates): Get a cached isolate below. 
} else if (((RelocInfo::IsJSReturn(mode) && - IsPatchedReturnSequence()) || - (RelocInfo::IsDebugBreakSlot(mode) && - IsPatchedDebugBreakSlotSequence())) && + IsPatchedReturnSequence()) || + (RelocInfo::IsDebugBreakSlot(mode) && + IsPatchedDebugBreakSlotSequence())) && Isolate::Current()->debug()->has_break_points()) { visitor->VisitDebugTarget(this); #endif @@ -273,7 +287,7 @@ template<typename StaticVisitor> void RelocInfo::Visit(Heap* heap) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - StaticVisitor::VisitPointer(heap, target_object_address()); + StaticVisitor::VisitEmbeddedPointer(heap, this); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(heap, this); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc index e01a0ca70..e933181d4 100644 --- a/deps/v8/src/mips/assembler-mips.cc +++ b/deps/v8/src/mips/assembler-mips.cc @@ -74,7 +74,9 @@ static uint64_t CpuFeaturesImpliedByCompiler() { void CpuFeatures::Probe() { - ASSERT(!initialized_); + unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() | + CpuFeaturesImpliedByCompiler()); + ASSERT(supported_ == 0 || supported_ == standard_features); #ifdef DEBUG initialized_ = true; #endif @@ -82,8 +84,7 @@ void CpuFeatures::Probe() { // Get the features implied by the OS and the compiler settings. This is the // minimal set of features which is also allowed for generated code in the // snapshot. - supported_ |= OS::CpuFeaturesImpliedByPlatform(); - supported_ |= CpuFeaturesImpliedByCompiler(); + supported_ |= standard_features; if (Serializer::enabled()) { // No probing for features if we might serialize (generate snapshot). @@ -2018,7 +2019,8 @@ void Assembler::dd(uint32_t data) { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { - RelocInfo rinfo(pc_, rmode, data); // We do not try to reuse pool constants. + // We do not try to reuse pool constants. + RelocInfo rinfo(pc_, rmode, data, NULL); if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { // Adjust code for new modes. ASSERT(RelocInfo::IsDebugBreakSlot(rmode) @@ -2041,7 +2043,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { } ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here. if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { - RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId()); + RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL); ClearRecordedAstId(); reloc_info_writer.Write(&reloc_info_with_ast_id); } else { diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc index d77230448..5609d5ee4 100644 --- a/deps/v8/src/mips/builtins-mips.cc +++ b/deps/v8/src/mips/builtins-mips.cc @@ -587,10 +587,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { __ bind(&convert_argument); __ push(function); // Preserve the function. __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0); - __ EnterInternalFrame(); - __ push(v0); - __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(v0); + __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); + } __ pop(function); __ mov(argument, v0); __ Branch(&argument_is_string); @@ -606,10 +607,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // create a string wrapper. 
__ bind(&gc_required); __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0); - __ EnterInternalFrame(); - __ push(argument); - __ CallRuntime(Runtime::kNewStringWrapper, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(argument); + __ CallRuntime(Runtime::kNewStringWrapper, 1); + } __ Ret(); } @@ -622,13 +624,13 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // -- sp[...]: constructor arguments // ----------------------------------- - Label non_function_call; + Label slow, non_function_call; // Check that the function is not a smi. __ And(t0, a1, Operand(kSmiTagMask)); __ Branch(&non_function_call, eq, t0, Operand(zero_reg)); // Check that the function is a JSFunction. __ GetObjectType(a1, a2, a2); - __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE)); + __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE)); // Jump to the function-specific construct stub. __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); @@ -638,13 +640,21 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // a0: number of arguments // a1: called object + // a2: object type + Label do_call; + __ bind(&slow); + __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE)); + __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); + __ jmp(&do_call); + __ bind(&non_function_call); + __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); + __ bind(&do_call); // CALL_NON_FUNCTION expects the non-function constructor as receiver // (instead of the original receiver from the call site). The receiver is // stack element argc. // Set expected number of arguments to zero (not changing a0). __ mov(a2, zero_reg); - __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); __ SetCallKind(t1, CALL_AS_METHOD); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); @@ -667,331 +677,336 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // ----------------------------------- // Enter a construct frame. - __ EnterConstructFrame(); + { + FrameScope scope(masm, StackFrame::CONSTRUCT); - // Preserve the two incoming parameters on the stack. - __ sll(a0, a0, kSmiTagSize); // Tag arguments count. - __ MultiPushReversed(a0.bit() | a1.bit()); + // Preserve the two incoming parameters on the stack. + __ sll(a0, a0, kSmiTagSize); // Tag arguments count. + __ MultiPushReversed(a0.bit() | a1.bit()); - // Use t7 to hold undefined, which is used in several places below. - __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); + // Use t7 to hold undefined, which is used in several places below. + __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); - Label rt_call, allocated; - // Try to allocate the object without transitioning into C code. If any of the - // preconditions is not met, the code bails out to the runtime call. - if (FLAG_inline_new) { - Label undo_allocation; + Label rt_call, allocated; + // Try to allocate the object without transitioning into C code. If any of + // the preconditions is not met, the code bails out to the runtime call. 
+ if (FLAG_inline_new) { + Label undo_allocation; #ifdef ENABLE_DEBUGGER_SUPPORT - ExternalReference debug_step_in_fp = - ExternalReference::debug_step_in_fp_address(isolate); - __ li(a2, Operand(debug_step_in_fp)); - __ lw(a2, MemOperand(a2)); - __ Branch(&rt_call, ne, a2, Operand(zero_reg)); + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(isolate); + __ li(a2, Operand(debug_step_in_fp)); + __ lw(a2, MemOperand(a2)); + __ Branch(&rt_call, ne, a2, Operand(zero_reg)); #endif - // Load the initial map and verify that it is in fact a map. - // a1: constructor function - __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); - __ And(t0, a2, Operand(kSmiTagMask)); - __ Branch(&rt_call, eq, t0, Operand(zero_reg)); - __ GetObjectType(a2, a3, t4); - __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE)); - - // Check that the constructor is not constructing a JSFunction (see comments - // in Runtime_NewObject in runtime.cc). In which case the initial map's - // instance type would be JS_FUNCTION_TYPE. - // a1: constructor function - // a2: initial map - __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset)); - __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE)); - - if (count_constructions) { - Label allocate; - // Decrease generous allocation count. - __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); - MemOperand constructor_count = - FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset); - __ lbu(t0, constructor_count); - __ Subu(t0, t0, Operand(1)); - __ sb(t0, constructor_count); - __ Branch(&allocate, ne, t0, Operand(zero_reg)); - - __ Push(a1, a2); - - __ push(a1); // Constructor. - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); - - __ pop(a2); - __ pop(a1); - - __ bind(&allocate); - } + // Load the initial map and verify that it is in fact a map. + // a1: constructor function + __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); + __ And(t0, a2, Operand(kSmiTagMask)); + __ Branch(&rt_call, eq, t0, Operand(zero_reg)); + __ GetObjectType(a2, a3, t4); + __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE)); + + // Check that the constructor is not constructing a JSFunction (see + // comments in Runtime_NewObject in runtime.cc). In which case the + // initial map's instance type would be JS_FUNCTION_TYPE. + // a1: constructor function + // a2: initial map + __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset)); + __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE)); - // Now allocate the JSObject on the heap. - // a1: constructor function - // a2: initial map - __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset)); - __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS); + if (count_constructions) { + Label allocate; + // Decrease generous allocation count. + __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + MemOperand constructor_count = + FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset); + __ lbu(t0, constructor_count); + __ Subu(t0, t0, Operand(1)); + __ sb(t0, constructor_count); + __ Branch(&allocate, ne, t0, Operand(zero_reg)); + + __ Push(a1, a2); + + __ push(a1); // Constructor. + // The call will replace the stub, so the countdown is only done once. + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + + __ pop(a2); + __ pop(a1); + + __ bind(&allocate); + } - // Allocated the JSObject, now initialize the fields. 
Map is set to initial - // map and properties and elements are set to empty fixed array. - // a1: constructor function - // a2: initial map - // a3: object size - // t4: JSObject (not tagged) - __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex); - __ mov(t5, t4); - __ sw(a2, MemOperand(t5, JSObject::kMapOffset)); - __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset)); - __ sw(t6, MemOperand(t5, JSObject::kElementsOffset)); - __ Addu(t5, t5, Operand(3*kPointerSize)); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); - ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); - - // Fill all the in-object properties with appropriate filler. - // a1: constructor function - // a2: initial map - // a3: object size (in words) - // t4: JSObject (not tagged) - // t5: First in-object property of JSObject (not tagged) - __ sll(t0, a3, kPointerSizeLog2); - __ addu(t6, t4, t0); // End of object. - ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); - { Label loop, entry; + // Now allocate the JSObject on the heap. + // a1: constructor function + // a2: initial map + __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset)); + __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS); + + // Allocated the JSObject, now initialize the fields. Map is set to + // initial map and properties and elements are set to empty fixed array. + // a1: constructor function + // a2: initial map + // a3: object size + // t4: JSObject (not tagged) + __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex); + __ mov(t5, t4); + __ sw(a2, MemOperand(t5, JSObject::kMapOffset)); + __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset)); + __ sw(t6, MemOperand(t5, JSObject::kElementsOffset)); + __ Addu(t5, t5, Operand(3*kPointerSize)); + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); + ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); + + // Fill all the in-object properties with appropriate filler. + // a1: constructor function + // a2: initial map + // a3: object size (in words) + // t4: JSObject (not tagged) + // t5: First in-object property of JSObject (not tagged) + __ sll(t0, a3, kPointerSizeLog2); + __ addu(t6, t4, t0); // End of object. + ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); + __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); if (count_constructions) { + __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset)); + __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, + kBitsPerByte); + __ sll(t0, a0, kPointerSizeLog2); + __ addu(a0, t5, t0); + // a0: offset of first field after pre-allocated fields + if (FLAG_debug_code) { + __ Assert(le, "Unexpected number of pre-allocated property fields.", + a0, Operand(t6)); + } + __ InitializeFieldsWithFiller(t5, a0, t7); // To allow for truncation. __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex); - } else { - __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); } - __ jmp(&entry); - __ bind(&loop); - __ sw(t7, MemOperand(t5, 0)); - __ addiu(t5, t5, kPointerSize); - __ bind(&entry); - __ Branch(&loop, Uless, t5, Operand(t6)); - } - - // Add the object tag to make the JSObject real, so that we can continue and - // jump into the continuation code at any time from now on. Any failures - // need to undo the allocation, so that the heap is in a consistent state - // and verifiable. - __ Addu(t4, t4, Operand(kHeapObjectTag)); - - // Check if a non-empty properties array is needed. 
Continue with allocated - // object if not fall through to runtime call if it is. - // a1: constructor function - // t4: JSObject - // t5: start of next object (not tagged) - __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset)); - // The field instance sizes contains both pre-allocated property fields and - // in-object properties. - __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset)); - __ And(t6, - a0, - Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8)); - __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8); - __ Addu(a3, a3, Operand(t0)); - __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8)); - __ srl(t0, t6, Map::kInObjectPropertiesByte * 8); - __ subu(a3, a3, t0); - - // Done if no extra properties are to be allocated. - __ Branch(&allocated, eq, a3, Operand(zero_reg)); - __ Assert(greater_equal, "Property allocation count failed.", - a3, Operand(zero_reg)); - - // Scale the number of elements by pointer size and add the header for - // FixedArrays to the start of the next object calculation from above. - // a1: constructor - // a3: number of elements in properties array - // t4: JSObject - // t5: start of next object - __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize)); - __ AllocateInNewSpace( - a0, - t5, - t6, - a2, - &undo_allocation, - static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); - - // Initialize the FixedArray. - // a1: constructor - // a3: number of elements in properties array (un-tagged) - // t4: JSObject - // t5: start of next object - __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex); - __ mov(a2, t5); - __ sw(t6, MemOperand(a2, JSObject::kMapOffset)); - __ sll(a0, a3, kSmiTagSize); - __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset)); - __ Addu(a2, a2, Operand(2 * kPointerSize)); - - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); - - // Initialize the fields to undefined. - // a1: constructor - // a2: First element of FixedArray (not tagged) - // a3: number of elements in properties array - // t4: JSObject - // t5: FixedArray (not tagged) - __ sll(t3, a3, kPointerSizeLog2); - __ addu(t6, a2, t3); // End of object. - ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); - { Label loop, entry; - if (count_constructions) { - __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); - } else if (FLAG_debug_code) { - __ LoadRoot(t8, Heap::kUndefinedValueRootIndex); - __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8)); + __ InitializeFieldsWithFiller(t5, t6, t7); + + // Add the object tag to make the JSObject real, so that we can continue + // and jump into the continuation code at any time from now on. Any + // failures need to undo the allocation, so that the heap is in a + // consistent state and verifiable. + __ Addu(t4, t4, Operand(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. Continue with + // allocated object if not fall through to runtime call if it is. + // a1: constructor function + // t4: JSObject + // t5: start of next object (not tagged) + __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset)); + // The field instance sizes contains both pre-allocated property fields + // and in-object properties. 
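Map::kInstanceSizesOffset packs several byte-sized counts into one word; the loads that follow use the MIPS Ext (extract bit field) instruction to pull out the pre-allocated-property-fields and in-object-properties bytes in one step each, replacing the earlier And/srl pairs. A standalone sketch of the equivalent extraction (byte indices and packed values here are made up):

#include <cassert>
#include <cstdint>

// Ext(dst, src, pos = byte_index * kBitsPerByte, size = kBitsPerByte)
static uint32_t ExtractByte(uint32_t word, int byte_index) {
  return (word >> (byte_index * 8)) & 0xFF;
}

int main() {
  const int kPreAllocatedPropertyFieldsByte = 2;  // illustrative indices
  const int kInObjectPropertiesByte = 1;
  uint32_t instance_sizes = 0x00030400;           // pretend packed byte counts
  uint32_t unused_property_fields = 6;            // loaded separately in the builtin
  uint32_t extra = unused_property_fields
      + ExtractByte(instance_sizes, kPreAllocatedPropertyFieldsByte)
      - ExtractByte(instance_sizes, kInObjectPropertiesByte);
  assert(extra == 6 + 0x03 - 0x04);
  return 0;
}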
+ __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset)); + __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, + kBitsPerByte); + __ Addu(a3, a3, Operand(t6)); + __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte, + kBitsPerByte); + __ subu(a3, a3, t6); + + // Done if no extra properties are to be allocated. + __ Branch(&allocated, eq, a3, Operand(zero_reg)); + __ Assert(greater_equal, "Property allocation count failed.", + a3, Operand(zero_reg)); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // a1: constructor + // a3: number of elements in properties array + // t4: JSObject + // t5: start of next object + __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ AllocateInNewSpace( + a0, + t5, + t6, + a2, + &undo_allocation, + static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); + + // Initialize the FixedArray. + // a1: constructor + // a3: number of elements in properties array (un-tagged) + // t4: JSObject + // t5: start of next object + __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex); + __ mov(a2, t5); + __ sw(t6, MemOperand(a2, JSObject::kMapOffset)); + __ sll(a0, a3, kSmiTagSize); + __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset)); + __ Addu(a2, a2, Operand(2 * kPointerSize)); + + ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); + ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); + + // Initialize the fields to undefined. + // a1: constructor + // a2: First element of FixedArray (not tagged) + // a3: number of elements in properties array + // t4: JSObject + // t5: FixedArray (not tagged) + __ sll(t3, a3, kPointerSizeLog2); + __ addu(t6, a2, t3); // End of object. + ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); + { Label loop, entry; + if (count_constructions) { + __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); + } else if (FLAG_debug_code) { + __ LoadRoot(t8, Heap::kUndefinedValueRootIndex); + __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8)); + } + __ jmp(&entry); + __ bind(&loop); + __ sw(t7, MemOperand(a2)); + __ addiu(a2, a2, kPointerSize); + __ bind(&entry); + __ Branch(&loop, less, a2, Operand(t6)); } - __ jmp(&entry); - __ bind(&loop); - __ sw(t7, MemOperand(a2)); - __ addiu(a2, a2, kPointerSize); - __ bind(&entry); - __ Branch(&loop, less, a2, Operand(t6)); + + // Store the initialized FixedArray into the properties field of + // the JSObject. + // a1: constructor function + // t4: JSObject + // t5: FixedArray (not tagged) + __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag. + __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset)); + + // Continue with JSObject being successfully allocated. + // a1: constructor function + // a4: JSObject + __ jmp(&allocated); + + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // t4: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(t4, t5); } - // Store the initialized FixedArray into the properties field of - // the JSObject. + __ bind(&rt_call); + // Allocate the new receiver object using the runtime call. // a1: constructor function - // t4: JSObject - // t5: FixedArray (not tagged) - __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag. - __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset)); + __ push(a1); // Argument for Runtime_NewObject. 
+ __ CallRuntime(Runtime::kNewObject, 1); + __ mov(t4, v0); - // Continue with JSObject being successfully allocated. - // a1: constructor function - // a4: JSObject - __ jmp(&allocated); - - // Undo the setting of the new top so that the heap is verifiable. For - // example, the map's unused properties potentially do not match the - // allocated objects unused properties. - // t4: JSObject (previous new top) - __ bind(&undo_allocation); - __ UndoAllocationInNewSpace(t4, t5); - } + // Receiver for constructor call allocated. + // t4: JSObject + __ bind(&allocated); + __ push(t4); - __ bind(&rt_call); - // Allocate the new receiver object using the runtime call. - // a1: constructor function - __ push(a1); // Argument for Runtime_NewObject. - __ CallRuntime(Runtime::kNewObject, 1); - __ mov(t4, v0); + // Push the function and the allocated receiver from the stack. + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ lw(a1, MemOperand(sp, kPointerSize)); + __ MultiPushReversed(a1.bit() | t4.bit()); - // Receiver for constructor call allocated. - // t4: JSObject - __ bind(&allocated); - __ push(t4); + // Reload the number of arguments from the stack. + // a1: constructor function + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + __ lw(a3, MemOperand(sp, 4 * kPointerSize)); - // Push the function and the allocated receiver from the stack. - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ lw(a1, MemOperand(sp, kPointerSize)); - __ MultiPushReversed(a1.bit() | t4.bit()); + // Setup pointer to last argument. + __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); - // Reload the number of arguments from the stack. - // a1: constructor function - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - __ lw(a3, MemOperand(sp, 4 * kPointerSize)); + // Setup number of arguments for function call below. + __ srl(a0, a3, kSmiTagSize); - // Setup pointer to last argument. - __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); + // Copy arguments and receiver to the expression stack. + // a0: number of arguments + // a1: constructor function + // a2: address of last argument (caller sp) + // a3: number of arguments (smi-tagged) + // sp[0]: receiver + // sp[1]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + Label loop, entry; + __ jmp(&entry); + __ bind(&loop); + __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); + __ Addu(t0, a2, Operand(t0)); + __ lw(t1, MemOperand(t0)); + __ push(t1); + __ bind(&entry); + __ Addu(a3, a3, Operand(-2)); + __ Branch(&loop, greater_equal, a3, Operand(zero_reg)); - // Setup number of arguments for function call below. - __ srl(a0, a3, kSmiTagSize); + // Call the function. 
+ // a0: number of arguments + // a1: constructor function + if (is_api_function) { + __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + Handle<Code> code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + ParameterCount expected(0); + __ InvokeCode(code, expected, expected, + RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); + } else { + ParameterCount actual(a0); + __ InvokeFunction(a1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } - // Copy arguments and receiver to the expression stack. - // a0: number of arguments - // a1: constructor function - // a2: address of last argument (caller sp) - // a3: number of arguments (smi-tagged) - // sp[0]: receiver - // sp[1]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - Label loop, entry; - __ jmp(&entry); - __ bind(&loop); - __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); - __ Addu(t0, a2, Operand(t0)); - __ lw(t1, MemOperand(t0)); - __ push(t1); - __ bind(&entry); - __ Addu(a3, a3, Operand(-2)); - __ Branch(&loop, greater_equal, a3, Operand(zero_reg)); + // Pop the function from the stack. + // v0: result + // sp[0]: constructor function + // sp[2]: receiver + // sp[3]: constructor function + // sp[4]: number of arguments (smi-tagged) + __ Pop(); - // Call the function. - // a0: number of arguments - // a1: constructor function - if (is_api_function) { - __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); - Handle<Code> code = - masm->isolate()->builtins()->HandleApiCallConstruct(); - ParameterCount expected(0); - __ InvokeCode(code, expected, expected, - RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); - } else { - ParameterCount actual(a0); - __ InvokeFunction(a1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Restore context from the frame. + __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. + Label use_receiver, exit; + + // If the result is a smi, it is *not* an object in the ECMA sense. + // v0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ And(t0, v0, Operand(kSmiTagMask)); + __ Branch(&use_receiver, eq, t0, Operand(zero_reg)); + + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. + __ GetObjectType(v0, a3, a3); + __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); + + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ lw(v0, MemOperand(sp)); + + // Remove receiver from the stack, remove caller arguments, and + // return. + __ bind(&exit); + // v0: result + // sp[0]: receiver (newly allocated object) + // sp[1]: constructor function + // sp[2]: number of arguments (smi-tagged) + __ lw(a1, MemOperand(sp, 2 * kPointerSize)); + + // Leave construct frame. } - // Pop the function from the stack. - // v0: result - // sp[0]: constructor function - // sp[2]: receiver - // sp[3]: constructor function - // sp[4]: number of arguments (smi-tagged) - __ Pop(); - - // Restore context from the frame. 
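The construct-stub epilogue applies the rule cited in the comments (ECMA-262 section 13.2.2): if the value the constructor returned is not an object in the spec sense, that is, it is a smi or its instance type is below FIRST_SPEC_OBJECT_TYPE, the freshly allocated receiver is used as the result instead. A tiny standalone sketch of that rule:

struct ValueModel {
  bool is_smi;
  bool is_spec_object;   // instance type at or above FIRST_SPEC_OBJECT_TYPE
};

static ValueModel ConstructorResult(ValueModel receiver, ValueModel returned) {
  if (returned.is_smi) return receiver;          // smis are never objects
  if (!returned.is_spec_object) return receiver; // not an object in the ECMA sense
  return returned;                               // explicit return value wins
}

int main() {
  ValueModel receiver = {false, true};
  ValueModel smi_result = {true, false};
  return ConstructorResult(receiver, smi_result).is_spec_object ? 0 : 1;
}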
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - - // If the result is an object (in the ECMA sense), we should get rid - // of the receiver and use the result; see ECMA-262 section 13.2.2-7 - // on page 74. - Label use_receiver, exit; - - // If the result is a smi, it is *not* an object in the ECMA sense. - // v0: result - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ And(t0, v0, Operand(kSmiTagMask)); - __ Branch(&use_receiver, eq, t0, Operand(zero_reg)); - - // If the type of the result (stored in its map) is less than - // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. - __ GetObjectType(v0, a3, a3); - __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); - - // Throw away the result of the constructor invocation and use the - // on-stack receiver as the result. - __ bind(&use_receiver); - __ lw(v0, MemOperand(sp)); - - // Remove receiver from the stack, remove caller arguments, and - // return. - __ bind(&exit); - // v0: result - // sp[0]: receiver (newly allocated object) - // sp[1]: constructor function - // sp[2]: number of arguments (smi-tagged) - __ lw(a1, MemOperand(sp, 2 * kPointerSize)); - __ LeaveConstructFrame(); __ sll(t0, a1, kPointerSizeLog2 - 1); __ Addu(sp, sp, t0); __ Addu(sp, sp, kPointerSize); @@ -1031,58 +1046,60 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ mov(cp, zero_reg); // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Set up the context from the function argument. - __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + // Set up the context from the function argument. + __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); - // Set up the roots register. - ExternalReference roots_address = - ExternalReference::roots_address(masm->isolate()); - __ li(s6, Operand(roots_address)); + // Set up the roots register. + ExternalReference roots_address = + ExternalReference::roots_address(masm->isolate()); + __ li(s6, Operand(roots_address)); - // Push the function and the receiver onto the stack. - __ Push(a1, a2); + // Push the function and the receiver onto the stack. + __ Push(a1, a2); - // Copy arguments to the stack in a loop. - // a3: argc - // s0: argv, ie points to first arg - Label loop, entry; - __ sll(t0, a3, kPointerSizeLog2); - __ addu(t2, s0, t0); - __ b(&entry); - __ nop(); // Branch delay slot nop. - // t2 points past last arg. - __ bind(&loop); - __ lw(t0, MemOperand(s0)); // Read next parameter. - __ addiu(s0, s0, kPointerSize); - __ lw(t0, MemOperand(t0)); // Dereference handle. - __ push(t0); // Push parameter. - __ bind(&entry); - __ Branch(&loop, ne, s0, Operand(t2)); - - // Initialize all JavaScript callee-saved registers, since they will be seen - // by the garbage collector as part of handlers. - __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); - __ mov(s1, t0); - __ mov(s2, t0); - __ mov(s3, t0); - __ mov(s4, t0); - __ mov(s5, t0); - // s6 holds the root address. Do not clobber. - // s7 is cp. Do not init. - - // Invoke the code and pass argc as a0. - __ mov(a0, a3); - if (is_construct) { - __ Call(masm->isolate()->builtins()->JSConstructCall()); - } else { - ParameterCount actual(a0); - __ InvokeFunction(a1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } + // Copy arguments to the stack in a loop. 
+ // a3: argc + // s0: argv, ie points to first arg + Label loop, entry; + __ sll(t0, a3, kPointerSizeLog2); + __ addu(t2, s0, t0); + __ b(&entry); + __ nop(); // Branch delay slot nop. + // t2 points past last arg. + __ bind(&loop); + __ lw(t0, MemOperand(s0)); // Read next parameter. + __ addiu(s0, s0, kPointerSize); + __ lw(t0, MemOperand(t0)); // Dereference handle. + __ push(t0); // Push parameter. + __ bind(&entry); + __ Branch(&loop, ne, s0, Operand(t2)); - __ LeaveInternalFrame(); + // Initialize all JavaScript callee-saved registers, since they will be seen + // by the garbage collector as part of handlers. + __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); + __ mov(s1, t0); + __ mov(s2, t0); + __ mov(s3, t0); + __ mov(s4, t0); + __ mov(s5, t0); + // s6 holds the root address. Do not clobber. + // s7 is cp. Do not init. + + // Invoke the code and pass argc as a0. + __ mov(a0, a3); + if (is_construct) { + __ Call(masm->isolate()->builtins()->JSConstructCall()); + } else { + ParameterCount actual(a0); + __ InvokeFunction(a1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } + + // Leave internal frame. + } __ Jump(ra); } @@ -1100,27 +1117,28 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { void Builtins::Generate_LazyCompile(MacroAssembler* masm) { // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Preserve the function. - __ push(a1); - // Push call kind information. - __ push(t1); + // Preserve the function. + __ push(a1); + // Push call kind information. + __ push(t1); - // Push the function on the stack as the argument to the runtime function. - __ push(a1); - // Call the runtime function. - __ CallRuntime(Runtime::kLazyCompile, 1); - // Calculate the entry point. - __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag); + // Push the function on the stack as the argument to the runtime function. + __ push(a1); + // Call the runtime function. + __ CallRuntime(Runtime::kLazyCompile, 1); + // Calculate the entry point. + __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag); - // Restore call kind information. - __ pop(t1); - // Restore saved function. - __ pop(a1); + // Restore call kind information. + __ pop(t1); + // Restore saved function. + __ pop(a1); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down temporary frame. + } // Do a tail-call of the compiled function. __ Jump(t9); @@ -1129,26 +1147,27 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) { void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Preserve the function. - __ push(a1); - // Push call kind information. - __ push(t1); + // Preserve the function. + __ push(a1); + // Push call kind information. + __ push(t1); - // Push the function on the stack as the argument to the runtime function. - __ push(a1); - __ CallRuntime(Runtime::kLazyRecompile, 1); - // Calculate the entry point. - __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); + // Push the function on the stack as the argument to the runtime function. + __ push(a1); + __ CallRuntime(Runtime::kLazyRecompile, 1); + // Calculate the entry point. + __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); - // Restore call kind information. - __ pop(t1); - // Restore saved function. - __ pop(a1); + // Restore call kind information. + __ pop(t1); + // Restore saved function. 
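// Aside (illustrative sketch, not from this patch): the recurring change in
// these builtins replaces paired EnterInternalFrame()/LeaveInternalFrame()
// calls with a block-scoped FrameScope(masm, StackFrame::INTERNAL), so the
// frame is entered at the opening brace and torn down where the block closes.
// A conceptual RAII analogue only; SketchAssembler and SketchFrameScope are
// hypothetical, not V8's actual classes.
struct SketchAssembler {
  void EnterInternalFrame() {}
  void LeaveInternalFrame() {}
};

class SketchFrameScope {
 public:
  explicit SketchFrameScope(SketchAssembler* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  ~SketchFrameScope() { masm_->LeaveInternalFrame(); }  // emitted at the closing brace
 private:
  SketchAssembler* masm_;
};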
+ __ pop(a1); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down temporary frame. + } // Do a tail-call of the compiled function. __ Jump(t9); @@ -1190,19 +1209,20 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 2. Get the function to call (passed as receiver) from the stack, check // if it is a function. // a0: actual number of arguments - Label non_function; + Label slow, non_function; __ sll(at, a0, kPointerSizeLog2); __ addu(at, sp, at); __ lw(a1, MemOperand(at)); __ And(at, a1, Operand(kSmiTagMask)); __ Branch(&non_function, eq, at, Operand(zero_reg)); __ GetObjectType(a1, a2, a2); - __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE)); + __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE)); // 3a. Patch the first argument if necessary when calling a function. // a0: actual number of arguments // a1: function Label shift_arguments; + __ li(t0, Operand(0, RelocInfo::NONE)); // Indicate regular JS_FUNCTION. { Label convert_to_object, use_global_receiver, patch_receiver; // Change context eagerly in case we need the global receiver. __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); @@ -1210,13 +1230,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // Do not transform the receiver for strict mode functions. __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset)); - __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize))); - __ Branch(&shift_arguments, ne, t0, Operand(zero_reg)); + __ Branch(&shift_arguments, ne, t3, Operand(zero_reg)); // Do not transform the receiver for native (Compilerhints already in a3). - __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); - __ Branch(&shift_arguments, ne, t0, Operand(zero_reg)); + __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); + __ Branch(&shift_arguments, ne, t3, Operand(zero_reg)); // Compute the receiver in non-strict mode. // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2). @@ -1238,21 +1258,25 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); __ bind(&convert_to_object); - __ EnterInternalFrame(); // In order to preserve argument count. - __ sll(a0, a0, kSmiTagSize); // Smi tagged. - __ push(a0); - - __ push(a2); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ mov(a2, v0); - - __ pop(a0); - __ sra(a0, a0, kSmiTagSize); // Un-tag. - __ LeaveInternalFrame(); - // Restore the function to a1. + // Enter an internal frame in order to preserve argument count. + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ sll(a0, a0, kSmiTagSize); // Smi tagged. + __ push(a0); + + __ push(a2); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(a2, v0); + + __ pop(a0); + __ sra(a0, a0, kSmiTagSize); // Un-tag. + // Leave internal frame. + } + // Restore the function to a1, and the flag to t0. __ sll(at, a0, kPointerSizeLog2); __ addu(at, sp, at); __ lw(a1, MemOperand(at)); + __ li(t0, Operand(0, RelocInfo::NONE)); __ Branch(&patch_receiver); // Use the global receiver object from the called function as the @@ -1273,25 +1297,31 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ Branch(&shift_arguments); } - // 3b. Patch the first argument when calling a non-function. The + // 3b. Check for function proxy. 
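// Aside (illustrative sketch, not from this patch): t0 now carries a small
// call-type code -- 0 for a regular JS function, and the paths just below set
// 1 for a function proxy and 2 for a non-function -- which the final dispatch
// branches on. The same decision in plain C++; all names are illustrative.
enum SketchCallType { kSketchJSFunction = 0, kSketchFunctionProxy = 1, kSketchNonFunction = 2 };

static const char* SketchDispatch(SketchCallType type) {
  switch (type) {
    case kSketchJSFunction:    return "invoke the function directly";
    case kSketchFunctionProxy: return "CALL_FUNCTION_PROXY, proxy re-pushed as an extra argument";
    case kSketchNonFunction:   return "CALL_NON_FUNCTION, callee stored as the receiver";
  }
  return "";
}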
+ __ bind(&slow); + __ li(t0, Operand(1, RelocInfo::NONE)); // Indicate function proxy. + __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE)); + + __ bind(&non_function); + __ li(t0, Operand(2, RelocInfo::NONE)); // Indicate non-function. + + // 3c. Patch the first argument when calling a non-function. The // CALL_NON_FUNCTION builtin expects the non-function callee as // receiver, so overwrite the first argument which will ultimately // become the receiver. // a0: actual number of arguments // a1: function - __ bind(&non_function); - // Restore the function in case it has been modified. + // t0: call type (0: JS function, 1: function proxy, 2: non-function) __ sll(at, a0, kPointerSizeLog2); __ addu(a2, sp, at); __ sw(a1, MemOperand(a2, -kPointerSize)); - // Clear a1 to indicate a non-function being called. - __ mov(a1, zero_reg); // 4. Shift arguments and return address one slot down on the stack // (overwriting the original receiver). Adjust argument count to make // the original first argument the new receiver. // a0: actual number of arguments // a1: function + // t0: call type (0: JS function, 1: function proxy, 2: non-function) __ bind(&shift_arguments); { Label loop; // Calculate the copy start address (destination). Copy end address is sp. @@ -1309,14 +1339,26 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ Pop(); } - // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin. + // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin, + // or a function proxy via CALL_FUNCTION_PROXY. // a0: actual number of arguments // a1: function - { Label function; - __ Branch(&function, ne, a1, Operand(zero_reg)); - __ mov(a2, zero_reg); // expected arguments is 0 for CALL_NON_FUNCTION - __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION); + // t0: call type (0: JS function, 1: function proxy, 2: non-function) + { Label function, non_proxy; + __ Branch(&function, eq, t0, Operand(zero_reg)); + // Expected number of arguments is 0 for CALL_NON_FUNCTION. + __ mov(a2, zero_reg); __ SetCallKind(t1, CALL_AS_METHOD); + __ Branch(&non_proxy, ne, t0, Operand(1)); + + __ push(a1); // Re-add proxy object as additional argument. + __ Addu(a0, a0, Operand(1)); + __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY); + __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + + __ bind(&non_proxy); + __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); __ bind(&function); @@ -1350,134 +1392,161 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { const int kRecvOffset = 3 * kPointerSize; const int kFunctionOffset = 4 * kPointerSize; - __ EnterInternalFrame(); - - __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function. - __ push(a0); - __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array. - __ push(a0); - // Returns (in v0) number of arguments to copy to stack as Smi. - __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - - // Check the stack for overflow. We are not trying need to catch - // interruptions (e.g. debug break and preemption) here, so the "real stack - // limit" is checked. - Label okay; - __ LoadRoot(a2, Heap::kRealStackLimitRootIndex); - // Make a2 the space we have left. The stack might already be overflowed - // here which will cause a2 to become negative. - __ subu(a2, sp, a2); - // Check if the arguments will overflow the stack. 
- __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize); - __ Branch(&okay, gt, a2, Operand(t0)); // Signed comparison. - - // Out of stack space. - __ lw(a1, MemOperand(fp, kFunctionOffset)); - __ push(a1); - __ push(v0); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); - // End of stack check. - - // Push current limit and index. - __ bind(&okay); - __ push(v0); // Limit. - __ mov(a1, zero_reg); // Initial index. - __ push(a1); - - // Change context eagerly to get the right global object if necessary. - __ lw(a0, MemOperand(fp, kFunctionOffset)); - __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset)); - // Load the shared function info while the function is still in a0. - __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset)); - - // Compute the receiver. - Label call_to_object, use_global_receiver, push_receiver; - __ lw(a0, MemOperand(fp, kRecvOffset)); - - // Do not transform the receiver for strict mode functions. - __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset)); - __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + - kSmiTagSize))); - __ Branch(&push_receiver, ne, t0, Operand(zero_reg)); - - // Do not transform the receiver for native (Compilerhints already in a2). - __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); - __ Branch(&push_receiver, ne, t0, Operand(zero_reg)); - - // Compute the receiver in non-strict mode. - __ And(t0, a0, Operand(kSmiTagMask)); - __ Branch(&call_to_object, eq, t0, Operand(zero_reg)); - __ LoadRoot(a1, Heap::kNullValueRootIndex); - __ Branch(&use_global_receiver, eq, a0, Operand(a1)); - __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); - __ Branch(&use_global_receiver, eq, a0, Operand(a2)); - - // Check if the receiver is already a JavaScript object. - // a0: receiver - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ GetObjectType(a0, a1, a1); - __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE)); - - // Convert the receiver to a regular object. - // a0: receiver - __ bind(&call_to_object); - __ push(a0); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver. - __ Branch(&push_receiver); - - // Use the current global receiver object as the receiver. - __ bind(&use_global_receiver); - const int kGlobalOffset = - Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; - __ lw(a0, FieldMemOperand(cp, kGlobalOffset)); - __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset)); - __ lw(a0, FieldMemOperand(a0, kGlobalOffset)); - __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset)); - - // Push the receiver. - // a0: receiver - __ bind(&push_receiver); - __ push(a0); - - // Copy all arguments from the array to the stack. - Label entry, loop; - __ lw(a0, MemOperand(fp, kIndexOffset)); - __ Branch(&entry); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Load the current argument from the arguments array and push it to the - // stack. - // a0: current argument index - __ bind(&loop); - __ lw(a1, MemOperand(fp, kArgsOffset)); - __ push(a1); - __ push(a0); + __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function. + __ push(a0); + __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array. + __ push(a0); + // Returns (in v0) number of arguments to copy to stack as Smi. + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. 
debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadRoot(a2, Heap::kRealStackLimitRootIndex); + // Make a2 the space we have left. The stack might already be overflowed + // here which will cause a2 to become negative. + __ subu(a2, sp, a2); + // Check if the arguments will overflow the stack. + __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize); + __ Branch(&okay, gt, a2, Operand(t3)); // Signed comparison. + + // Out of stack space. + __ lw(a1, MemOperand(fp, kFunctionOffset)); + __ push(a1); + __ push(v0); + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + // End of stack check. + + // Push current limit and index. + __ bind(&okay); + __ push(v0); // Limit. + __ mov(a1, zero_reg); // Initial index. + __ push(a1); - // Call the runtime to access the property in the arguments array. - __ CallRuntime(Runtime::kGetProperty, 2); - __ push(v0); + // Get the receiver. + __ lw(a0, MemOperand(fp, kRecvOffset)); - // Use inline caching to access the arguments. - __ lw(a0, MemOperand(fp, kIndexOffset)); - __ Addu(a0, a0, Operand(1 << kSmiTagSize)); - __ sw(a0, MemOperand(fp, kIndexOffset)); + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver; + __ lw(a1, MemOperand(fp, kFunctionOffset)); + __ GetObjectType(a1, a2, a2); + __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE)); - // Test if the copy loop has finished copying all the elements from the - // arguments object. - __ bind(&entry); - __ lw(a1, MemOperand(fp, kLimitOffset)); - __ Branch(&loop, ne, a0, Operand(a1)); - // Invoke the function. - ParameterCount actual(a0); - __ sra(a0, a0, kSmiTagSize); - __ lw(a1, MemOperand(fp, kFunctionOffset)); - __ InvokeFunction(a1, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - - // Tear down the internal frame and remove function, receiver and args. - __ LeaveInternalFrame(); - __ Addu(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); + // Change context eagerly to get the right global object if necessary. + __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + // Load the shared function info while the function is still in a1. + __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + + // Compute the receiver. + // Do not transform the receiver for strict mode functions. + Label call_to_object, use_global_receiver; + __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset)); + __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + + kSmiTagSize))); + __ Branch(&push_receiver, ne, t3, Operand(zero_reg)); + + // Do not transform the receiver for native (Compilerhints already in a2). + __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); + __ Branch(&push_receiver, ne, t3, Operand(zero_reg)); + + // Compute the receiver in non-strict mode. + __ And(t3, a0, Operand(kSmiTagMask)); + __ Branch(&call_to_object, eq, t3, Operand(zero_reg)); + __ LoadRoot(a1, Heap::kNullValueRootIndex); + __ Branch(&use_global_receiver, eq, a0, Operand(a1)); + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ Branch(&use_global_receiver, eq, a0, Operand(a2)); + + // Check if the receiver is already a JavaScript object. + // a0: receiver + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ GetObjectType(a0, a1, a1); + __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE)); + + // Convert the receiver to a regular object. 
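// Aside (illustrative sketch, not from this patch): the receiver checks above
// follow the non-strict call rule spelled out in the comments: null and
// undefined pick up the global receiver, other non-objects go through
// TO_OBJECT, and spec objects pass through unchanged (strict-mode and native
// functions skip the transformation entirely). Names below are hypothetical.
enum SketchReceiverAction { kSketchUseGlobalReceiver, kSketchConvertToObject, kSketchKeepReceiver };

static SketchReceiverAction SketchClassifyReceiver(bool is_null_or_undefined,
                                                   bool is_smi,
                                                   bool is_spec_object) {
  if (is_null_or_undefined) return kSketchUseGlobalReceiver;
  if (is_smi || !is_spec_object) return kSketchConvertToObject;
  return kSketchKeepReceiver;
}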
+ // a0: receiver + __ bind(&call_to_object); + __ push(a0); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver. + __ Branch(&push_receiver); + + // Use the current global receiver object as the receiver. + __ bind(&use_global_receiver); + const int kGlobalOffset = + Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; + __ lw(a0, FieldMemOperand(cp, kGlobalOffset)); + __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset)); + __ lw(a0, FieldMemOperand(a0, kGlobalOffset)); + __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset)); + + // Push the receiver. + // a0: receiver + __ bind(&push_receiver); + __ push(a0); + + // Copy all arguments from the array to the stack. + Label entry, loop; + __ lw(a0, MemOperand(fp, kIndexOffset)); + __ Branch(&entry); + + // Load the current argument from the arguments array and push it to the + // stack. + // a0: current argument index + __ bind(&loop); + __ lw(a1, MemOperand(fp, kArgsOffset)); + __ push(a1); + __ push(a0); + + // Call the runtime to access the property in the arguments array. + __ CallRuntime(Runtime::kGetProperty, 2); + __ push(v0); + + // Use inline caching to access the arguments. + __ lw(a0, MemOperand(fp, kIndexOffset)); + __ Addu(a0, a0, Operand(1 << kSmiTagSize)); + __ sw(a0, MemOperand(fp, kIndexOffset)); + + // Test if the copy loop has finished copying all the elements from the + // arguments object. + __ bind(&entry); + __ lw(a1, MemOperand(fp, kLimitOffset)); + __ Branch(&loop, ne, a0, Operand(a1)); + + // Invoke the function. + Label call_proxy; + ParameterCount actual(a0); + __ sra(a0, a0, kSmiTagSize); + __ lw(a1, MemOperand(fp, kFunctionOffset)); + __ GetObjectType(a1, a2, a2); + __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE)); + + __ InvokeFunction(a1, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + + scope.GenerateLeaveFrame(); + + __ Ret(USE_DELAY_SLOT); + __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot. + + // Invoke the function proxy. + __ bind(&call_proxy); + __ push(a1); // Add function proxy as last argument. + __ Addu(a0, a0, Operand(1)); + __ li(a2, Operand(0, RelocInfo::NONE)); + __ SetCallKind(t1, CALL_AS_METHOD); + __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY); + __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + // Tear down the internal frame and remove function, receiver and args. + } + + __ Ret(USE_DELAY_SLOT); + __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot. } diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc index 521b8e58f..fe251b9e6 100644 --- a/deps/v8/src/mips/code-stubs-mips.cc +++ b/deps/v8/src/mips/code-stubs-mips.cc @@ -190,6 +190,71 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } +void FastNewBlockContextStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [sp]: function. + // [sp + kPointerSize]: serialized scope info + + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + __ AllocateInNewSpace(FixedArray::SizeFor(length), + v0, a1, a2, &gc, TAG_OBJECT); + + // Load the function from the stack. + __ lw(a3, MemOperand(sp, 0)); + + // Load the serialized scope info from the stack. + __ lw(a1, MemOperand(sp, 1 * kPointerSize)); + + // Setup the object header. 
+ __ LoadRoot(a2, Heap::kBlockContextMapRootIndex); + __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); + __ li(a2, Operand(Smi::FromInt(length))); + __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset)); + + // If this block context is nested in the global context we get a smi + // sentinel instead of a function. The block context should get the + // canonical empty function of the global context as its closure which + // we still have to look up. + Label after_sentinel; + __ JumpIfNotSmi(a3, &after_sentinel); + if (FLAG_debug_code) { + const char* message = "Expected 0 as a Smi sentinel"; + __ Assert(eq, message, a3, Operand(zero_reg)); + } + __ lw(a3, GlobalObjectOperand()); + __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset)); + __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX)); + __ bind(&after_sentinel); + + // Setup the fixed slots. + __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX)); + __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX)); + __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX)); + + // Copy the global object from the previous context. + __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX)); + __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX)); + + // Initialize the rest of the slots to the hole value. + __ LoadRoot(a1, Heap::kTheHoleValueRootIndex); + for (int i = 0; i < slots_; i++) { + __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS)); + } + + // Remove the on-stack argument and return. + __ mov(cp, v0); + __ Addu(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); +} + + void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // Stack layout on entry: // [sp]: constant elements. @@ -615,7 +680,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, Register object, Destination destination, - FPURegister double_dst, + DoubleRegister double_dst, Register dst1, Register dst2, Register heap_number_map, @@ -651,25 +716,16 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, // Load the double value. __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); - // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). - // On MIPS a lot of things cannot be implemented the same way so right - // now it makes a lot more sense to just do things manually. - - // Save FCSR. - __ cfc1(scratch1, FCSR); - // Disable FPU exceptions. - __ ctc1(zero_reg, FCSR); - __ trunc_w_d(single_scratch, double_dst); - // Retrieve FCSR. - __ cfc1(scratch2, FCSR); - // Restore FCSR. - __ ctc1(scratch1, FCSR); - - // Check for inexact conversion or exception. - __ And(scratch2, scratch2, kFCSRFlagMask); + Register except_flag = scratch2; + __ EmitFPUTruncate(kRoundToZero, + single_scratch, + double_dst, + scratch1, + except_flag, + kCheckForInexactConversion); // Jump to not_int32 if the operation did not succeed. 
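// Aside (illustrative sketch, not from this patch): EmitFPUTruncate replaces
// the removed FCSR save / trunc_w_d / restore sequence and reports through
// except_flag whether the conversion was exact; the int32 fast path only
// proceeds when nothing was lost. A portable sketch of that test (the helper
// name is hypothetical):
#include <cstdint>

static bool SketchTruncatesExactlyToInt32(double value, int32_t* out) {
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;  // out of range, or NaN
  int32_t truncated = static_cast<int32_t>(value);                       // round toward zero
  if (static_cast<double>(truncated) != value) return false;             // inexact conversion
  *out = truncated;
  return true;
}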
- __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); + __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); if (destination == kCoreRegisters) { __ Move(dst1, dst2, double_dst); @@ -706,7 +762,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3, - FPURegister double_scratch, + DoubleRegister double_scratch, Label* not_int32) { ASSERT(!dst.is(object)); ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); @@ -735,27 +791,19 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, // Load the double value. __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); - // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). - // On MIPS a lot of things cannot be implemented the same way so right - // now it makes a lot more sense to just do things manually. - - // Save FCSR. - __ cfc1(scratch1, FCSR); - // Disable FPU exceptions. - __ ctc1(zero_reg, FCSR); - __ trunc_w_d(double_scratch, double_scratch); - // Retrieve FCSR. - __ cfc1(scratch2, FCSR); - // Restore FCSR. - __ ctc1(scratch1, FCSR); - - // Check for inexact conversion or exception. - __ And(scratch2, scratch2, kFCSRFlagMask); + FPURegister single_scratch = double_scratch.low(); + Register except_flag = scratch2; + __ EmitFPUTruncate(kRoundToZero, + single_scratch, + double_scratch, + scratch1, + except_flag, + kCheckForInexactConversion); // Jump to not_int32 if the operation did not succeed. - __ Branch(not_int32, ne, scratch2, Operand(zero_reg)); + __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); // Get the result in the destination register. - __ mfc1(dst, double_scratch); + __ mfc1(dst, single_scratch); } else { // Load the double value in the destination registers. @@ -881,9 +929,11 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( __ Move(f12, a0, a1); __ Move(f14, a2, a3); } - // Call C routine that may not cause GC or other trouble. - __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()), - 4); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); + } // Store answer in the overwritable heap number. if (!IsMipsSoftFloatABI) { CpuFeatures::Scope scope(FPU); @@ -901,6 +951,35 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( } +bool WriteInt32ToHeapNumberStub::IsPregenerated() { + // These variants are compiled ahead of time. See next method. + if (the_int_.is(a1) && + the_heap_number_.is(v0) && + scratch_.is(a2) && + sign_.is(a3)) { + return true; + } + if (the_int_.is(a2) && + the_heap_number_.is(v0) && + scratch_.is(a3) && + sign_.is(a0)) { + return true; + } + // Other register combinations are generated as and when they are needed, + // so it is unsafe to call them from stubs (we can't generate a stub while + // we are generating a stub). + return false; +} + + +void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() { + WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3); + WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0); + stub1.GetCode()->set_is_pregenerated(true); + stub2.GetCode()->set_is_pregenerated(true); +} + + // See comment for class, this does NOT work for int32's that are in Smi range. 
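// Aside (illustrative sketch, not from this patch): per the comment above, the
// stub is only reached for int32 values outside the smi range; with a one-bit
// tag on a 32-bit word, only 31-bit signed integers can be encoded as smis.
// The helper name and constants below are stand-ins for the real ones.
#include <cstdint>

static bool SketchFitsInSmi32(int32_t value) {
  const int32_t kSketchSmiMax = (1 << 30) - 1;  //  2^30 - 1
  const int32_t kSketchSmiMin = -(1 << 30);     // -2^30
  return value >= kSketchSmiMin && value <= kSketchSmiMax;
}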
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { Label max_negative_int; @@ -1258,7 +1337,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { if (!CpuFeatures::IsSupported(FPU)) { __ push(ra); - __ PrepareCallCFunction(4, t4); // Two doubles count as 4 arguments. + __ PrepareCallCFunction(0, 2, t4); if (!IsMipsSoftFloatABI) { // We are not using MIPS FPU instructions, and parameters for the runtime // function call are prepaired in a0-a3 registers, but function we are @@ -1268,19 +1347,17 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { __ Move(f12, a0, a1); __ Move(f14, a2, a3); } - __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4); + + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), + 0, 2); __ pop(ra); // Because this function returns int, result is in v0. __ Ret(); } else { CpuFeatures::Scope scope(FPU); Label equal, less_than; - __ c(EQ, D, f12, f14); - __ bc1t(&equal); - __ nop(); - - __ c(OLT, D, f12, f14); - __ bc1t(&less_than); - __ nop(); + __ BranchF(&equal, NULL, eq, f12, f14); + __ BranchF(&less_than, NULL, lt, f12, f14); // Not equal, not less, not NaN, must be greater. __ li(v0, Operand(GREATER)); @@ -1303,7 +1380,7 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, // If either operand is a JS object or an oddball value, then they are // not equal since their pointers are different. // There is no test for undetectability in strict equality. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); Label first_non_object; // Get the type of the first operand into a2 and compare it with // FIRST_SPEC_OBJECT_TYPE. @@ -1473,9 +1550,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, __ JumpIfSmi(probe, not_found); __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); - __ c(EQ, D, f12, f14); - __ bc1t(&load_result_from_cache); - __ nop(); // bc1t() requires explicit fill of branch delay slot. + __ BranchF(&load_result_from_cache, NULL, eq, f12, f14); __ Branch(not_found); } else { // Note that there is no cache check for non-FPU case, even though @@ -1591,9 +1666,7 @@ void CompareStub::Generate(MacroAssembler* masm) { __ li(t2, Operand(EQUAL)); // Check if either rhs or lhs is NaN. - __ c(UN, D, f12, f14); - __ bc1t(&nan); - __ nop(); + __ BranchF(NULL, &nan, eq, f12, f14); // Check if LESS condition is satisfied. If true, move conditionally // result to v0. @@ -1711,88 +1784,144 @@ void CompareStub::Generate(MacroAssembler* masm) { } -// The stub returns zero for false, and a non-zero value for true. +// The stub expects its argument in the tos_ register and returns its result in +// it, too: zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { // This stub uses FPU instructions. CpuFeatures::Scope scope(FPU); - Label false_result; - Label not_heap_number; - Register scratch0 = t5.is(tos_) ? 
t3 : t5; - - // undefined -> false - __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex); - __ Branch(&false_result, eq, tos_, Operand(scratch0)); - - // Boolean -> its value - __ LoadRoot(scratch0, Heap::kFalseValueRootIndex); - __ Branch(&false_result, eq, tos_, Operand(scratch0)); - __ LoadRoot(scratch0, Heap::kTrueValueRootIndex); - // "tos_" is a register and contains a non-zero value. Hence we implicitly - // return true if the equal condition is satisfied. - __ Ret(eq, tos_, Operand(scratch0)); - - // Smis: 0 -> false, all other -> true - __ And(scratch0, tos_, tos_); - __ Branch(&false_result, eq, scratch0, Operand(zero_reg)); - __ And(scratch0, tos_, Operand(kSmiTagMask)); - // "tos_" is a register and contains a non-zero value. Hence we implicitly - // return true if the not equal condition is satisfied. - __ Ret(eq, scratch0, Operand(zero_reg)); - - // 'null' -> false - __ LoadRoot(scratch0, Heap::kNullValueRootIndex); - __ Branch(&false_result, eq, tos_, Operand(scratch0)); - - // HeapNumber => false if +0, -0, or NaN. - __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - __ Branch(&not_heap_number, ne, scratch0, Operand(at)); - - __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset)); - __ fcmp(f12, 0.0, UEQ); - - // "tos_" is a register, and contains a non zero value by default. - // Hence we only need to overwrite "tos_" with zero to return false for - // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. - __ movt(tos_, zero_reg); - __ Ret(); + Label patch; + const Register map = t5.is(tos_) ? t3 : t5; - __ bind(&not_heap_number); - - // It can be an undetectable object. - // Undetectable => false. - __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset)); - __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable)); - __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable)); - - // JavaScript object => true. - __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); - - // "tos_" is a register and contains a non-zero value. - // Hence we implicitly return true if the greater than - // condition is satisfied. - __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); - - // Check for string. - __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); - // "tos_" is a register and contains a non-zero value. - // Hence we implicitly return true if the greater than - // condition is satisfied. - __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE)); - - // String value => false iff empty, i.e., length is zero. - __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset)); - // If length is zero, "tos_" contains zero ==> false. - // If length is not zero, "tos_" contains a non-zero value ==> true. - __ Ret(); + // undefined -> false. + CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); + + // Boolean -> its value. + CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); + CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); + + // 'null' -> false. + CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false); - // Return 0 in "tos_" for false.
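// Aside (illustrative sketch, not from this patch): the removed code above and
// the replacement below encode the same ECMA-262 ToBoolean rule; the new stub
// merely dispatches on the recorded type set (types_) and patches itself via
// GenerateTypeTransition when it meets a type it has not seen. The full value
// set is small; SketchBooleanInput is a hypothetical stand-in.
#include <cstddef>

struct SketchBooleanInput {
  bool is_undefined, is_null, is_false, is_string, is_number;
  double number;          // valid when is_number
  size_t string_length;   // valid when is_string
};

static bool SketchToBoolean(const SketchBooleanInput& v) {
  if (v.is_undefined || v.is_null || v.is_false) return false;
  if (v.is_string) return v.string_length != 0;                    // "" -> false
  if (v.is_number) return v.number == v.number && v.number != 0;   // NaN, +0, -0 -> false
  return true;                                                     // other objects -> true (unless undetectable)
}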
- __ bind(&false_result); - __ mov(tos_, zero_reg); + if (types_.Contains(SMI)) { + // Smis: 0 -> false, all other -> true + __ And(at, tos_, kSmiTagMask); + // tos_ contains the correct return value already + __ Ret(eq, at, Operand(zero_reg)); + } else if (types_.NeedsMap()) { + // If we need a map later and have a Smi -> patch. + __ JumpIfSmi(tos_, &patch); + } + + if (types_.NeedsMap()) { + __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); + + if (types_.CanBeUndetectable()) { + __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(at, at, Operand(1 << Map::kIsUndetectable)); + // Undetectable -> false. + __ movn(tos_, zero_reg, at); + __ Ret(ne, at, Operand(zero_reg)); + } + } + + if (types_.Contains(SPEC_OBJECT)) { + // Spec object -> true. + __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); + // tos_ contains the correct non-zero return value already. + __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE)); + } + + if (types_.Contains(STRING)) { + // String value -> false iff empty. + __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); + Label skip; + __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE)); + __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset)); + __ Ret(); // the string length is OK as the return value + __ bind(&skip); + } + + if (types_.Contains(HEAP_NUMBER)) { + // Heap number -> false iff +0, -0, or NaN. + Label not_heap_number; + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + __ Branch(&not_heap_number, ne, map, Operand(at)); + Label zero_or_nan, number; + __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset)); + __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero); + // "tos_" is a register, and contains a non zero value by default. + // Hence we only need to overwrite "tos_" with zero to return false for + // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. + __ bind(&zero_or_nan); + __ mov(tos_, zero_reg); + __ bind(&number); + __ Ret(); + __ bind(&not_heap_number); + } + + __ bind(&patch); + GenerateTypeTransition(masm); +} + + +void ToBooleanStub::CheckOddball(MacroAssembler* masm, + Type type, + Heap::RootListIndex value, + bool result) { + if (types_.Contains(type)) { + // If we see an expected oddball, return its ToBoolean value tos_. + __ LoadRoot(at, value); + __ Subu(at, at, tos_); // This is a check for equality for the movz below. + // The value of a root is never NULL, so we can avoid loading a non-null + // value into tos_ when we want to return 'true'. + if (!result) { + __ movz(tos_, zero_reg, at); + } + __ Ret(eq, at, Operand(zero_reg)); + } +} + + +void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { + __ Move(a3, tos_); + __ li(a2, Operand(Smi::FromInt(tos_.code()))); + __ li(a1, Operand(Smi::FromInt(types_.ToByte()))); + __ Push(a3, a2, a1); + // Patch the caller to an appropriate specialized stub and return the + // operation result to the caller of the stub. + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()), + 3, + 1); +} + + +void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { + // We don't allow a GC during a store buffer overflow so there is no need to + // store the registers in any particular way, but we do have to store and + // restore them.
+ __ MultiPush(kJSCallerSaved | ra.bit()); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(FPU); + __ MultiPushFPU(kCallerSavedFPU); + } + const int argument_count = 1; + const int fp_argument_count = 0; + const Register scratch = a1; + + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); + __ li(a0, Operand(ExternalReference::isolate_address())); + __ CallCFunction( + ExternalReference::store_buffer_overflow_function(masm->isolate()), + argument_count); + if (save_doubles_ == kSaveFPRegs) { + CpuFeatures::Scope scope(FPU); + __ MultiPopFPU(kCallerSavedFPU); + } + + __ MultiPop(kJSCallerSaved | ra.bit()); __ Ret(); } @@ -1951,12 +2080,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(a0); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(a1, v0); - __ pop(a0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(a0); + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(a1, v0); + __ pop(a0); + } __ bind(&heapnumber_allocated); __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); @@ -1998,13 +2128,14 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(v0); // Push the heap number, not the untagged int32. - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(a2, v0); // Move the new heap number into a2. - // Get the heap number into v0, now that the new heap number is in a2. - __ pop(v0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(v0); // Push the heap number, not the untagged int32. + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ mov(a2, v0); // Move the new heap number into a2. + // Get the heap number into v0, now that the new heap number is in a2. + __ pop(v0); + } // Convert the heap number in v0 to an untagged integer in a1. // This can't go slow-case because it's the same number we already @@ -2115,6 +2246,9 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. It is safe here because + // generation code does not use any raw pointers. + AllowStubCallsScope allow_stub_calls(masm, true); switch (operands_type_) { case BinaryOpIC::UNINITIALIZED: GenerateTypeTransition(masm); @@ -2717,26 +2851,16 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Otherwise return a heap number if allowed, or jump to type // transition. - // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate). - // On MIPS a lot of things cannot be implemented the same way so right - // now it makes a lot more sense to just do things manually. - - // Save FCSR. - __ cfc1(scratch1, FCSR); - // Disable FPU exceptions. - __ ctc1(zero_reg, FCSR); - __ trunc_w_d(single_scratch, f10); - // Retrieve FCSR. - __ cfc1(scratch2, FCSR); - // Restore FCSR. - __ ctc1(scratch1, FCSR); - - // Check for inexact conversion or exception. - __ And(scratch2, scratch2, kFCSRFlagMask); + Register except_flag = scratch2; + __ EmitFPUTruncate(kRoundToZero, + single_scratch, + f10, + scratch1, + except_flag); if (result_type_ <= BinaryOpIC::INT32) { - // If scratch2 != 0, result does not fit in a 32-bit integer. 
- __ Branch(&transition, ne, scratch2, Operand(zero_reg)); + // If except_flag != 0, result does not fit in a 32-bit integer. + __ Branch(&transition, ne, except_flag, Operand(zero_reg)); } // Check if the result fits in a smi. @@ -2929,9 +3053,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ Ret(); } else { // Tail call that writes the int32 in a2 to the heap number in v0, using - // a3 and a1 as scratch. v0 is preserved and returned. + // a3 and a0 as scratch. v0 is preserved and returned. __ mov(a0, t1); - WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1); + WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0); __ TailCallStub(&stub); } @@ -3225,7 +3349,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ lw(t0, MemOperand(cache_entry, 0)); __ lw(t1, MemOperand(cache_entry, 4)); __ lw(t2, MemOperand(cache_entry, 8)); - __ Addu(cache_entry, cache_entry, 12); __ Branch(&calculate, ne, a2, Operand(t0)); __ Branch(&calculate, ne, a3, Operand(t1)); // Cache hit. Load result, cleanup and return. @@ -3259,13 +3382,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Register a0 holds precalculated cache entry address; preserve // it on the stack and pop it into register cache_entry after the // call. - __ push(cache_entry); + __ Push(cache_entry, a2, a3); GenerateCallCFunction(masm, scratch0); __ GetCFunctionDoubleResult(f4); // Try to update the cache. If we cannot allocate a // heap number, we return the result without updating. - __ pop(cache_entry); + __ Pop(cache_entry, a2, a3); __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update); __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset)); @@ -3283,10 +3406,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache); __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset)); - __ EnterInternalFrame(); - __ push(a0); - __ CallRuntime(RuntimeFunction(), 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(a0); + __ CallRuntime(RuntimeFunction(), 1); + } __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset)); __ Ret(); @@ -3299,14 +3423,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // We return the value in f4 without adding it to the cache, but // we cause a scavenging GC so that future allocations will succeed. - __ EnterInternalFrame(); - - // Allocate an aligned object larger than a HeapNumber. - ASSERT(4 * kPointerSize >= HeapNumber::kSize); - __ li(scratch0, Operand(4 * kPointerSize)); - __ push(scratch0); - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Allocate an aligned object larger than a HeapNumber. 
+ ASSERT(4 * kPointerSize >= HeapNumber::kSize); + __ li(scratch0, Operand(4 * kPointerSize)); + __ push(scratch0); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + } __ Ret(); } } @@ -3317,22 +3442,26 @@ void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, __ push(ra); __ PrepareCallCFunction(2, scratch); if (IsMipsSoftFloatABI) { - __ Move(v0, v1, f4); + __ Move(a0, a1, f4); } else { __ mov_d(f12, f4); } + AllowExternalCallThatCantCauseGC scope(masm); switch (type_) { case TranscendentalCache::SIN: __ CallCFunction( - ExternalReference::math_sin_double_function(masm->isolate()), 2); + ExternalReference::math_sin_double_function(masm->isolate()), + 0, 1); break; case TranscendentalCache::COS: __ CallCFunction( - ExternalReference::math_cos_double_function(masm->isolate()), 2); + ExternalReference::math_cos_double_function(masm->isolate()), + 0, 1); break; case TranscendentalCache::LOG: __ CallCFunction( - ExternalReference::math_log_double_function(masm->isolate()), 2); + ExternalReference::math_log_double_function(masm->isolate()), + 0, 1); break; default: UNIMPLEMENTED(); @@ -3415,12 +3544,15 @@ void MathPowStub::Generate(MacroAssembler* masm) { heapnumbermap, &call_runtime); __ push(ra); - __ PrepareCallCFunction(3, scratch); + __ PrepareCallCFunction(1, 1, scratch); __ SetCallCDoubleArguments(double_base, exponent); - __ CallCFunction( - ExternalReference::power_double_int_function(masm->isolate()), 3); - __ pop(ra); - __ GetCFunctionDoubleResult(double_result); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::power_double_int_function(masm->isolate()), 1, 1); + __ pop(ra); + __ GetCFunctionDoubleResult(double_result); + } __ sdc1(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); __ mov(v0, heapnumber); @@ -3443,15 +3575,20 @@ void MathPowStub::Generate(MacroAssembler* masm) { heapnumbermap, &call_runtime); __ push(ra); - __ PrepareCallCFunction(4, scratch); + __ PrepareCallCFunction(0, 2, scratch); // ABI (o32) for func(double a, double b): a in f12, b in f14. 
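// Aside (illustrative sketch, not from this patch): PrepareCallCFunction now
// takes the integer and double argument counts separately because, under the
// hard-float o32 ABI, the first two double arguments travel in f12 and f14
// (soft-float configurations pass the same bits in GPRs instead). The C
// function being reached has the shape sketched here; sketch_power is an
// illustrative name, not the real runtime entry.
#include <cmath>

extern "C" double sketch_power(double base, double exponent) {
  return std::pow(base, exponent);  // two doubles in, one double out
}
// No integer arguments at all -- hence PrepareCallCFunction(0, 2, scratch).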
ASSERT(double_base.is(f12)); ASSERT(double_exponent.is(f14)); __ SetCallCDoubleArguments(double_base, double_exponent); - __ CallCFunction( - ExternalReference::power_double_double_function(masm->isolate()), 4); - __ pop(ra); - __ GetCFunctionDoubleResult(double_result); + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::power_double_double_function(masm->isolate()), + 0, + 2); + __ pop(ra); + __ GetCFunctionDoubleResult(double_result); + } __ sdc1(double_result, FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); __ mov(v0, heapnumber); @@ -3468,6 +3605,37 @@ bool CEntryStub::NeedsImmovableCode() { } +bool CEntryStub::IsPregenerated() { + return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && + result_size_ == 1; +} + + +void CodeStub::GenerateStubsAheadOfTime() { + CEntryStub::GenerateAheadOfTime(); + WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); + RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); +} + + +void CodeStub::GenerateFPStubs() { + CEntryStub save_doubles(1, kSaveFPRegs); + Handle<Code> code = save_doubles.GetCode(); + code->set_is_pregenerated(true); + StoreBufferOverflowStub stub(kSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + code->GetIsolate()->set_fp_stubs_generated(true); +} + + +void CEntryStub::GenerateAheadOfTime() { + CEntryStub stub(1, kDontSaveFPRegs); + Handle<Code> code = stub.GetCode(); + code->set_is_pregenerated(true); +} + + void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { __ Throw(v0); } @@ -3490,16 +3658,17 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // s1: pointer to the first argument (C callee-saved) // s2: pointer to builtin function (C callee-saved) + Isolate* isolate = masm->isolate(); + if (do_gc) { // Move result passed in v0 into a0 to call PerformGC. __ mov(a0, v0); - __ PrepareCallCFunction(1, a1); - __ CallCFunction( - ExternalReference::perform_gc_function(masm->isolate()), 1); + __ PrepareCallCFunction(1, 0, a1); + __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0); } ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(masm->isolate()); + ExternalReference::heap_always_allocate_scope_depth(isolate); if (always_allocate) { __ li(a0, Operand(scope_depth)); __ lw(a1, MemOperand(a0)); @@ -3588,18 +3757,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, v0, Operand(reinterpret_cast<int32_t>(out_of_memory))); // Retrieve the pending exception and clear the variable. - __ li(t0, - Operand(ExternalReference::the_hole_value_location(masm->isolate()))); - __ lw(a3, MemOperand(t0)); + __ li(a3, Operand(isolate->factory()->the_hole_value())); __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - masm->isolate()))); + isolate))); __ lw(v0, MemOperand(t0)); __ sw(a3, MemOperand(t0)); // Special handling of termination exceptions which are uncatchable // by javascript code. __ Branch(throw_termination_exception, eq, - v0, Operand(masm->isolate()->factory()->termination_exception())); + v0, Operand(isolate->factory()->termination_exception())); // Handle normal exception. __ jmp(throw_normal_exception); @@ -3628,6 +3795,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { __ Subu(s1, s1, Operand(kPointerSize)); // Enter the exit frame that transitions from JavaScript to C++. 
+ FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame(save_doubles_); // Setup argc and the builtin function in callee-saved registers. @@ -3681,6 +3849,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { Label invoke, exit; + Isolate* isolate = masm->isolate(); // Registers: // a0: entry address @@ -3699,8 +3868,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { CpuFeatures::Scope scope(FPU); // Save callee-saved FPU registers. __ MultiPushFPU(kCalleeSavedFPU); + // Set up the reserved register for 0.0. + __ Move(kDoubleRegZero, 0.0); } + // Load argv in s0 register. int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; if (CpuFeatures::IsSupported(FPU)) { @@ -3715,7 +3887,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ li(t2, Operand(Smi::FromInt(marker))); __ li(t1, Operand(Smi::FromInt(marker))); __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, - masm->isolate()))); + isolate))); __ lw(t0, MemOperand(t0)); __ Push(t3, t2, t1, t0); // Setup frame pointer for the frame to be pushed. @@ -3739,8 +3911,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // If this is the outermost JS call, set js_entry_sp value. Label non_outermost_js; - ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, - masm->isolate()); + ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); __ li(t1, Operand(ExternalReference(js_entry_sp))); __ lw(t2, MemOperand(t1)); __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg)); @@ -3763,7 +3934,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Coming in here the fp will be invalid because the PushTryHandler below // sets it to 0 to signal the existence of the JSEntry frame. __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - masm->isolate()))); + isolate))); __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0. __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); __ b(&exit); // b exposes branch delay slot. @@ -3778,11 +3949,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // saved values before returning a failure to C. // Clear any pending exceptions. - __ li(t0, - Operand(ExternalReference::the_hole_value_location(masm->isolate()))); - __ lw(t1, MemOperand(t0)); + __ li(t1, Operand(isolate->factory()->the_hole_value())); __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - masm->isolate()))); + isolate))); __ sw(t1, MemOperand(t0)); // Invoke the function by calling through JS entry trampoline builtin. @@ -3805,7 +3974,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { if (is_construct) { ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, - masm->isolate()); + isolate); __ li(t0, Operand(construct_entry)); } else { ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate()); @@ -3833,7 +4002,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Restore the top frame descriptors from the stack. __ pop(t1); __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress, - masm->isolate()))); + isolate))); __ sw(t1, MemOperand(t0)); // Reset the stack to the callee saved registers. @@ -3857,11 +4026,10 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // * object: a0 or at sp + 1 * kPointerSize. 
// * function: a1 or at sp. // -// Inlined call site patching is a crankshaft-specific feature that is not -// implemented on MIPS. +// An inlined call site may have been generated before calling this stub. +// In this case the offset to the inline site to patch is passed on the stack, +// in the safepoint slot for register t0. void InstanceofStub::Generate(MacroAssembler* masm) { - // This is a crankshaft-specific feature that has not been implemented yet. - ASSERT(!HasCallSiteInlineCheck()); // Call site inlining and patching implies arguments in registers. ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); // ReturnTrueFalse is only implemented for inlined call sites. @@ -3875,6 +4043,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) { const Register inline_site = t5; const Register scratch = a2; + const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize; + Label slow, loop, is_instance, is_not_instance, not_js_object; if (!HasArgsInRegisters()) { @@ -3890,10 +4060,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // real lookup and update the call site cache. if (!HasCallSiteInlineCheck()) { Label miss; - __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex); - __ Branch(&miss, ne, function, Operand(t1)); - __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex); - __ Branch(&miss, ne, map, Operand(t1)); + __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex); + __ Branch(&miss, ne, function, Operand(at)); + __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex); + __ Branch(&miss, ne, map, Operand(at)); __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); __ DropAndRet(HasArgsInRegisters() ? 0 : 2); @@ -3913,7 +4083,15 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); } else { - UNIMPLEMENTED_MIPS(); + ASSERT(HasArgsInRegisters()); + // Patch the (relocated) inlined map check. + + // The offset was stored in t0 safepoint slot. + // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) + __ LoadFromSafepointRegisterSlot(scratch, t0); + __ Subu(inline_site, ra, scratch); + // Patch the relocated value to map. + __ PatchRelocatedValue(inline_site, scratch, map); } // Register mapping: a3 is object map and t0 is function prototype. @@ -3939,7 +4117,16 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ mov(v0, zero_reg); __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); } else { - UNIMPLEMENTED_MIPS(); + // Patch the call site to return true. + __ LoadRoot(v0, Heap::kTrueValueRootIndex); + __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // Get the boolean result location in scratch and patch it. + __ PatchRelocatedValue(inline_site, scratch, v0); + + if (!ReturnTrueFalseObject()) { + ASSERT_EQ(Smi::FromInt(0), 0); + __ mov(v0, zero_reg); + } } __ DropAndRet(HasArgsInRegisters() ? 0 : 2); @@ -3948,8 +4135,17 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ li(v0, Operand(Smi::FromInt(1))); __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); } else { - UNIMPLEMENTED_MIPS(); + // Patch the call site to return false. + __ LoadRoot(v0, Heap::kFalseValueRootIndex); + __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); + // Get the boolean result location in scratch and patch it. + __ PatchRelocatedValue(inline_site, scratch, v0); + + if (!ReturnTrueFalseObject()) { + __ li(v0, Operand(Smi::FromInt(1))); + } } + __ DropAndRet(HasArgsInRegisters() ? 
0 : 2); Label object_not_null, object_not_null_or_smi; @@ -3986,10 +4182,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } else { - __ EnterInternalFrame(); - __ Push(a0, a1); - __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a0, a1); + __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); + } __ mov(a0, v0); __ LoadRoot(v0, Heap::kTrueValueRootIndex); __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg)); @@ -4411,10 +4608,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { #ifdef V8_INTERPRETED_REGEXP __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); #else // V8_INTERPRETED_REGEXP - if (!FLAG_regexp_entry_native) { - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); - return; - } // Stack frame on entry. // sp[0]: last_match_info (expected JSArray) @@ -4427,6 +4620,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { static const int kSubjectOffset = 2 * kPointerSize; static const int kJSRegExpOffset = 3 * kPointerSize; + Isolate* isolate = masm->isolate(); + Label runtime, invoke_regexp; // Allocation of registers for this function. These are in callee save @@ -4442,9 +4637,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Ensure that a RegExp stack is allocated. ExternalReference address_of_regexp_stack_memory_address = ExternalReference::address_of_regexp_stack_memory_address( - masm->isolate()); + isolate); ExternalReference address_of_regexp_stack_memory_size = - ExternalReference::address_of_regexp_stack_memory_size(masm->isolate()); + ExternalReference::address_of_regexp_stack_memory_size(isolate); __ li(a0, Operand(address_of_regexp_stack_memory_size)); __ lw(a0, MemOperand(a0, 0)); __ Branch(&runtime, eq, a0, Operand(zero_reg)); @@ -4525,7 +4720,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { FieldMemOperand(a0, JSArray::kElementsOffset)); __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); __ Branch(&runtime, ne, a0, Operand( - masm->isolate()->factory()->fixed_array_map())); + isolate->factory()->fixed_array_map())); // Check that the last match info has space for the capture registers and the // additional information. __ lw(a0, @@ -4616,7 +4811,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // subject: Subject string // regexp_data: RegExp data (FixedArray) // All checks done. Now push arguments for native regexp code. - __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(), + __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, a0, a2); // Isolates: note we add an additional parameter here (isolate pointer). @@ -4656,13 +4851,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Argument 5: static offsets vector buffer. __ li(a0, Operand( - ExternalReference::address_of_static_offsets_vector(masm->isolate()))); + ExternalReference::address_of_static_offsets_vector(isolate))); __ sw(a0, MemOperand(sp, 1 * kPointerSize)); // For arguments 4 and 3 get string length, calculate start of string data // and calculate the shift of the index (0 for ASCII and 1 for two byte). - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); - __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte. 
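The hunk just above replaces the SeqAsciiString-specific header constant (and its STATIC_ASSERT) with SeqString::kHeaderSize; the existing Xor already turns the encoding flag into a per-character shift. As a rough illustration of the address arithmetic the stub hands to the native regexp code, here is a minimal standalone C++ sketch; the header size and heap-object tag below are assumptions typical of 32-bit V8 of this period, not values taken from this diff.

// Standalone sketch (not V8 code) of the subject-string address setup.
#include <cstdint>
#include <cstdio>

const int kHeapObjectTagSketch = 1;       // assumed: tagged pointers are off by one
const int kSeqStringHeaderSizeSketch = 12; // assumed: map + length + hash field

// Characters start right after the string header; the per-character stride is
// 1 << shift bytes, where shift is 0 for one-byte and 1 for two-byte strings.
uintptr_t CharAddress(uintptr_t tagged_subject, int index, int shift) {
  uintptr_t data_start =
      tagged_subject + kSeqStringHeaderSizeSketch - kHeapObjectTagSketch;
  return data_start + (static_cast<uintptr_t>(index) << shift);
}

int main() {
  uintptr_t subject = 0x10000001;  // made-up tagged pointer
  std::printf("one-byte char 3 at %#lx, two-byte char 3 at %#lx\n",
              (unsigned long)CharAddress(subject, 3, 0),
              (unsigned long)CharAddress(subject, 3, 1));
  return 0;
}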
// Load the length from the original subject string from the previous stack // frame. Therefore we have to use fp, which points exactly to two pointer @@ -4715,11 +4909,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // stack overflow (on the backtrack stack) was detected in RegExp code but // haven't created the exception yet. Handle that in the runtime system. // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ li(a1, Operand( - ExternalReference::the_hole_value_location(masm->isolate()))); - __ lw(a1, MemOperand(a1, 0)); + __ li(a1, Operand(isolate->factory()->the_hole_value())); __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, - masm->isolate()))); + isolate))); __ lw(v0, MemOperand(a2, 0)); __ Branch(&runtime, eq, v0, Operand(a1)); @@ -4737,7 +4929,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ bind(&failure); // For failure and exception return null. - __ li(v0, Operand(masm->isolate()->factory()->null_value())); + __ li(v0, Operand(isolate->factory()->null_value())); __ Addu(sp, sp, Operand(4 * kPointerSize)); __ Ret(); @@ -4757,20 +4949,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ sw(a2, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastCaptureCountOffset)); // Store last subject and last input. - __ mov(a3, last_match_info_elements); // Moved up to reduce latency. __ sw(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastSubjectOffset)); - __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0); + __ mov(a2, subject); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastSubjectOffset, + a2, + t3, + kRAHasNotBeenSaved, + kDontSaveFPRegs); __ sw(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastInputOffset)); - __ mov(a3, last_match_info_elements); - __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0); + __ RecordWriteField(last_match_info_elements, + RegExpImpl::kLastInputOffset, + subject, + t3, + kRAHasNotBeenSaved, + kDontSaveFPRegs); // Get the static offsets vector filled by the native regexp code. ExternalReference address_of_static_offsets_vector = - ExternalReference::address_of_static_offsets_vector(masm->isolate()); + ExternalReference::address_of_static_offsets_vector(isolate); __ li(a2, Operand(address_of_static_offsets_vector)); // a1: number of capture registers @@ -4895,8 +5096,24 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { } +void CallFunctionStub::FinishCode(Code* code) { + code->set_has_function_cache(false); +} + + +void CallFunctionStub::Clear(Heap* heap, Address address) { + UNREACHABLE(); +} + + +Object* CallFunctionStub::GetCachedValue(Address address) { + UNREACHABLE(); + return NULL; +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { - Label slow; + Label slow, non_function; // The receiver might implicitly be the global object. This is // indicated by passing the hole as the receiver to the call @@ -4922,7 +5139,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // Check that the function is really a JavaScript function. // a1: pushed function (to be verified) - __ JumpIfSmi(a1, &slow); + __ JumpIfSmi(a1, &non_function); // Get the map of the function object. __ GetObjectType(a1, a2, a2); __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE)); @@ -4950,8 +5167,22 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // Slow-case: Non-function called. __ bind(&slow); + // Check for function proxy. 
+ __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE)); + __ push(a1); // Put proxy as additional argument. + __ li(a0, Operand(argc_ + 1, RelocInfo::NONE)); + __ li(a2, Operand(0, RelocInfo::NONE)); + __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY); + __ SetCallKind(t1, CALL_AS_FUNCTION); + { + Handle<Code> adaptor = + masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); + __ Jump(adaptor, RelocInfo::CODE_TARGET); + } + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead // of the original receiver from the call site). + __ bind(&non_function); __ sw(a1, MemOperand(sp, argc_ * kPointerSize)); __ li(a0, Operand(argc_)); // Setup the number of arguments. __ mov(a2, zero_reg); @@ -5057,24 +5288,27 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { __ Branch(&call_runtime_, ne, result_, Operand(t0)); // Get the first of the two strings and load its instance type. - __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); + __ lw(result_, FieldMemOperand(object_, ConsString::kFirstOffset)); __ jmp(&assure_seq_string); // SlicedString, unpack and add offset. __ bind(&sliced_string); __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset)); __ addu(scratch_, scratch_, result_); - __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset)); + __ lw(result_, FieldMemOperand(object_, SlicedString::kParentOffset)); // Assure that we are dealing with a sequential string. Go to runtime if not. __ bind(&assure_seq_string); - __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); + __ lw(result_, FieldMemOperand(result_, HeapObject::kMapOffset)); __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); // Check that parent is not an external string. Go to runtime otherwise. STATIC_ASSERT(kSeqStringTag == 0); __ And(t0, result_, Operand(kStringRepresentationMask)); __ Branch(&call_runtime_, ne, t0, Operand(zero_reg)); + // Actually fetch the parent string if it is confirmed to be sequential. + STATIC_ASSERT(SlicedString::kParentOffset == ConsString::kFirstOffset); + __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset)); // Check for 1-byte or 2-byte string. __ bind(&flat_string); @@ -6463,39 +6697,25 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { __ Subu(a2, a0, Operand(kHeapObjectTag)); __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); - Label fpu_eq, fpu_lt, fpu_gt; - // Compare operands (test if unordered). - __ c(UN, D, f0, f2); - // Don't base result on status bits when a NaN is involved. - __ bc1t(&unordered); - __ nop(); + // Return a result of -1, 0, or 1, or use CompareStub for NaNs. + Label fpu_eq, fpu_lt; + // Test if equal, and also handle the unordered/NaN case. + __ BranchF(&fpu_eq, &unordered, eq, f0, f2); - // Test if equal. - __ c(EQ, D, f0, f2); - __ bc1t(&fpu_eq); - __ nop(); + // Test if less (unordered case is already handled). + __ BranchF(&fpu_lt, NULL, lt, f0, f2); - // Test if unordered or less (unordered case is already handled). - __ c(ULT, D, f0, f2); - __ bc1t(&fpu_lt); - __ nop(); + // Otherwise it's greater, so just fall thru, and return. + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(GREATER)); // In delay slot. - // Otherwise it's greater. - __ bc1f(&fpu_gt); - __ nop(); - - // Return a result of -1, 0, or 1. __ bind(&fpu_eq); - __ li(v0, Operand(EQUAL)); - __ Ret(); + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(EQUAL)); // In delay slot. 
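The ICCompareStub::GenerateHeapNumbers rewrite that starts above and continues below folds the raw FPU condition-bit tests into BranchF and returns from branch delay slots, diverting NaN operands to the unordered label. A plain C++ restatement of the result it computes, assuming V8's usual -1/0/1 values for LESS/EQUAL/GREATER (a sketch, not the stub itself):

#include <cmath>

enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

// Returns false for the unordered (NaN) case, mirroring the jump to &unordered,
// where the generic CompareStub takes over.
bool CompareHeapNumbers(double lhs, double rhs, CompareResult* result) {
  if (std::isnan(lhs) || std::isnan(rhs)) return false;
  if (lhs == rhs) {
    *result = EQUAL;
  } else if (lhs < rhs) {
    *result = LESS;
  } else {
    *result = GREATER;
  }
  return true;
}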
__ bind(&fpu_lt); - __ li(v0, Operand(LESS)); - __ Ret(); - - __ bind(&fpu_gt); - __ li(v0, Operand(GREATER)); - __ Ret(); + __ Ret(USE_DELAY_SLOT); + __ li(v0, Operand(LESS)); // In delay slot. __ bind(&unordered); } @@ -6646,12 +6866,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { // Call the runtime system in a fresh internal frame. ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); - __ EnterInternalFrame(); - __ Push(a1, a0); - __ li(t0, Operand(Smi::FromInt(op_))); - __ push(t0); - __ CallExternalReference(miss, 3); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(a1, a0); + __ li(t0, Operand(Smi::FromInt(op_))); + __ push(t0); + __ CallExternalReference(miss, 3); + } // Compute the entry point of the rewritten stub. __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Restore registers. @@ -6867,6 +7088,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. // Registers: // result: StringDictionary to probe // a1: key @@ -6960,6 +7183,269 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { } +struct AheadOfTimeWriteBarrierStubList { + Register object, value, address; + RememberedSetAction action; +}; + + +struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { + // Used in RegExpExecStub. + { s2, s0, t3, EMIT_REMEMBERED_SET }, + { s2, a2, t3, EMIT_REMEMBERED_SET }, + // Used in CompileArrayPushCall. + // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. + // Also used in KeyedStoreIC::GenerateGeneric. + { a3, t0, t1, EMIT_REMEMBERED_SET }, + // Used in CompileStoreGlobal. + { t0, a1, a2, OMIT_REMEMBERED_SET }, + // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. + { a1, a2, a3, EMIT_REMEMBERED_SET }, + { a3, a2, a1, EMIT_REMEMBERED_SET }, + // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. + { a2, a1, a3, EMIT_REMEMBERED_SET }, + { a3, a1, a2, EMIT_REMEMBERED_SET }, + // KeyedStoreStubCompiler::GenerateStoreFastElement. + { t0, a2, a3, EMIT_REMEMBERED_SET }, + // Null termination. + { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET} +}; + + +bool RecordWriteStub::IsPregenerated() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + if (object_.is(entry->object) && + value_.is(entry->value) && + address_.is(entry->address) && + remembered_set_action_ == entry->action && + save_fp_regs_mode_ == kDontSaveFPRegs) { + return true; + } + } + return false; +} + + +bool StoreBufferOverflowStub::IsPregenerated() { + return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated(); +} + + +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { + StoreBufferOverflowStub stub1(kDontSaveFPRegs); + stub1.GetCode()->set_is_pregenerated(true); +} + + +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + RecordWriteStub stub(entry->object, + entry->value, + entry->address, + entry->action, + kDontSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + } +} + + +// Takes the input in 3 registers: address_ value_ and object_. 
A pointer to +// the value has just been written into the object, now this stub makes sure +// we keep the GC informed. The word in the object where the value has been +// written is in the address register. +void RecordWriteStub::Generate(MacroAssembler* masm) { + Label skip_to_incremental_noncompacting; + Label skip_to_incremental_compacting; + + // The first two branch+nop instructions are generated with labels so as to + // get the offset fixed up correctly by the bind(Label*) call. We patch it + // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this + // position) and the "beq zero_reg, zero_reg, ..." when we start and stop + // incremental heap marking. + // See RecordWriteStub::Patch for details. + __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting); + __ nop(); + __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting); + __ nop(); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } + __ Ret(); + + __ bind(&skip_to_incremental_noncompacting); + GenerateIncremental(masm, INCREMENTAL); + + __ bind(&skip_to_incremental_compacting); + GenerateIncremental(masm, INCREMENTAL_COMPACTION); + + // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. + // Will be checked in IncrementalMarking::ActivateGeneratedStub. + + PatchBranchIntoNop(masm, 0); + PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize); +} + + +void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { + regs_.Save(masm); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + Label dont_need_remembered_set; + + __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0)); + __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. + regs_.scratch0(), + &dont_need_remembered_set); + + __ CheckPageFlag(regs_.object(), + regs_.scratch0(), + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + ne, + &dont_need_remembered_set); + + // First notify the incremental marker if necessary, then update the + // remembered set. + CheckNeedsToInformIncrementalMarker( + masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + + __ bind(&dont_need_remembered_set); + } + + CheckNeedsToInformIncrementalMarker( + masm, kReturnOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ Ret(); +} + + +void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { + regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); + int argument_count = 3; + __ PrepareCallCFunction(argument_count, regs_.scratch0()); + Register address = + a0.is(regs_.address()) ? 
regs_.scratch0() : regs_.address(); + ASSERT(!address.is(regs_.object())); + ASSERT(!address.is(a0)); + __ Move(address, regs_.address()); + __ Move(a0, regs_.object()); + if (mode == INCREMENTAL_COMPACTION) { + __ Move(a1, address); + } else { + ASSERT(mode == INCREMENTAL); + __ lw(a1, MemOperand(address, 0)); + } + __ li(a2, Operand(ExternalReference::isolate_address())); + + AllowExternalCallThatCantCauseGC scope(masm); + if (mode == INCREMENTAL_COMPACTION) { + __ CallCFunction( + ExternalReference::incremental_evacuation_record_write_function( + masm->isolate()), + argument_count); + } else { + ASSERT(mode == INCREMENTAL); + __ CallCFunction( + ExternalReference::incremental_marking_record_write_function( + masm->isolate()), + argument_count); + } + regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); +} + + +void RecordWriteStub::CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode) { + Label on_black; + Label need_incremental; + Label need_incremental_pop_scratch; + + // Let's look at the color of the object: If it is not black we don't have + // to inform the incremental marker. + __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&on_black); + + // Get the value from the slot. + __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0)); + + if (mode == INCREMENTAL_COMPACTION) { + Label ensure_not_white; + + __ CheckPageFlag(regs_.scratch0(), // Contains value. + regs_.scratch1(), // Scratch. + MemoryChunk::kEvacuationCandidateMask, + eq, + &ensure_not_white); + + __ CheckPageFlag(regs_.object(), + regs_.scratch1(), // Scratch. + MemoryChunk::kSkipEvacuationSlotsRecordingMask, + eq, + &need_incremental); + + __ bind(&ensure_not_white); + } + + // We need extra registers for this, so we push the object and the address + // register temporarily. + __ Push(regs_.object(), regs_.address()); + __ EnsureNotWhite(regs_.scratch0(), // The value. + regs_.scratch1(), // Scratch. + regs_.object(), // Scratch. + regs_.address(), // Scratch. + &need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ Ret(); + } + + __ bind(&need_incremental_pop_scratch); + __ Pop(regs_.object(), regs_.address()); + + __ bind(&need_incremental); + + // Fall through when we need to inform the incremental marker. 
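CheckNeedsToInformIncrementalMarker above decides, per store, whether updating the remembered set is enough or whether the incremental marker has to be told. The following is a rough plain-C++ restatement of that decision as read from the control flow in this hunk; it glosses over the exact page-flag and mark-bit probes and is a sketch, not V8 code.

enum Action { kJustUpdateRememberedSet, kInformIncrementalMarker };

struct StoreSite {
  bool host_is_black;         // host object already fully scanned by the marker
  bool value_needs_marking;   // value still white and not a data-only object
  bool value_on_evac_page;    // compaction only: value sits on an evacuation candidate
  bool host_skips_recording;  // compaction only: host page opts out of slot recording
};

Action Classify(const StoreSite& s, bool compacting) {
  // If the host has not been scanned yet, the marker will still visit this
  // slot; the store-buffer entry alone is enough.
  if (!s.host_is_black) return kJustUpdateRememberedSet;
  // During compaction, a slot pointing into an evacuation candidate must be
  // recorded unless the host page explicitly skips slot recording.
  if (compacting && s.value_on_evac_page && !s.host_skips_recording) {
    return kInformIncrementalMarker;
  }
  // Otherwise only a still-unmarked value forces a call into the marker.
  return s.value_needs_marking ? kInformIncrementalMarker
                               : kJustUpdateRememberedSet;
}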
+} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h index aa224bcfa..ef6b88908 100644 --- a/deps/v8/src/mips/code-stubs-mips.h +++ b/deps/v8/src/mips/code-stubs-mips.h @@ -59,6 +59,25 @@ class TranscendentalCacheStub: public CodeStub { }; +class StoreBufferOverflowStub: public CodeStub { + public: + explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) + : save_doubles_(save_fp) { } + + void Generate(MacroAssembler* masm); + + virtual bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + SaveFPRegsMode save_doubles_; + + Major MajorKey() { return StoreBufferOverflow; } + int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } +}; + + class UnaryOpStub: public CodeStub { public: UnaryOpStub(Token::Value op, @@ -324,7 +343,15 @@ class WriteInt32ToHeapNumberStub : public CodeStub { : the_int_(the_int), the_heap_number_(the_heap_number), scratch_(scratch), - sign_(scratch2) { } + sign_(scratch2) { + ASSERT(IntRegisterBits::is_valid(the_int_.code())); + ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code())); + ASSERT(ScratchRegisterBits::is_valid(scratch_.code())); + ASSERT(SignRegisterBits::is_valid(sign_.code())); + } + + bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); private: Register the_int_; @@ -336,13 +363,15 @@ class WriteInt32ToHeapNumberStub : public CodeStub { class IntRegisterBits: public BitField<int, 0, 4> {}; class HeapNumberRegisterBits: public BitField<int, 4, 4> {}; class ScratchRegisterBits: public BitField<int, 8, 4> {}; + class SignRegisterBits: public BitField<int, 12, 4> {}; Major MajorKey() { return WriteInt32ToHeapNumber; } int MinorKey() { // Encode the parameters in a unique 16 bit value. return IntRegisterBits::encode(the_int_.code()) | HeapNumberRegisterBits::encode(the_heap_number_.code()) - | ScratchRegisterBits::encode(scratch_.code()); + | ScratchRegisterBits::encode(scratch_.code()) + | SignRegisterBits::encode(sign_.code()); } void Generate(MacroAssembler* masm); @@ -375,6 +404,215 @@ class NumberToStringStub: public CodeStub { }; +class RecordWriteStub: public CodeStub { + public: + RecordWriteStub(Register object, + Register value, + Register address, + RememberedSetAction remembered_set_action, + SaveFPRegsMode fp_mode) + : object_(object), + value_(value), + address_(address), + remembered_set_action_(remembered_set_action), + save_fp_regs_mode_(fp_mode), + regs_(object, // An input reg. + address, // An input reg. + value) { // One scratch reg. 
+ } + + enum Mode { + STORE_BUFFER_ONLY, + INCREMENTAL, + INCREMENTAL_COMPACTION + }; + + virtual bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { + const unsigned offset = masm->instr_at(pos) & kImm16Mask; + masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) | + (zero_reg.code() << kRtShift) | (offset & kImm16Mask)); + ASSERT(Assembler::IsBne(masm->instr_at(pos))); + } + + static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { + const unsigned offset = masm->instr_at(pos) & kImm16Mask; + masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) | + (zero_reg.code() << kRtShift) | (offset & kImm16Mask)); + ASSERT(Assembler::IsBeq(masm->instr_at(pos))); + } + + static Mode GetMode(Code* stub) { + Instr first_instruction = Assembler::instr_at(stub->instruction_start()); + Instr second_instruction = Assembler::instr_at(stub->instruction_start() + + 2 * Assembler::kInstrSize); + + if (Assembler::IsBeq(first_instruction)) { + return INCREMENTAL; + } + + ASSERT(Assembler::IsBne(first_instruction)); + + if (Assembler::IsBeq(second_instruction)) { + return INCREMENTAL_COMPACTION; + } + + ASSERT(Assembler::IsBne(second_instruction)); + + return STORE_BUFFER_ONLY; + } + + static void Patch(Code* stub, Mode mode) { + MacroAssembler masm(NULL, + stub->instruction_start(), + stub->instruction_size()); + switch (mode) { + case STORE_BUFFER_ONLY: + ASSERT(GetMode(stub) == INCREMENTAL || + GetMode(stub) == INCREMENTAL_COMPACTION); + PatchBranchIntoNop(&masm, 0); + PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize); + break; + case INCREMENTAL: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + PatchNopIntoBranch(&masm, 0); + break; + case INCREMENTAL_COMPACTION: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize); + break; + } + ASSERT(GetMode(stub) == mode); + CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize); + } + + private: + // This is a helper class for freeing up 3 scratch registers. The input is + // two registers that must be preserved and one scratch register provided by + // the caller. + class RegisterAllocation { + public: + RegisterAllocation(Register object, + Register address, + Register scratch0) + : object_(object), + address_(address), + scratch0_(scratch0) { + ASSERT(!AreAliased(scratch0, object, address, no_reg)); + scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_); + } + + void Save(MacroAssembler* masm) { + ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); + // We don't have to save scratch0_ because it was given to us as + // a scratch register. + masm->push(scratch1_); + } + + void Restore(MacroAssembler* masm) { + masm->pop(scratch1_); + } + + // If we have to call into C then we need to save and restore all caller- + // saved registers that were not already preserved. The scratch registers + // will be restored by other means so we don't bother pushing them here. 
+ void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { + masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit()); + if (mode == kSaveFPRegs) { + CpuFeatures::Scope scope(FPU); + masm->MultiPushFPU(kCallerSavedFPU); + } + } + + inline void RestoreCallerSaveRegisters(MacroAssembler*masm, + SaveFPRegsMode mode) { + if (mode == kSaveFPRegs) { + CpuFeatures::Scope scope(FPU); + masm->MultiPopFPU(kCallerSavedFPU); + } + masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit()); + } + + inline Register object() { return object_; } + inline Register address() { return address_; } + inline Register scratch0() { return scratch0_; } + inline Register scratch1() { return scratch1_; } + + private: + Register object_; + Register address_; + Register scratch0_; + Register scratch1_; + + Register GetRegThatIsNotOneOf(Register r1, + Register r2, + Register r3) { + for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + Register candidate = Register::FromAllocationIndex(i); + if (candidate.is(r1)) continue; + if (candidate.is(r2)) continue; + if (candidate.is(r3)) continue; + return candidate; + } + UNREACHABLE(); + return no_reg; + } + friend class RecordWriteStub; + }; + + enum OnNoNeedToInformIncrementalMarker { + kReturnOnNoNeedToInformIncrementalMarker, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker + }; + + void Generate(MacroAssembler* masm); + void GenerateIncremental(MacroAssembler* masm, Mode mode); + void CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode); + void InformIncrementalMarker(MacroAssembler* masm, Mode mode); + + Major MajorKey() { return RecordWrite; } + + int MinorKey() { + return ObjectBits::encode(object_.code()) | + ValueBits::encode(value_.code()) | + AddressBits::encode(address_.code()) | + RememberedSetActionBits::encode(remembered_set_action_) | + SaveFPRegsModeBits::encode(save_fp_regs_mode_); + } + + bool MustBeInStubCache() { + // All stubs must be registered in the stub cache + // otherwise IncrementalMarker would not be able to find + // and patch it. + return true; + } + + void Activate(Code* code) { + code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); + } + + class ObjectBits: public BitField<int, 0, 5> {}; + class ValueBits: public BitField<int, 5, 5> {}; + class AddressBits: public BitField<int, 10, 5> {}; + class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {}; + class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {}; + + Register object_; + Register value_; + Register address_; + RememberedSetAction remembered_set_action_; + SaveFPRegsMode save_fp_regs_mode_; + Label slow_; + RegisterAllocation regs_; +}; + + // Enter C code from generated RegExp code in a way that allows // the C code to fix the return address in case of a GC. // Currently only needed on ARM and MIPS. 
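The RecordWriteStub declared above is toggled between its store-buffer-only and incremental modes by rewriting the two branch instructions at the start of its body (PatchBranchIntoNop / PatchNopIntoBranch, queried by GetMode). A standalone sketch of that rewriting, assuming the standard MIPS I-type layout (opcode in bits 31..26, 16-bit offset in the low halfword); the opcode constants here are assumptions, not taken from this diff:

#include <cstdint>

const uint32_t kOpcodeShift = 26;
const uint32_t kImm16MaskSketch = 0xFFFF;
const uint32_t kBeqOpcode = 0x04u << kOpcodeShift;  // beq $0, $0, off: always taken
const uint32_t kBneOpcode = 0x05u << kOpcodeShift;  // bne $0, $0, off: never taken (a nop)

// Both register fields stay zero (zero_reg), so the word is just opcode | offset,
// and the 16-bit branch offset is preserved across patching.
uint32_t PatchBranchIntoNopSketch(uint32_t instr) {
  return kBneOpcode | (instr & kImm16MaskSketch);
}

uint32_t PatchNopIntoBranchSketch(uint32_t instr) {
  return kBeqOpcode | (instr & kImm16MaskSketch);
}

// GetMode-style probe: a beq at the patch site means that incremental path is live.
bool BranchIsLive(uint32_t instr) {
  return (instr >> kOpcodeShift) == 0x04u;
}

Because the stub keeps two such patch points (at offsets 0 and 2 * Assembler::kInstrSize), GetMode can distinguish STORE_BUFFER_ONLY, INCREMENTAL and INCREMENTAL_COMPACTION from the two opcodes alone.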
@@ -578,6 +816,8 @@ class StringDictionaryLookupStub: public CodeStub { Register r0, Register r1); + virtual bool SometimesSetsUpAFrame() { return false; } + private: static const int kInlinedProbes = 4; static const int kTotalProbes = 20; @@ -590,7 +830,7 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryNegativeLookup; } + Major MajorKey() { return StringDictionaryLookup; } int MinorKey() { return LookupModeBits::encode(mode_); diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc index 4400b643a..ff146dd4e 100644 --- a/deps/v8/src/mips/codegen-mips.cc +++ b/deps/v8/src/mips/codegen-mips.cc @@ -38,12 +38,16 @@ namespace internal { // Platform-specific RuntimeCallHelper functions. void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { - masm->EnterInternalFrame(); + masm->EnterFrame(StackFrame::INTERNAL); + ASSERT(!masm->has_frame()); + masm->set_has_frame(true); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { - masm->LeaveInternalFrame(); + masm->LeaveFrame(StackFrame::INTERNAL); + ASSERT(masm->has_frame()); + masm->set_has_frame(false); } diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h index a8de9c861..b020d8057 100644 --- a/deps/v8/src/mips/codegen-mips.h +++ b/deps/v8/src/mips/codegen-mips.h @@ -71,21 +71,6 @@ class CodeGenerator: public AstVisitor { int pos, bool right_here = false); - // Constants related to patching of inlined load/store. - static int GetInlinedKeyedLoadInstructionsAfterPatch() { - // This is in correlation with the padding in MacroAssembler::Abort. - return FLAG_debug_code ? 45 : 20; - } - - static const int kInlinedKeyedStoreInstructionsAfterPatch = 13; - - static int GetInlinedNamedStoreInstructionsAfterPatch() { - ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1); - // Magic number 5: instruction count after patched map load: - // li: 2 (liu & ori), Branch : 2 (bne & nop), sw : 1 - return Isolate::Current()->inlined_write_barrier_size() + 5; - } - private: DISALLOW_COPY_AND_ASSIGN(CodeGenerator); }; diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc index e323c505e..5b3ae89db 100644 --- a/deps/v8/src/mips/debug-mips.cc +++ b/deps/v8/src/mips/debug-mips.cc @@ -124,55 +124,58 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() { static void Generate_DebugBreakCallHelper(MacroAssembler* masm, RegList object_regs, RegList non_object_regs) { - __ EnterInternalFrame(); - - // Store the registers containing live values on the expression stack to - // make sure that these are correctly updated during GC. Non object values - // are stored as a smi causing it to be untouched by GC. - ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); - if ((object_regs | non_object_regs) != 0) { - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((non_object_regs & (1 << r)) != 0) { - if (FLAG_debug_code) { - __ And(at, reg, 0xc0000000); - __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg)); + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as a smi causing it to be untouched by GC. 
+ ASSERT((object_regs & ~kJSCallerSaved) == 0); + ASSERT((non_object_regs & ~kJSCallerSaved) == 0); + ASSERT((object_regs & non_object_regs) == 0); + if ((object_regs | non_object_regs) != 0) { + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + if (FLAG_debug_code) { + __ And(at, reg, 0xc0000000); + __ Assert( + eq, "Unable to encode value as smi", at, Operand(zero_reg)); + } + __ sll(reg, reg, kSmiTagSize); } - __ sll(reg, reg, kSmiTagSize); } + __ MultiPush(object_regs | non_object_regs); } - __ MultiPush(object_regs | non_object_regs); - } #ifdef DEBUG - __ RecordComment("// Calling from debug break to runtime - come in - over"); + __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif - __ mov(a0, zero_reg); // No arguments. - __ li(a1, Operand(ExternalReference::debug_break(masm->isolate()))); - - CEntryStub ceb(1); - __ CallStub(&ceb); - - // Restore the register values from the expression stack. - if ((object_regs | non_object_regs) != 0) { - __ MultiPop(object_regs | non_object_regs); - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if ((non_object_regs & (1 << r)) != 0) { - __ srl(reg, reg, kSmiTagSize); - } - if (FLAG_debug_code && - (((object_regs |non_object_regs) & (1 << r)) == 0)) { - __ li(reg, kDebugZapValue); + __ mov(a0, zero_reg); // No arguments. + __ li(a1, Operand(ExternalReference::debug_break(masm->isolate()))); + + CEntryStub ceb(1); + __ CallStub(&ceb); + + // Restore the register values from the expression stack. + if ((object_regs | non_object_regs) != 0) { + __ MultiPop(object_regs | non_object_regs); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + __ srl(reg, reg, kSmiTagSize); + } + if (FLAG_debug_code && + (((object_regs |non_object_regs) & (1 << r)) == 0)) { + __ li(reg, kDebugZapValue); + } } } - } - __ LeaveInternalFrame(); + // Leave the internal frame. + } // Now that the break point has been handled, resume normal execution by // jumping to the target address intended by the caller and that was diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc index 18b623199..280b8cb54 100644 --- a/deps/v8/src/mips/deoptimizer-mips.cc +++ b/deps/v8/src/mips/deoptimizer-mips.cc @@ -53,7 +53,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } -void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, +void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, + Address pc_after, Code* check_code, Code* replacement_code) { UNIMPLEMENTED(); diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h index 2c838938b..a2ebce682 100644 --- a/deps/v8/src/mips/frames-mips.h +++ b/deps/v8/src/mips/frames-mips.h @@ -85,6 +85,20 @@ static const RegList kCalleeSavedFPU = 1 << 30; // f30 static const int kNumCalleeSavedFPU = 6; + +static const RegList kCallerSavedFPU = + 1 << 0 | // f0 + 1 << 2 | // f2 + 1 << 4 | // f4 + 1 << 6 | // f6 + 1 << 8 | // f8 + 1 << 10 | // f10 + 1 << 12 | // f12 + 1 << 14 | // f14 + 1 << 16 | // f16 + 1 << 18; // f18 + + // Number of registers for which space is reserved in safepoints. Must be a // multiple of 8. 
static const int kNumSafepointRegisters = 24; diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc index b042a3eca..b3f054087 100644 --- a/deps/v8/src/mips/full-codegen-mips.cc +++ b/deps/v8/src/mips/full-codegen-mips.cc @@ -47,6 +47,7 @@ #include "stub-cache.h" #include "mips/code-stubs-mips.h" +#include "mips/macro-assembler-mips.h" namespace v8 { namespace internal { @@ -62,9 +63,11 @@ static unsigned GetPropertyId(Property* property) { // A patch site is a location in the code which it is possible to patch. This // class has a number of methods to emit the code which is patchable and the // method EmitPatchInfo to record a marker back to the patchable code. This -// marker is a andi at, rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16 -// bit immediate value is used) is the delta from the pc to the first +// marker is a andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy +// (raw 16 bit immediate value is used) is the delta from the pc to the first // instruction of the patchable code. +// The marker instruction is effectively a NOP (dest is zero_reg) and will +// never be emitted by normal code. class JumpPatchSite BASE_EMBEDDED { public: explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) { @@ -103,7 +106,7 @@ class JumpPatchSite BASE_EMBEDDED { if (patch_site_.is_bound()) { int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_); Register reg = Register::from_code(delta_to_patch_site / kImm16Mask); - __ andi(at, reg, delta_to_patch_site % kImm16Mask); + __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask); #ifdef DEBUG info_emitted_ = true; #endif @@ -162,6 +165,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { __ bind(&ok); } + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). + FrameScope frame_scope(masm_, StackFrame::MANUAL); + int locals_count = info->scope()->num_stack_slots(); __ Push(ra, fp, cp, a1); @@ -207,14 +215,12 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // Load parameter from stack. __ lw(a0, MemOperand(fp, parameter_offset)); // Store it in the context. - __ li(a1, Operand(Context::SlotOffset(var->index()))); - __ addu(a2, cp, a1); - __ sw(a0, MemOperand(a2, 0)); - // Update the write barrier. This clobbers all involved - // registers, so we have to use two more registers to avoid - // clobbering cp. - __ mov(a2, cp); - __ RecordWrite(a2, a1, a3); + MemOperand target = ContextOperand(cp, var->index()); + __ sw(a0, target); + + // Update the write barrier. + __ RecordWriteContextSlot( + cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs); } } } @@ -272,7 +278,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // constant. if (scope()->is_function_scope() && scope()->function() != NULL) { int ignored = 0; - EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored); + EmitDeclaration(scope()->function(), CONST, NULL, &ignored); } VisitDeclarations(scope()->declarations()); } @@ -310,17 +316,25 @@ void FullCodeGenerator::ClearAccumulator() { void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) { + // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need + // to make sure it is constant. Branch may emit a skip-or-jump sequence + // instead of the normal Branch. 
It seems that the "skip" part of that + // sequence is about as long as this Branch would be so it is safe to ignore + // that. + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); Comment cmnt(masm_, "[ Stack check"); Label ok; __ LoadRoot(t0, Heap::kStackLimitRootIndex); - __ Branch(&ok, hs, sp, Operand(t0)); + __ sltu(at, sp, t0); + __ beq(at, zero_reg, &ok); + // CallStub will emit a li t9, ... first, so it is safe to use the delay slot. StackCheckStub stub; + __ CallStub(&stub); // Record a mapping of this PC offset to the OSR id. This is used to find // the AST id from the unoptimized code in order to use it as a key into // the deoptimization input data found in the optimized code. RecordStackCheck(stmt->OsrEntryId()); - __ CallStub(&stub); __ bind(&ok); PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); // Record a mapping of the OSR id to this PC. This is used if the OSR @@ -670,10 +684,12 @@ void FullCodeGenerator::SetVar(Variable* var, __ sw(src, location); // Emit the write barrier code if the location is in the heap. if (var->IsContextSlot()) { - __ RecordWrite(scratch0, - Operand(Context::SlotOffset(var->index())), - scratch1, - src); + __ RecordWriteContextSlot(scratch0, + location.offset(), + src, + scratch1, + kRAHasBeenSaved, + kDontSaveFPRegs); } } @@ -705,7 +721,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state, void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, - Variable::Mode mode, + VariableMode mode, FunctionLiteral* function, int* global_count) { // If it was not possible to allocate the variable at compile time, we @@ -723,7 +739,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); VisitForAccumulatorValue(function); __ sw(result_register(), StackOperand(variable)); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { Comment cmnt(masm_, "[ Declaration"); __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); __ sw(t0, StackOperand(variable)); @@ -750,10 +766,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ sw(result_register(), ContextOperand(cp, variable->index())); int offset = Context::SlotOffset(variable->index()); // We know that we have written a function, which is not a smi. - __ mov(a1, cp); - __ RecordWrite(a1, Operand(offset), a2, result_register()); + __ RecordWriteContextSlot(cp, + offset, + result_register(), + a2, + kRAHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); PrepareForBailoutForId(proxy->id(), NO_REGISTERS); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { Comment cmnt(masm_, "[ Declaration"); __ LoadRoot(at, Heap::kTheHoleValueRootIndex); __ sw(at, ContextOperand(cp, variable->index())); @@ -766,10 +788,8 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); __ li(a2, Operand(variable->name())); // Declaration nodes are always introduced in one of three modes. - ASSERT(mode == Variable::VAR || - mode == Variable::CONST || - mode == Variable::LET); - PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE; + ASSERT(mode == VAR || mode == CONST || mode == LET); + PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE; __ li(a1, Operand(Smi::FromInt(attr))); // Push initial value, if any. 
// Note: For variables we must not push an initial value (such as @@ -779,7 +799,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ Push(cp, a2, a1); // Push initial value for function declaration. VisitForStackValue(function); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { __ LoadRoot(a0, Heap::kTheHoleValueRootIndex); __ Push(cp, a2, a1, a0); } else { @@ -1201,17 +1221,25 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. - if (var->mode() == Variable::DYNAMIC_GLOBAL) { + if (var->mode() == DYNAMIC_GLOBAL) { EmitLoadGlobalCheckExtensions(var, typeof_state, slow); __ Branch(done); - } else if (var->mode() == Variable::DYNAMIC_LOCAL) { + } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ lw(v0, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == Variable::CONST) { + if (local->mode() == CONST || + local->mode() == LET) { __ LoadRoot(at, Heap::kTheHoleValueRootIndex); __ subu(at, v0, at); // Sub as compare: at == 0 on eq. - __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); - __ movz(v0, a0, at); // Conditional move: return Undefined if TheHole. + if (local->mode() == CONST) { + __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); + __ movz(v0, a0, at); // Conditional move: return Undefined if TheHole. + } else { // LET + __ Branch(done, ne, at, Operand(zero_reg)); + __ li(a0, Operand(var->name())); + __ push(a0); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + } } __ Branch(done); } @@ -1244,14 +1272,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { Comment cmnt(masm_, var->IsContextSlot() ? "Context variable" : "Stack variable"); - if (var->mode() != Variable::LET && var->mode() != Variable::CONST) { + if (var->mode() != LET && var->mode() != CONST) { context()->Plug(var); } else { // Let and const need a read barrier. GetVar(v0, var); __ LoadRoot(at, Heap::kTheHoleValueRootIndex); __ subu(at, v0, at); // Sub as compare: at == 0 on eq. - if (var->mode() == Variable::LET) { + if (var->mode() == LET) { Label done; __ Branch(&done, ne, at, Operand(zero_reg)); __ li(a0, Operand(var->name())); @@ -1491,14 +1519,23 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { VisitForAccumulatorValue(subexpr); // Store the subexpression value in the array's elements. - __ lw(a1, MemOperand(sp)); // Copy of array literal. - __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset)); + __ lw(t6, MemOperand(sp)); // Copy of array literal. + __ lw(a1, FieldMemOperand(t6, JSObject::kElementsOffset)); int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ sw(result_register(), FieldMemOperand(a1, offset)); + Label no_map_change; + __ JumpIfSmi(result_register(), &no_map_change); // Update the write barrier for the array store with v0 as the scratch // register. - __ RecordWrite(a1, Operand(offset), a2, result_register()); + __ RecordWriteField( + a1, offset, result_register(), a2, kRAHasBeenSaved, kDontSaveFPRegs, + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset)); + __ CheckFastSmiOnlyElements(a3, a2, &no_map_change); + __ push(t6); // Copy of array literal. 
+ __ CallRuntime(Runtime::kNonSmiElementStored, 1); + __ bind(&no_map_change); PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); } @@ -1850,7 +1887,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); } - } else if (var->mode() == Variable::LET && op != Token::INIT_LET) { + } else if (var->mode() == LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. if (var->IsLookupSlot()) { __ push(v0); // Value. @@ -1875,11 +1912,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, // RecordWrite may destroy all its register arguments. __ mov(a3, result_register()); int offset = Context::SlotOffset(var->index()); - __ RecordWrite(a1, Operand(offset), a2, a3); + __ RecordWriteContextSlot( + a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs); } } - } else if (var->mode() != Variable::CONST) { + } else if (var->mode() != CONST) { // Assignment to var or initializing assignment to let. if (var->IsStackAllocated() || var->IsContextSlot()) { MemOperand location = VarOperand(var, a1); @@ -1893,7 +1931,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ sw(v0, location); if (var->IsContextSlot()) { __ mov(a3, v0); - __ RecordWrite(a1, Operand(Context::SlotOffset(var->index())), a2, a3); + int offset = Context::SlotOffset(var->index()); + __ RecordWriteContextSlot( + a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs); } } else { ASSERT(var->IsLookupSlot()); @@ -2121,10 +2161,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, __ push(a1); // Push the strict mode flag. In harmony mode every eval call // is a strict mode eval call. - StrictModeFlag strict_mode = strict_mode_flag(); - if (FLAG_harmony_block_scoping) { - strict_mode = kStrictMode; - } + StrictModeFlag strict_mode = + FLAG_harmony_scoping ? kStrictMode : strict_mode_flag(); __ li(a1, Operand(Smi::FromInt(strict_mode))); __ push(a1); @@ -2170,7 +2208,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { // context lookup in the runtime system. Label done; Variable* var = proxy->var(); - if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) { + if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) { Label slow; EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow); // Push the function and resolve eval. @@ -2671,18 +2709,23 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) { // Check that the object is a JS object but take special care of JS // functions to make sure they have 'Function' as their class. + // Assume that there are only two callable types, and one of them is at + // either end of the type range for JS object types. Saves extra comparisons. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ GetObjectType(v0, v0, a1); // Map is now in v0. __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE)); - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. 
- STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - __ Branch(&function, ge, a1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE)); + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE)); - // Check if the constructor in the map is a function. + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE)); + // Assume that there is no larger type. + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); + + // Check if the constructor in the map is a JS function. __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset)); __ GetObjectType(v0, a1, a1); __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE)); @@ -2861,7 +2904,9 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) { __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset)); // Update the write barrier. Save the value as it will be // overwritten by the write barrier code and is needed afterward. - __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3); + __ mov(a2, v0); + __ RecordWriteField( + a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs); __ bind(&done); context()->Plug(v0); @@ -3154,16 +3199,31 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { __ sw(scratch1, MemOperand(index2, 0)); __ sw(scratch2, MemOperand(index1, 0)); - Label new_space; - __ InNewSpace(elements, scratch1, eq, &new_space); + Label no_remembered_set; + __ CheckPageFlag(elements, + scratch1, + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + ne, + &no_remembered_set); // Possible optimization: do a check that both values are Smis // (or them and test against Smi mask). - __ mov(scratch1, elements); - __ RecordWriteHelper(elements, index1, scratch2); - __ RecordWriteHelper(scratch1, index2, scratch2); // scratch1 holds elements. + // We are swapping two objects in an array and the incremental marker never + // pauses in the middle of scanning a single object. Therefore the + // incremental marker is not disturbed, so we don't need to call the + // RecordWrite stub that notifies the incremental marker. + __ RememberedSetHelper(elements, + index1, + scratch2, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); + __ RememberedSetHelper(elements, + index2, + scratch2, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); - __ bind(&new_space); + __ bind(&no_remembered_set); // We are done. Drop elements from the stack, and return undefined. 
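The EmitClassOf and typeof-"function" changes in the surrounding hunks lean on the layout documented by the new STATIC_ASSERTs: exactly two callable instance types, one at each end of the spec-object range. A toy sketch of why that reduces the callable check to two equality comparisons; the enum values are illustrative only, not the real instance-type numbers:

enum InstanceTypeSketch {
  FIRST_SPEC_OBJECT_TYPE_SKETCH = 100,     // callable (function-proxy end)
  FIRST_NONCALLABLE_SPEC_OBJECT_SKETCH,    // == FIRST_SPEC_OBJECT_TYPE + 1
  SOME_NONCALLABLE_OBJECT_SKETCH,
  LAST_NONCALLABLE_SPEC_OBJECT_SKETCH,     // == LAST_SPEC_OBJECT_TYPE - 1
  LAST_SPEC_OBJECT_TYPE_SKETCH             // callable (JS function end)
};

// With callables pinned to both ends of the range, EmitClassOf can branch to
// its "function" label with two comparisons instead of a range check plus
// extra callable tests.
bool IsCallableSpecObject(InstanceTypeSketch type) {
  return type == FIRST_SPEC_OBJECT_TYPE_SKETCH ||
         type == LAST_SPEC_OBJECT_TYPE_SKETCH;
}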
__ Drop(3); __ LoadRoot(v0, Heap::kUndefinedValueRootIndex); @@ -3921,10 +3981,14 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { } void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, - Handle<String> check, - Label* if_true, - Label* if_false, - Label* fall_through) { + Handle<String> check) { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + { AccumulatorValueContext context(this); VisitForTypeofValue(expr); } @@ -3964,10 +4028,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->function_symbol())) { __ JumpIfSmi(v0, if_false); - __ GetObjectType(v0, a1, v0); // Leave map in a1. - Split(ge, v0, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE), - if_true, if_false, fall_through); - + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ GetObjectType(v0, v0, a1); + __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE)); + Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE), + if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->object_symbol())) { __ JumpIfSmi(v0, if_false); if (!FLAG_harmony_typeof) { @@ -3986,18 +4051,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else { if (if_false != fall_through) __ jmp(if_false); } -} - - -void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr, - Label* if_true, - Label* if_false, - Label* fall_through) { - VisitForAccumulatorValue(expr); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - - __ LoadRoot(at, Heap::kUndefinedValueRootIndex); - Split(eq, v0, Operand(at), if_true, if_false, fall_through); + context()->Plug(if_true, if_false); } @@ -4005,9 +4059,12 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); SetSourcePosition(expr->position()); + // First we try a fast inlined version of the compare when one of + // the operands is a literal. + if (TryLiteralCompare(expr)) return; + // Always perform the comparison for its control flow. Pack the result // into the expression's context after the comparison is performed. - Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4015,13 +4072,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - // First we try a fast inlined version of the compare when one of - // the operands is a literal. - if (TryLiteralCompare(expr, if_true, if_false, fall_through)) { - context()->Plug(if_true, if_false); - return; - } - Token::Value op = expr->op(); VisitForStackValue(expr->left()); switch (op) { @@ -4046,11 +4096,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { default: { VisitForAccumulatorValue(expr->right()); Condition cc = eq; - bool strict = false; switch (op) { case Token::EQ_STRICT: - strict = true; - // Fall through. 
case Token::EQ: cc = eq; __ mov(a0, result_register()); @@ -4109,8 +4156,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } -void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { - Comment cmnt(masm_, "[ CompareToNull"); +void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, + Expression* sub_expr, + NilValue nil) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4118,15 +4166,21 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - VisitForAccumulatorValue(expr->expression()); + VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); + Heap::RootListIndex nil_value = nil == kNullValue ? + Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; __ mov(a0, result_register()); - __ LoadRoot(a1, Heap::kNullValueRootIndex); - if (expr->is_strict()) { + __ LoadRoot(a1, nil_value); + if (expr->op() == Token::EQ_STRICT) { Split(eq, a0, Operand(a1), if_true, if_false, fall_through); } else { + Heap::RootListIndex other_nil_value = nil == kNullValue ? + Heap::kUndefinedValueRootIndex : + Heap::kNullValueRootIndex; __ Branch(if_true, eq, a0, Operand(a1)); - __ LoadRoot(a1, Heap::kUndefinedValueRootIndex); + __ LoadRoot(a1, other_nil_value); __ Branch(if_true, eq, a0, Operand(a1)); __ And(at, a0, Operand(kSmiTagMask)); __ Branch(if_false, eq, at, Operand(zero_reg)); diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc index a76c215a4..fb33eb665 100644 --- a/deps/v8/src/mips/ic-mips.cc +++ b/deps/v8/src/mips/ic-mips.cc @@ -210,7 +210,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Update the write barrier. Make sure not to clobber the value. __ mov(scratch1, value); - __ RecordWrite(elements, scratch2, scratch1); + __ RecordWrite( + elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs); } @@ -504,21 +505,22 @@ static void GenerateCallMiss(MacroAssembler* masm, // Get the receiver of the function from the stack. __ lw(a3, MemOperand(sp, argc*kPointerSize)); - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push the receiver and the name of the function. - __ Push(a3, a2); + // Push the receiver and the name of the function. + __ Push(a3, a2); - // Call the entry. - __ li(a0, Operand(2)); - __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate))); + // Call the entry. + __ li(a0, Operand(2)); + __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate))); - CEntryStub stub(1); - __ CallStub(&stub); + CEntryStub stub(1); + __ CallStub(&stub); - // Move result to a1 and leave the internal frame. - __ mov(a1, v0); - __ LeaveInternalFrame(); + // Move result to a1 and leave the internal frame. + __ mov(a1, v0); + } // Check if the receiver is a global object of some sort. // This can happen only for regular CallIC but not KeyedCallIC. @@ -649,12 +651,13 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // This branch is taken when calling KeyedCallIC_Miss is neither required // nor beneficial. __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3); - __ EnterInternalFrame(); - __ push(a2); // Save the key. - __ Push(a1, a2); // Pass the receiver and the key. - __ CallRuntime(Runtime::kKeyedGetProperty, 2); - __ pop(a2); // Restore the key. 
- __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(a2); // Save the key. + __ Push(a1, a2); // Pass the receiver and the key. + __ CallRuntime(Runtime::kKeyedGetProperty, 2); + __ pop(a2); // Restore the key. + } __ mov(a1, v0); __ jmp(&do_call); @@ -902,9 +905,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { MemOperand mapped_location = GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, ¬in, &slow); __ sw(a0, mapped_location); - // Verify mapped_location MemOperand is register, with no offset. - ASSERT_EQ(mapped_location.offset(), 0); - __ RecordWrite(a3, mapped_location.rm(), t5); + __ Addu(t2, a3, t1); + __ mov(t5, a0); + __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs); __ Ret(USE_DELAY_SLOT); __ mov(v0, a0); // (In delay slot) return the value stored in v0. __ bind(¬in); @@ -912,8 +915,9 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { MemOperand unmapped_location = GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow); __ sw(a0, unmapped_location); - ASSERT_EQ(unmapped_location.offset(), 0); - __ RecordWrite(a3, unmapped_location.rm(), t5); + __ Addu(t2, a3, t0); + __ mov(t5, a0); + __ RecordWrite(a3, t2, t5, kRAHasNotBeenSaved, kDontSaveFPRegs); __ Ret(USE_DELAY_SLOT); __ mov(v0, a0); // (In delay slot) return the value stored in v0. __ bind(&slow); @@ -1201,109 +1205,144 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // -- a2 : receiver // -- ra : return address // ----------------------------------- - - Label slow, fast, array, extra, exit; + Label slow, array, extra, check_if_double_array; + Label fast_object_with_map_check, fast_object_without_map_check; + Label fast_double_with_map_check, fast_double_without_map_check; // Register usage. Register value = a0; Register key = a1; Register receiver = a2; Register elements = a3; // Elements array of the receiver. - // t0 is used as ip in the arm version. - // t3-t4 are used as temporaries. + Register elements_map = t2; + Register receiver_map = t3; + // t0 and t1 are used as general scratch registers. // Check that the key is a smi. __ JumpIfNotSmi(key, &slow); // Check that the object isn't a smi. __ JumpIfSmi(receiver, &slow); - // Get the map of the object. - __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check that the receiver does not require access checks. We need // to do this because this generic stub does not perform map checks. - __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset)); + __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded)); __ Branch(&slow, ne, t0, Operand(zero_reg)); // Check if the object is a JS array or not. - __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset)); - - __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE)); + __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); + __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE)); // Check that the object is some kind of JSObject. - __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE)); - __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE)); - __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE)); + __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE)); // Object case: Check key against length in the elements array. __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); - // Check that the object is in fast mode and writable. 
- __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex); - __ Branch(&slow, ne, t3, Operand(t0)); // Check array bounds. Both the key and the length of FixedArray are smis. __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); - __ Branch(&fast, lo, key, Operand(t0)); - // Fall thru to slow if un-tagged index >= length. + __ Branch(&fast_object_with_map_check, lo, key, Operand(t0)); // Slow case, handle jump to runtime. __ bind(&slow); - // Entry registers are intact. // a0: value. // a1: key. // a2: receiver. - GenerateRuntimeSetProperty(masm, strict_mode); // Extra capacity case: Check if there is extra capacity to // perform the store and update the length. Used for adding one // element to the array by writing to array[array.length]. - __ bind(&extra); + // Condition code from comparing key and array length is still available. // Only support writing to array[array.length]. __ Branch(&slow, ne, key, Operand(t0)); // Check for room in the elements backing store. // Both the key and the length of FixedArray are smis. __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ Branch(&slow, hs, key, Operand(t0)); + __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ Branch(&check_if_double_array, ne, elements_map, + Operand(masm->isolate()->factory()->fixed_array_map())); // Calculate key + 1 as smi. - STATIC_ASSERT(0 == kSmiTag); - __ Addu(t3, key, Operand(Smi::FromInt(1))); - __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset)); - __ Branch(&fast); - + STATIC_ASSERT(kSmiTag == 0); + __ Addu(t0, key, Operand(Smi::FromInt(1))); + __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ Branch(&fast_object_without_map_check); + + __ bind(&check_if_double_array); + __ Branch(&slow, ne, elements_map, + Operand(masm->isolate()->factory()->fixed_double_array_map())); + // Add 1 to key, and go to common element store code for doubles. + STATIC_ASSERT(kSmiTag == 0); + __ Addu(t0, key, Operand(Smi::FromInt(1))); + __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ jmp(&fast_double_without_map_check); // Array case: Get the length and the elements array from the JS // array. Check that the array is in fast mode (and writable); if it // is the length is always a smi. - __ bind(&array); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); - __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex); - __ Branch(&slow, ne, t3, Operand(t0)); // Check the key against the length in the array. __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset)); __ Branch(&extra, hs, key, Operand(t0)); // Fall through to fast case. - __ bind(&fast); - // Fast case, store the value to the elements backing store. - __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize); - __ Addu(t4, t4, Operand(t1)); - __ sw(value, MemOperand(t4)); - // Skip write barrier if the written value is a smi. - __ JumpIfSmi(value, &exit); - + __ bind(&fast_object_with_map_check); + Register scratch_value = t0; + Register address = t1; + __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ Branch(&fast_double_with_map_check, ne, elements_map, + Operand(masm->isolate()->factory()->fixed_array_map())); + __ bind(&fast_object_without_map_check); + // Smi stores don't require further checks. 
+ Label non_smi_value; + __ JumpIfNotSmi(value, &non_smi_value); + // It's irrelevant whether array is smi-only or not when writing a smi. + __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize); + __ Addu(address, address, scratch_value); + __ sw(value, MemOperand(address)); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, value); + + __ bind(&non_smi_value); + // Escape to slow case when writing non-smi into smi-only array. + __ CheckFastObjectElements(receiver_map, scratch_value, &slow); + // Fast elements array, store the value to the elements backing store. + __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize); + __ Addu(address, address, scratch_value); + __ sw(value, MemOperand(address)); // Update write barrier for the elements array address. - __ Subu(t3, t4, Operand(elements)); - - __ RecordWrite(elements, Operand(t3), t4, t5); - __ bind(&exit); - - __ mov(v0, a0); // Return the value written. + __ mov(v0, value); // Preserve the value which is returned. + __ RecordWrite(elements, + address, + value, + kRAHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); __ Ret(); + + __ bind(&fast_double_with_map_check); + // Check for fast double array case. If this fails, call through to the + // runtime. + __ Branch(&slow, ne, elements_map, + Operand(masm->isolate()->factory()->fixed_double_array_map())); + __ bind(&fast_double_without_map_check); + __ StoreNumberToDoubleElements(value, + key, + receiver, + elements, + t0, + t1, + t2, + t3, + &slow); + __ Ret(USE_DELAY_SLOT); + __ mov(v0, value); } @@ -1572,7 +1611,8 @@ void PatchInlinedSmiCode(Address address) { // If the instruction following the call is not a andi at, rx, #yyy, nothing // was inlined. Instr instr = Assembler::instr_at(andi_instruction_address); - if (!Assembler::IsAndImmediate(instr)) { + if (!(Assembler::IsAndImmediate(instr) && + Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) { return; } diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc index 4c48ef183..2964fbc86 100644 --- a/deps/v8/src/mips/macro-assembler-mips.cc +++ b/deps/v8/src/mips/macro-assembler-mips.cc @@ -42,7 +42,8 @@ namespace internal { MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) : Assembler(arg_isolate, buffer, size), generating_stub_(false), - allow_stub_calls_(true) { + allow_stub_calls_(true), + has_frame_(false) { if (isolate() != NULL) { code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), isolate()); @@ -80,46 +81,15 @@ void MacroAssembler::StoreRoot(Register source, } -void MacroAssembler::RecordWriteHelper(Register object, - Register address, - Register scratch) { - if (emit_debug_code()) { - // Check that the object is not in new space. - Label not_in_new_space; - InNewSpace(object, scratch, ne, ¬_in_new_space); - Abort("new-space object passed to RecordWriteHelper"); - bind(¬_in_new_space); - } - - // Calculate page address: Clear bits from 0 to kPageSizeBits. - if (mips32r2) { - Ins(object, zero_reg, 0, kPageSizeBits); - } else { - // The Ins macro is slow on r1, so use shifts instead. - srl(object, object, kPageSizeBits); - sll(object, object, kPageSizeBits); - } - - // Calculate region number. - Ext(address, address, Page::kRegionSizeLog2, - kPageSizeBits - Page::kRegionSizeLog2); - - // Mark region dirty. 
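The rewritten KeyedStoreIC::GenerateGeneric above splits the fast path by elements kind: smi stores go straight to the backing store, heap-object stores into a FAST_ELEMENTS array take the write barrier, stores into smi-only arrays bail out, and double arrays route through StoreNumberToDoubleElements. A schematic model of that dispatch (the flags and names here are illustrative, not the real heap layout):

    enum StorePath { kStoreSmi, kStoreObjectWithWriteBarrier, kStoreDouble, kGoSlow };

    StorePath ChooseStorePath(bool elements_is_fixed_array,
                              bool elements_is_fixed_double_array,
                              bool receiver_has_fast_object_elements,
                              bool value_is_smi) {
      if (elements_is_fixed_array) {
        if (value_is_smi) return kStoreSmi;  // No barrier needed; legal even for
                                             // smi-only arrays.
        // A heap object written into a smi-only array needs an elements
        // transition, so the stub escapes to the runtime.
        return receiver_has_fast_object_elements ? kStoreObjectWithWriteBarrier
                                                 : kGoSlow;
      }
      if (elements_is_fixed_double_array) return kStoreDouble;  // Unboxed store.
      return kGoSlow;
    }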
- lw(scratch, MemOperand(object, Page::kDirtyFlagOffset)); - li(at, Operand(1)); - sllv(at, at, address); - or_(scratch, scratch, at); - sw(scratch, MemOperand(object, Page::kDirtyFlagOffset)); -} - - // Push and pop all registers that can hold pointers. void MacroAssembler::PushSafepointRegisters() { // Safepoints expect a block of kNumSafepointRegisters values on the // stack, so adjust the stack for unsaved registers. const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; ASSERT(num_unsaved >= 0); - Subu(sp, sp, Operand(num_unsaved * kPointerSize)); + if (num_unsaved > 0) { + Subu(sp, sp, Operand(num_unsaved * kPointerSize)); + } MultiPush(kSafepointSavedRegisters); } @@ -127,7 +97,9 @@ void MacroAssembler::PushSafepointRegisters() { void MacroAssembler::PopSafepointRegisters() { const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; MultiPop(kSafepointSavedRegisters); - Addu(sp, sp, Operand(num_unsaved * kPointerSize)); + if (num_unsaved > 0) { + Addu(sp, sp, Operand(num_unsaved * kPointerSize)); + } } @@ -180,6 +152,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { + UNIMPLEMENTED_MIPS(); // General purpose registers are pushed last on the stack. int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize; int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; @@ -187,8 +160,6 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { } - - void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc, @@ -200,38 +171,53 @@ void MacroAssembler::InNewSpace(Register object, } -// Will clobber 4 registers: object, scratch0, scratch1, at. The -// register 'object' contains a heap object pointer. The heap object -// tag is shifted away. -void MacroAssembler::RecordWrite(Register object, - Operand offset, - Register scratch0, - Register scratch1) { - // The compiled code assumes that record write doesn't change the - // context register, so we check that none of the clobbered - // registers are cp. - ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp)); - +void MacroAssembler::RecordWriteField( + Register object, + int offset, + Register value, + Register dst, + RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + ASSERT(!AreAliased(value, dst, t8, object)); + // First, check if a write barrier is even needed. The tests below + // catch stores of Smis. Label done; - // First, test that the object is not in the new space. We cannot set - // region marks for new space pages. - InNewSpace(object, scratch0, eq, &done); + // Skip barrier if writing a smi. + if (smi_check == INLINE_SMI_CHECK) { + JumpIfSmi(value, &done); + } - // Add offset into the object. - Addu(scratch0, object, offset); + // Although the object register is tagged, the offset is relative to the start + // of the object, so so offset must be a multiple of kPointerSize. + ASSERT(IsAligned(offset, kPointerSize)); - // Record the actual write. 
- RecordWriteHelper(object, scratch0, scratch1); + Addu(dst, object, Operand(offset - kHeapObjectTag)); + if (emit_debug_code()) { + Label ok; + And(t8, dst, Operand((1 << kPointerSizeLog2) - 1)); + Branch(&ok, eq, t8, Operand(zero_reg)); + stop("Unaligned cell in write barrier"); + bind(&ok); + } + + RecordWrite(object, + dst, + value, + ra_status, + save_fp, + remembered_set_action, + OMIT_SMI_CHECK); bind(&done); - // Clobber all input registers when running with the debug-code flag + // Clobber clobbered input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - li(object, Operand(BitCast<int32_t>(kZapValue))); - li(scratch0, Operand(BitCast<int32_t>(kZapValue))); - li(scratch1, Operand(BitCast<int32_t>(kZapValue))); + li(value, Operand(BitCast<int32_t>(kZapValue + 4))); + li(dst, Operand(BitCast<int32_t>(kZapValue + 8))); } } @@ -241,29 +227,97 @@ void MacroAssembler::RecordWrite(Register object, // tag is shifted away. void MacroAssembler::RecordWrite(Register object, Register address, - Register scratch) { + Register value, + RAStatus ra_status, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { + ASSERT(!AreAliased(object, address, value, t8)); + ASSERT(!AreAliased(object, address, value, t9)); // The compiled code assumes that record write doesn't change the // context register, so we check that none of the clobbered // registers are cp. - ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp)); + ASSERT(!address.is(cp) && !value.is(cp)); Label done; - // First, test that the object is not in the new space. We cannot set - // region marks for new space pages. - InNewSpace(object, scratch, eq, &done); + if (smi_check == INLINE_SMI_CHECK) { + ASSERT_EQ(0, kSmiTag); + And(t8, value, Operand(kSmiTagMask)); + Branch(&done, eq, t8, Operand(zero_reg)); + } + + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + eq, + &done); + CheckPageFlag(object, + value, // Used as scratch. + MemoryChunk::kPointersFromHereAreInterestingMask, + eq, + &done); // Record the actual write. - RecordWriteHelper(object, address, scratch); + if (ra_status == kRAHasNotBeenSaved) { + push(ra); + } + RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + CallStub(&stub); + if (ra_status == kRAHasNotBeenSaved) { + pop(ra); + } bind(&done); - // Clobber all input registers when running with the debug-code flag + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - li(object, Operand(BitCast<int32_t>(kZapValue))); - li(address, Operand(BitCast<int32_t>(kZapValue))); - li(scratch, Operand(BitCast<int32_t>(kZapValue))); + li(address, Operand(BitCast<int32_t>(kZapValue + 12))); + li(value, Operand(BitCast<int32_t>(kZapValue + 16))); + } +} + + +void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. + Register address, + Register scratch, + SaveFPRegsMode fp_mode, + RememberedSetFinalAction and_then) { + Label done; + if (FLAG_debug_code) { + Label ok; + JumpIfNotInNewSpace(object, scratch, &ok); + stop("Remembered set pointer is in new space"); + bind(&ok); + } + // Load store buffer top. + ExternalReference store_buffer = + ExternalReference::store_buffer_top(isolate()); + li(t8, Operand(store_buffer)); + lw(scratch, MemOperand(t8)); + // Store pointer to buffer and increment buffer top. 
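The new RecordWrite/RecordWriteField replace the old region-marking helper with the incremental-marking barrier: the store is ignored when the value is a smi or when neither the value's page nor the object's page is flagged as interesting; only then is RecordWriteStub called. A sketch of that filter (the MemoryChunk layout and bit positions are assumptions; only the two flag names come from the code above):

    struct MemoryChunk { unsigned flags; };
    const unsigned kPointersToHereAreInterestingMask   = 1u << 0;  // placeholder
    const unsigned kPointersFromHereAreInterestingMask = 1u << 1;  // placeholder

    bool NeedsWriteBarrier(bool value_is_smi,
                           const MemoryChunk* value_page,
                           const MemoryChunk* object_page) {
      if (value_is_smi) return false;  // INLINE_SMI_CHECK: smis hold no pointer.
      // The value must live on a page the collector is tracking (new space or
      // a page being incrementally marked)...
      if ((value_page->flags & kPointersToHereAreInterestingMask) == 0) return false;
      // ...and the object must be on a page that records outgoing pointers.
      if ((object_page->flags & kPointersFromHereAreInterestingMask) == 0) return false;
      return true;  // Fall through to the RecordWriteStub call.
    }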
+ sw(address, MemOperand(scratch)); + Addu(scratch, scratch, kPointerSize); + // Write back new top of buffer. + sw(scratch, MemOperand(t8)); + // Call stub on end of buffer. + // Check for end of buffer. + And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); + if (and_then == kFallThroughAtEnd) { + Branch(&done, eq, t8, Operand(zero_reg)); + } else { + ASSERT(and_then == kReturnAtEnd); + Ret(eq, t8, Operand(zero_reg)); + } + push(ra); + StoreBufferOverflowStub store_buffer_overflow = + StoreBufferOverflowStub(fp_mode); + CallStub(&store_buffer_overflow); + pop(ra); + bind(&done); + if (and_then == kReturnAtEnd) { + Ret(); } } @@ -707,7 +761,7 @@ void MacroAssembler::MultiPush(RegList regs) { int16_t stack_offset = num_to_push * kPointerSize; Subu(sp, sp, Operand(stack_offset)); - for (int16_t i = kNumRegisters; i > 0; i--) { + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { if ((regs & (1 << i)) != 0) { stack_offset -= kPointerSize; sw(ToRegister(i), MemOperand(sp, stack_offset)); @@ -746,7 +800,7 @@ void MacroAssembler::MultiPop(RegList regs) { void MacroAssembler::MultiPopReversed(RegList regs) { int16_t stack_offset = 0; - for (int16_t i = kNumRegisters; i > 0; i--) { + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { if ((regs & (1 << i)) != 0) { lw(ToRegister(i), MemOperand(sp, stack_offset)); stack_offset += kPointerSize; @@ -762,7 +816,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) { int16_t stack_offset = num_to_push * kDoubleSize; Subu(sp, sp, Operand(stack_offset)); - for (int16_t i = kNumRegisters; i > 0; i--) { + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { if ((regs & (1 << i)) != 0) { stack_offset -= kDoubleSize; sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); @@ -804,7 +858,7 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) { CpuFeatures::Scope scope(FPU); int16_t stack_offset = 0; - for (int16_t i = kNumRegisters; i > 0; i--) { + for (int16_t i = kNumRegisters - 1; i >= 0; i--) { if ((regs & (1 << i)) != 0) { ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); stack_offset += kDoubleSize; @@ -814,6 +868,21 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) { } +void MacroAssembler::FlushICache(Register address, unsigned instructions) { + RegList saved_regs = kJSCallerSaved | ra.bit(); + MultiPush(saved_regs); + AllowExternalCallThatCantCauseGC scope(this); + + // Save to a0 in case address == t0. + Move(a0, address); + PrepareCallCFunction(2, t0); + + li(a1, instructions * kInstrSize); + CallCFunction(ExternalReference::flush_icache_function(isolate()), 2); + MultiPop(saved_regs); +} + + void MacroAssembler::Ext(Register rt, Register rs, uint16_t pos, @@ -940,11 +1009,9 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd, mtc1(at, FPURegister::from_code(scratch.code() + 1)); mtc1(zero_reg, scratch); // Test if scratch > fd. - c(OLT, D, fd, scratch); - - Label simple_convert; // If fd < 2^31 we can convert it normally. - bc1t(&simple_convert); + Label simple_convert; + BranchF(&simple_convert, NULL, lt, fd, scratch); // First we subtract 2^31 from fd, then trunc it to rs // and add 2^31 to rs. @@ -964,6 +1031,102 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd, } +void MacroAssembler::BranchF(Label* target, + Label* nan, + Condition cc, + FPURegister cmp1, + FPURegister cmp2, + BranchDelaySlot bd) { + if (cc == al) { + Branch(bd, target); + return; + } + + ASSERT(nan || target); + // Check for unordered (NaN) cases. 
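RememberedSetHelper, completed above, appends the slot address to the store buffer, bumps the top pointer, and tests an overflow bit encoded in the new top. A minimal model of that sequence (the buffer layout and bit value are assumptions made for illustration):

    #include <cstdint>

    const uintptr_t kStoreBufferOverflowBit = uintptr_t(1) << 16;  // assumed
    uintptr_t* store_buffer_top;  // stands in for the store_buffer_top external

    bool RememberSlot(uintptr_t slot_address) {
      *store_buffer_top++ = slot_address;  // sw(address); Addu(top, kPointerSize)
      // The buffer is sized and aligned so that running off its end sets this
      // bit in the top pointer, which the generated code tests with And+Branch.
      return (reinterpret_cast<uintptr_t>(store_buffer_top) &
              kStoreBufferOverflowBit) != 0;  // true -> StoreBufferOverflowStub
    }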
+ if (nan) { + c(UN, D, cmp1, cmp2); + bc1t(nan); + } + + if (target) { + // Here NaN cases were either handled by this function or are assumed to + // have been handled by the caller. + // Unsigned conditions are treated as their signed counterpart. + switch (cc) { + case Uless: + case less: + c(OLT, D, cmp1, cmp2); + bc1t(target); + break; + case Ugreater: + case greater: + c(ULE, D, cmp1, cmp2); + bc1f(target); + break; + case Ugreater_equal: + case greater_equal: + c(ULT, D, cmp1, cmp2); + bc1f(target); + break; + case Uless_equal: + case less_equal: + c(OLE, D, cmp1, cmp2); + bc1t(target); + break; + case eq: + c(EQ, D, cmp1, cmp2); + bc1t(target); + break; + case ne: + c(EQ, D, cmp1, cmp2); + bc1f(target); + break; + default: + CHECK(0); + }; + } + + if (bd == PROTECT) { + nop(); + } +} + + +void MacroAssembler::Move(FPURegister dst, double imm) { + ASSERT(CpuFeatures::IsEnabled(FPU)); + static const DoubleRepresentation minus_zero(-0.0); + static const DoubleRepresentation zero(0.0); + DoubleRepresentation value(imm); + // Handle special values first. + bool force_load = dst.is(kDoubleRegZero); + if (value.bits == zero.bits && !force_load) { + mov_d(dst, kDoubleRegZero); + } else if (value.bits == minus_zero.bits && !force_load) { + neg_d(dst, kDoubleRegZero); + } else { + uint32_t lo, hi; + DoubleAsTwoUInt32(imm, &lo, &hi); + // Move the low part of the double into the lower of the corresponding FPU + // register of FPU register pair. + if (lo != 0) { + li(at, Operand(lo)); + mtc1(at, dst); + } else { + mtc1(zero_reg, dst); + } + // Move the high part of the double into the higher of the corresponding FPU + // register of FPU register pair. + if (hi != 0) { + li(at, Operand(hi)); + mtc1(at, dst.high()); + } else { + mtc1(zero_reg, dst.high()); + } + } +} + + // Tries to get a signed int32 out of a double precision floating point heap // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the // 32bits signed integer range. @@ -1062,6 +1225,53 @@ void MacroAssembler::ConvertToInt32(Register source, } +void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, + FPURegister result, + DoubleRegister double_input, + Register scratch1, + Register except_flag, + CheckForInexactConversion check_inexact) { + ASSERT(CpuFeatures::IsSupported(FPU)); + CpuFeatures::Scope scope(FPU); + + int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions. + + if (check_inexact == kDontCheckForInexactConversion) { + // Ingore inexact exceptions. + except_mask &= ~kFCSRInexactFlagMask; + } + + // Save FCSR. + cfc1(scratch1, FCSR); + // Disable FPU exceptions. + ctc1(zero_reg, FCSR); + + // Do operation based on rounding mode. + switch (rounding_mode) { + case kRoundToNearest: + round_w_d(result, double_input); + break; + case kRoundToZero: + trunc_w_d(result, double_input); + break; + case kRoundToPlusInf: + ceil_w_d(result, double_input); + break; + case kRoundToMinusInf: + floor_w_d(result, double_input); + break; + } // End of switch-statement. + + // Retrieve FCSR. + cfc1(except_flag, FCSR); + // Restore FCSR. + ctc1(scratch1, FCSR); + + // Check for fpu exceptions. 
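The new BranchF lowers each generic condition to exactly one c.cond.d compare followed by bc1t (branch if the FP condition bit is set) or bc1f (branch if it is clear), after first routing NaN operands to the nan label with c.un.d + bc1t. The switch above, restated as a table:

    // Restatement of the BranchF lowering; "true" means bc1t, "false" bc1f.
    enum Cond { kLess, kLessEqual, kGreater, kGreaterEqual, kEqual, kNotEqual };
    struct FpLowering { const char* compare; bool branch_if_bit_set; };

    FpLowering LowerFpBranch(Cond cc) {
      switch (cc) {
        case kLess:         return {"c.olt.d", true};
        case kLessEqual:    return {"c.ole.d", true};
        case kGreater:      return {"c.ule.d", false};  // ordered and >
        case kGreaterEqual: return {"c.ult.d", false};  // ordered and >=
        case kEqual:        return {"c.eq.d",  true};
        case kNotEqual:     return {"c.eq.d",  false};
      }
      return {"", false};  // unreachable for the conditions handled above
    }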
+ And(except_flag, except_flag, Operand(except_mask)); +} + + void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, Register input_high, Register input_low, @@ -1148,22 +1358,21 @@ void MacroAssembler::EmitECMATruncate(Register result, FPURegister double_input, FPURegister single_scratch, Register scratch, - Register input_high, - Register input_low) { + Register scratch2, + Register scratch3) { CpuFeatures::Scope scope(FPU); - ASSERT(!input_high.is(result)); - ASSERT(!input_low.is(result)); - ASSERT(!input_low.is(input_high)); + ASSERT(!scratch2.is(result)); + ASSERT(!scratch3.is(result)); + ASSERT(!scratch3.is(scratch2)); ASSERT(!scratch.is(result) && - !scratch.is(input_high) && - !scratch.is(input_low)); + !scratch.is(scratch2) && + !scratch.is(scratch3)); ASSERT(!single_scratch.is(double_input)); Label done; Label manual; // Clear cumulative exception flags and save the FCSR. - Register scratch2 = input_high; cfc1(scratch2, FCSR); ctc1(zero_reg, FCSR); // Try a conversion to a signed integer. @@ -1180,6 +1389,8 @@ void MacroAssembler::EmitECMATruncate(Register result, Branch(&done, eq, scratch, Operand(zero_reg)); // Load the double value and perform a manual truncation. + Register input_high = scratch2; + Register input_low = scratch3; Move(input_low, input_high, double_input); EmitOutOfInt32RangeTruncate(result, input_high, @@ -1211,15 +1422,6 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) -bool MacroAssembler::UseAbsoluteCodePointers() { - if (is_trampoline_emitted()) { - return true; - } else { - return false; - } -} - - void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) { BranchShort(offset, bdslot); } @@ -1233,11 +1435,18 @@ void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs, void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) { - bool is_label_near = is_near(L); - if (UseAbsoluteCodePointers() && !is_label_near) { - Jr(L, bdslot); + if (L->is_bound()) { + if (is_near(L)) { + BranchShort(L, bdslot); + } else { + Jr(L, bdslot); + } } else { - BranchShort(L, bdslot); + if (is_trampoline_emitted()) { + Jr(L, bdslot); + } else { + BranchShort(L, bdslot); + } } } @@ -1245,15 +1454,26 @@ void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) { void MacroAssembler::Branch(Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { - bool is_label_near = is_near(L); - if (UseAbsoluteCodePointers() && !is_label_near) { - Label skip; - Condition neg_cond = NegateCondition(cond); - BranchShort(&skip, neg_cond, rs, rt); - Jr(L, bdslot); - bind(&skip); + if (L->is_bound()) { + if (is_near(L)) { + BranchShort(L, cond, rs, rt, bdslot); + } else { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + Jr(L, bdslot); + bind(&skip); + } } else { - BranchShort(L, cond, rs, rt, bdslot); + if (is_trampoline_emitted()) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + Jr(L, bdslot); + bind(&skip); + } else { + BranchShort(L, cond, rs, rt, bdslot); + } } } @@ -1276,8 +1496,8 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs, Register scratch = at; if (rt.is_reg()) { - // We don't want any other register but scratch clobbered. - ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_)); + // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or + // rt. 
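The reworked Branch/BranchAndLink wrappers above drop UseAbsoluteCodePointers and decide per label whether a short PC-relative branch reaches or a jr/jalr-based long jump is needed. A sketch of that decision:

    enum BranchKind { kShortBranch, kLongJump };

    BranchKind SelectBranchKind(bool label_is_bound, bool label_is_near,
                                bool trampoline_emitted) {
      if (label_is_bound) {
        // Backward branch: the distance is known, so the short 16-bit form is
        // used whenever it reaches, otherwise a long jump.
        return label_is_near ? kShortBranch : kLongJump;
      }
      // Forward branch: the distance is unknown.  Once a trampoline pool has
      // been emitted the function may already be too large for short offsets,
      // so the conservative long form is chosen.
      return trampoline_emitted ? kLongJump : kShortBranch;
    }

For the conditional variants, the long form is wrapped in a short branch on the negated condition (the skip label), exactly as in the code above.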
r2 = rt.rm_; switch (cond) { case cc_always: @@ -1779,11 +1999,18 @@ void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs, void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { - bool is_label_near = is_near(L); - if (UseAbsoluteCodePointers() && !is_label_near) { - Jalr(L, bdslot); + if (L->is_bound()) { + if (is_near(L)) { + BranchAndLinkShort(L, bdslot); + } else { + Jalr(L, bdslot); + } } else { - BranchAndLinkShort(L, bdslot); + if (is_trampoline_emitted()) { + Jalr(L, bdslot); + } else { + BranchAndLinkShort(L, bdslot); + } } } @@ -1791,15 +2018,26 @@ void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { - bool is_label_near = is_near(L); - if (UseAbsoluteCodePointers() && !is_label_near) { - Label skip; - Condition neg_cond = NegateCondition(cond); - BranchShort(&skip, neg_cond, rs, rt); - Jalr(L, bdslot); - bind(&skip); + if (L->is_bound()) { + if (is_near(L)) { + BranchAndLinkShort(L, cond, rs, rt, bdslot); + } else { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + Jalr(L, bdslot); + bind(&skip); + } } else { - BranchAndLinkShort(L, cond, rs, rt, bdslot); + if (is_trampoline_emitted()) { + Label skip; + Condition neg_cond = NegateCondition(cond); + BranchShort(&skip, neg_cond, rs, rt); + Jalr(L, bdslot); + bind(&skip); + } else { + BranchAndLinkShort(L, cond, rs, rt, bdslot); + } } } @@ -2306,10 +2544,10 @@ void MacroAssembler::Push(Handle<Object> handle) { #ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { - ASSERT(allow_stub_calls()); mov(a0, zero_reg); li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); CEntryStub ces(1); + ASSERT(AllowThisStubCall(&ces)); Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } @@ -2972,15 +3210,140 @@ void MacroAssembler::CopyBytes(Register src, } +void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler) { + Label loop, entry; + Branch(&entry); + bind(&loop); + sw(filler, MemOperand(start_offset)); + Addu(start_offset, start_offset, kPointerSize); + bind(&entry); + Branch(&loop, lt, start_offset, Operand(end_offset)); +} + + void MacroAssembler::CheckFastElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_ELEMENTS == 0); + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset)); Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue)); } +void MacroAssembler::CheckFastObjectElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); + lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset)); + Branch(fail, ls, scratch, + Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + Branch(fail, hi, scratch, + Operand(Map::kMaximumBitField2FastElementValue)); +} + + +void MacroAssembler::CheckFastSmiOnlyElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset)); + Branch(fail, hi, scratch, + Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); +} + + +void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, + Register key_reg, + Register receiver_reg, + Register elements_reg, + Register scratch1, + Register 
scratch2, + Register scratch3, + Register scratch4, + Label* fail) { + Label smi_value, maybe_nan, have_double_value, is_nan, done; + Register mantissa_reg = scratch2; + Register exponent_reg = scratch3; + + // Handle smi values specially. + JumpIfSmi(value_reg, &smi_value); + + // Ensure that the object is a heap number + CheckMap(value_reg, + scratch1, + isolate()->factory()->heap_number_map(), + fail, + DONT_DO_SMI_CHECK); + + // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 + // in the exponent. + li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); + lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); + Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1)); + + lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + + bind(&have_double_value); + sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize); + Addu(scratch1, scratch1, elements_reg); + sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); + uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); + sw(exponent_reg, FieldMemOperand(scratch1, offset)); + jmp(&done); + + bind(&maybe_nan); + // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise + // it's an Infinity, and the non-NaN code path applies. + Branch(&is_nan, gt, exponent_reg, Operand(scratch1)); + lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); + Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg)); + bind(&is_nan); + // Load canonical NaN for storing into the double array. + uint64_t nan_int64 = BitCast<uint64_t>( + FixedDoubleArray::canonical_not_the_hole_nan_as_double()); + li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); + li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); + jmp(&have_double_value); + + bind(&smi_value); + Addu(scratch1, elements_reg, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); + Addu(scratch1, scratch1, scratch2); + // scratch1 is now effective address of the double element + + FloatingPointHelper::Destination destination; + if (CpuFeatures::IsSupported(FPU)) { + destination = FloatingPointHelper::kFPURegisters; + } else { + destination = FloatingPointHelper::kCoreRegisters; + } + + Register untagged_value = receiver_reg; + SmiUntag(untagged_value, value_reg); + FloatingPointHelper::ConvertIntToDouble(this, + untagged_value, + destination, + f0, + mantissa_reg, + exponent_reg, + scratch4, + f2); + if (destination == FloatingPointHelper::kFPURegisters) { + CpuFeatures::Scope scope(FPU); + sdc1(f0, MemOperand(scratch1, 0)); + } else { + sw(mantissa_reg, MemOperand(scratch1, 0)); + sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); + } + bind(&done); +} + + void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map, @@ -3171,13 +3534,18 @@ void MacroAssembler::InvokeCode(Register code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. 
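StoreNumberToDoubleElements, finished above, makes sure no arbitrary NaN bit pattern reaches a FixedDoubleArray, because one specific NaN encoding is reserved as the hole marker. A standalone sketch of the value path (the canonical pattern below is an assumption for illustration; smis are simply converted to double before the store):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    double CanonicalizeForDoubleArray(double value) {
      if (std::isnan(value)) {
        // Replace every NaN (including one that could be mistaken for the
        // hole) with a single canonical quiet NaN.
        const uint64_t kCanonicalNaNBits = 0x7FF8000000000000ULL;  // assumed
        double canonical;
        std::memcpy(&canonical, &kCanonicalNaNBits, sizeof canonical);
        return canonical;
      }
      return value;  // Infinities and ordinary numbers are stored as-is.
    }

    void StoreToDoubleElements(double* elements, int index, double value) {
      elements[index] = CanonicalizeForDoubleArray(value);
    }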
+ ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, call_wrapper, call_kind); if (flag == CALL_FUNCTION) { + call_wrapper.BeforeCall(CallSize(code)); SetCallKind(t1, call_kind); Call(code); + call_wrapper.AfterCall(); } else { ASSERT(flag == JUMP_FUNCTION); SetCallKind(t1, call_kind); @@ -3195,6 +3563,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code, RelocInfo::Mode rmode, InvokeFlag flag, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; InvokePrologue(expected, actual, code, no_reg, &done, flag, @@ -3217,6 +3588,9 @@ void MacroAssembler::InvokeFunction(Register function, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + // Contract with called JS functions requires that function is passed in a1. ASSERT(function.is(a1)); Register expected_reg = a2; @@ -3239,6 +3613,9 @@ void MacroAssembler::InvokeFunction(JSFunction* function, const ParameterCount& actual, InvokeFlag flag, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + ASSERT(function->is_compiled()); // Get the function and setup the context. @@ -3249,7 +3626,11 @@ void MacroAssembler::InvokeFunction(JSFunction* function, Handle<Code> code(function->code()); ParameterCount expected(function->shared()->formal_parameter_count()); if (V8::UseCrankshaft()) { - UNIMPLEMENTED_MIPS(); + // TODO(kasperl): For now, we always call indirectly through the + // code field in the function to allow recompilation to take effect + // without changing any of the call sites. + lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); + InvokeCode(a3, expected, actual, flag, NullCallWrapper(), call_kind); } else { InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind); } @@ -3349,14 +3730,14 @@ void MacroAssembler::GetObjectType(Register object, void MacroAssembler::CallStub(CodeStub* stub, Condition cond, Register r1, const Operand& r2) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2); } MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond, Register r1, const Operand& r2) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -3368,7 +3749,7 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond, void MacroAssembler::TailCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); Jump(stub->GetCode(), RelocInfo::CODE_TARGET); } @@ -3377,7 +3758,6 @@ MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond, Register r1, const Operand& r2) { - ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. 
Object* result; { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -3486,6 +3866,12 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn( } +bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { + if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; + return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); +} + + void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { addiu(sp, sp, num_arguments * kPointerSize); @@ -3566,7 +3952,16 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst, ASSERT(!overflow_dst.is(scratch)); ASSERT(!overflow_dst.is(left)); ASSERT(!overflow_dst.is(right)); - ASSERT(!left.is(right)); + + if (left.is(right) && dst.is(left)) { + ASSERT(!dst.is(t9)); + ASSERT(!scratch.is(t9)); + ASSERT(!left.is(t9)); + ASSERT(!right.is(t9)); + ASSERT(!overflow_dst.is(t9)); + mov(t9, right); + right = t9; + } if (dst.is(left)) { mov(scratch, left); // Preserve left. @@ -3599,10 +3994,17 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, ASSERT(!overflow_dst.is(scratch)); ASSERT(!overflow_dst.is(left)); ASSERT(!overflow_dst.is(right)); - ASSERT(!left.is(right)); ASSERT(!scratch.is(left)); ASSERT(!scratch.is(right)); + // This happens with some crankshaft code. Since Subu works fine if + // left == right, let's not make that restriction here. + if (left.is(right)) { + mov(dst, zero_reg); + mov(overflow_dst, zero_reg); + return; + } + if (dst.is(left)) { mov(scratch, left); // Preserve left. subu(dst, left, right); // Left is overwritten. @@ -3651,8 +4053,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); li(a0, Operand(function->nargs)); li(a1, Operand(ExternalReference(function, isolate()))); - CEntryStub stub(1); - stub.SaveDoubles(); + CEntryStub stub(1, kSaveFPRegs); CallStub(&stub); } @@ -3722,6 +4123,9 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference( void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { + // You can't call a builtin without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + GetBuiltinEntry(t9, id); if (flag == CALL_FUNCTION) { call_wrapper.BeforeCall(CallSize(t9)); @@ -3854,14 +4258,20 @@ void MacroAssembler::Abort(const char* msg) { RecordComment(msg); } #endif - // Disable stub call restrictions to always allow calls to abort. - AllowStubCallsScope allow_scope(this, true); li(a0, Operand(p0)); push(a0); li(a0, Operand(Smi::FromInt(p1 - p0))); push(a0); - CallRuntime(Runtime::kAbort, 2); + // Disable stub call restrictions to always allow calls to abort. + if (!has_frame_) { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. + FrameScope scope(this, StackFrame::NONE); + CallRuntime(Runtime::kAbort, 2); + } else { + CallRuntime(Runtime::kAbort, 2); + } // Will not return here. 
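SubuAndCheckForOverflow now tolerates aliased operands instead of asserting: x - x is always zero and cannot overflow, so the macro just materializes 0 for both the result and the overflow flag. A small sketch of that contract (the overflow test here uses 64-bit arithmetic for clarity rather than the sign-bit sequence the macro actually emits):

    #include <cstdint>

    void SubWithOverflowCheck(int32_t left, int32_t right,
                              int32_t* result, bool* overflow) {
      if (left == right) {  // Aliased operands: the new shortcut.
        *result = 0;
        *overflow = false;
        return;
      }
      int64_t wide = static_cast<int64_t>(left) - static_cast<int64_t>(right);
      *result = static_cast<int32_t>(wide);
      *overflow = (wide != *result);  // true on 32-bit signed overflow
    }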
if (is_trampoline_pool_blocked()) { // If the calling code cares about the exact number of @@ -4245,7 +4655,23 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, static const int kRegisterPassedArguments = 4; -void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments) { + int stack_passed_words = 0; + num_reg_arguments += 2 * num_double_arguments; + + // Up to four simple arguments are passed in registers a0..a3. + if (num_reg_arguments > kRegisterPassedArguments) { + stack_passed_words += num_reg_arguments - kRegisterPassedArguments; + } + stack_passed_words += kCArgSlotCount; + return stack_passed_words; +} + + +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, + int num_double_arguments, + Register scratch) { int frame_alignment = ActivationFrameAlignment(); // Up to four simple arguments are passed in registers a0..a3. @@ -4253,9 +4679,8 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { // mips, even though those argument slots are not normally used. // Remaining arguments are pushed on the stack, above (higher address than) // the argument slots. - int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? - 0 : num_arguments - kRegisterPassedArguments) + - kCArgSlotCount; + int stack_passed_arguments = CalculateStackPassedWords( + num_reg_arguments, num_double_arguments); if (frame_alignment > kPointerSize) { // Make stack end at alignment and make room for num_arguments - 4 words // and the original value of sp. @@ -4270,26 +4695,43 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { } +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, + Register scratch) { + PrepareCallCFunction(num_reg_arguments, 0, scratch); +} + + +void MacroAssembler::CallCFunction(ExternalReference function, + int num_reg_arguments, + int num_double_arguments) { + li(t8, Operand(function)); + CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments); +} + + +void MacroAssembler::CallCFunction(Register function, + int num_reg_arguments, + int num_double_arguments) { + CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); +} + + void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { - CallCFunctionHelper(no_reg, function, t8, num_arguments); + CallCFunction(function, num_arguments, 0); } void MacroAssembler::CallCFunction(Register function, - Register scratch, int num_arguments) { - CallCFunctionHelper(function, - ExternalReference::the_hole_value_location(isolate()), - scratch, - num_arguments); + CallCFunction(function, num_arguments, 0); } void MacroAssembler::CallCFunctionHelper(Register function, - ExternalReference function_reference, - Register scratch, - int num_arguments) { + int num_reg_arguments, + int num_double_arguments) { + ASSERT(has_frame()); // Make sure that the stack is aligned before calling a C function unless // running in the simulator. The simulator has its own alignment check which // provides more information. @@ -4317,19 +4759,15 @@ void MacroAssembler::CallCFunctionHelper(Register function, // allow preemption, so the return address in the link register // stays correct. 
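The new CalculateStackPassedWords counts every double as two register-sized arguments and always reserves the four O32 argument slots; PrepareCallCFunction and CallCFunctionHelper both call it so the two stay in sync. A direct restatement (slot counts assume the O32 ABI with 4-byte pointers):

    int CalculateStackPassedWords(int num_reg_arguments,
                                  int num_double_arguments) {
      const int kRegisterPassedArguments = 4;  // a0..a3
      const int kCArgSlotCount = 4;            // O32 reserved argument slots
      int stack_passed_words = 0;
      num_reg_arguments += 2 * num_double_arguments;  // doubles occupy two words
      if (num_reg_arguments > kRegisterPassedArguments) {
        stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
      }
      stack_passed_words += kCArgSlotCount;  // always reserved on MIPS
      return stack_passed_words;
    }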
- if (function.is(no_reg)) { - function = t9; - li(function, Operand(function_reference)); - } else if (!function.is(t9)) { + if (!function.is(t9)) { mov(t9, function); function = t9; } Call(function); - int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ? - 0 : num_arguments - kRegisterPassedArguments) + - kCArgSlotCount; + int stack_passed_arguments = CalculateStackPassedWords( + num_reg_arguments, num_double_arguments); if (OS::ActivationFrameAlignment() > kPointerSize) { lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); @@ -4342,6 +4780,235 @@ void MacroAssembler::CallCFunctionHelper(Register function, #undef BRANCH_ARGS_CHECK +void MacroAssembler::PatchRelocatedValue(Register li_location, + Register scratch, + Register new_value) { + lw(scratch, MemOperand(li_location)); + // At this point scratch is a lui(at, ...) instruction. + if (emit_debug_code()) { + And(scratch, scratch, kOpcodeMask); + Check(eq, "The instruction to patch should be a lui.", + scratch, Operand(LUI)); + lw(scratch, MemOperand(li_location)); + } + srl(t9, new_value, kImm16Bits); + Ins(scratch, t9, 0, kImm16Bits); + sw(scratch, MemOperand(li_location)); + + lw(scratch, MemOperand(li_location, kInstrSize)); + // scratch is now ori(at, ...). + if (emit_debug_code()) { + And(scratch, scratch, kOpcodeMask); + Check(eq, "The instruction to patch should be an ori.", + scratch, Operand(ORI)); + lw(scratch, MemOperand(li_location, kInstrSize)); + } + Ins(scratch, new_value, 0, kImm16Bits); + sw(scratch, MemOperand(li_location, kInstrSize)); + + // Update the I-cache so the new lui and ori can be executed. + FlushICache(li_location, 2); +} + + +void MacroAssembler::CheckPageFlag( + Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met) { + And(scratch, object, Operand(~Page::kPageAlignmentMask)); + lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); + And(scratch, scratch, Operand(mask)); + Branch(condition_met, cc, scratch, Operand(zero_reg)); +} + + +void MacroAssembler::JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black) { + HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); +} + + +void MacroAssembler::HasColor(Register object, + Register bitmap_scratch, + Register mask_scratch, + Label* has_color, + int first_bit, + int second_bit) { + ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8)); + ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9)); + + GetMarkBits(object, bitmap_scratch, mask_scratch); + + Label other_color, word_boundary; + lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + And(t8, t9, Operand(mask_scratch)); + Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg)); + // Shift left 1 by adding. + Addu(mask_scratch, mask_scratch, Operand(mask_scratch)); + Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg)); + And(t8, t9, Operand(mask_scratch)); + Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg)); + jmp(&other_color); + + bind(&word_boundary); + lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); + And(t9, t9, Operand(1)); + Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg)); + bind(&other_color); +} + + +// Detect some, but not all, common pointer-free objects. 
This is used by the +// incremental write barrier which doesn't care about oddballs (they are always +// marked black immediately so this code is not hit). +void MacroAssembler::JumpIfDataObject(Register value, + Register scratch, + Label* not_data_object) { + ASSERT(!AreAliased(value, scratch, t8, no_reg)); + Label is_data_object; + lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); + LoadRoot(t8, Heap::kHeapNumberMapRootIndex); + Branch(&is_data_object, eq, t8, Operand(scratch)); + ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + // If it's a string and it's not a cons string then it's an object containing + // no GC pointers. + lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); + Branch(not_data_object, ne, t8, Operand(zero_reg)); + bind(&is_data_object); +} + + +void MacroAssembler::GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg) { + ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); + And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); + Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); + const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; + Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits); + sll(t8, t8, kPointerSizeLog2); + Addu(bitmap_reg, bitmap_reg, t8); + li(t8, Operand(1)); + sllv(mask_reg, t8, mask_reg); +} + + +void MacroAssembler::EnsureNotWhite( + Register value, + Register bitmap_scratch, + Register mask_scratch, + Register load_scratch, + Label* value_is_white_and_not_data) { + ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8)); + GetMarkBits(value, bitmap_scratch, mask_scratch); + + // If the value is black or grey we don't need to do anything. + ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); + ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + + Label done; + + // Since both black and grey have a 1 in the first position and white does + // not have a 1 there we only need to check one bit. + lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + And(t8, mask_scratch, load_scratch); + Branch(&done, ne, t8, Operand(zero_reg)); + + if (FLAG_debug_code) { + // Check for impossible bit pattern. + Label ok; + // sll may overflow, making the check conservative. + sll(t8, mask_scratch, 1); + And(t8, load_scratch, t8); + Branch(&ok, eq, t8, Operand(zero_reg)); + stop("Impossible marking bit pattern"); + bind(&ok); + } + + // Value is white. We check whether it is data that doesn't need scanning. + // Currently only checks for HeapNumber and non-cons strings. + Register map = load_scratch; // Holds map while checking type. + Register length = load_scratch; // Holds length of object after testing type. + Label is_data_object; + + // Check for heap-number + lw(map, FieldMemOperand(value, HeapObject::kMapOffset)); + LoadRoot(t8, Heap::kHeapNumberMapRootIndex); + { + Label skip; + Branch(&skip, ne, t8, Operand(map)); + li(length, HeapNumber::kSize); + Branch(&is_data_object); + bind(&skip); + } + + // Check for strings. + ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + // If it's a string and it's not a cons string then it's an object containing + // no GC pointers. 
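GetMarkBits, shown above, turns an object address into the page's marking-bitmap cell plus a one-bit mask: the page base comes from masking off the page-offset bits, the bit index is the low bits of the word index, and the remaining bits select the cell. A C++ model under assumed constants (the real page size and cell width may differ):

    #include <cstdint>

    const int kPointerSizeLog2 = 2;   // 32-bit MIPS
    const int kBitsPerCellLog2 = 5;   // assumed: 32 mark bits per bitmap cell
    const int kPageSizeBits    = 20;  // assumed: 1 MB pages
    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << kPageSizeBits) - 1;

    void GetMarkBits(uintptr_t addr,
                     uintptr_t* page, int* cell_index, uint32_t* mask) {
      *page = addr & ~kPageAlignmentMask;  // page start
      int word_index =
          static_cast<int>((addr & kPageAlignmentMask) >> kPointerSizeLog2);
      int bit_index = word_index & ((1 << kBitsPerCellLog2) - 1);
      *cell_index   = word_index >> kBitsPerCellLog2;
      *mask         = uint32_t(1) << bit_index;  // first mark bit of the object
    }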
+ Register instance_type = load_scratch; + lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); + And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); + Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg)); + // It's a non-indirect (non-cons and non-slice) string. + // If it's external, the length is just ExternalString::kSize. + // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). + // External strings are the only ones with the kExternalStringTag bit + // set. + ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); + ASSERT_EQ(0, kConsStringTag & kExternalStringTag); + And(t8, instance_type, Operand(kExternalStringTag)); + { + Label skip; + Branch(&skip, eq, t8, Operand(zero_reg)); + li(length, ExternalString::kSize); + Branch(&is_data_object); + bind(&skip); + } + + // Sequential string, either ASCII or UC16. + // For ASCII (char-size of 1) we shift the smi tag away to get the length. + // For UC16 (char-size of 2) we just leave the smi tag in place, thereby + // getting the length multiplied by 2. + ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); + ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + lw(t9, FieldMemOperand(value, String::kLengthOffset)); + And(t8, instance_type, Operand(kStringEncodingMask)); + { + Label skip; + Branch(&skip, eq, t8, Operand(zero_reg)); + srl(t9, t9, 1); + bind(&skip); + } + Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); + And(length, length, Operand(~kObjectAlignmentMask)); + + bind(&is_data_object); + // Value is a data object, and it is white. Mark it black. Since we know + // that the object is white we can make it black by flipping one bit. + lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + Or(t8, t8, Operand(mask_scratch)); + sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); + + And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); + lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); + Addu(t8, t8, Operand(length)); + sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); + + bind(&done); +} + + void MacroAssembler::LoadInstanceDescriptors(Register map, Register descriptors) { lw(descriptors, @@ -4353,6 +5020,60 @@ void MacroAssembler::LoadInstanceDescriptors(Register map, } +void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { + ASSERT(!output_reg.is(input_reg)); + Label done; + li(output_reg, Operand(255)); + // Normal branch: nop in delay slot. + Branch(&done, gt, input_reg, Operand(output_reg)); + // Use delay slot in this branch. + Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg)); + mov(output_reg, zero_reg); // In delay slot. + mov(output_reg, input_reg); // Value is in range 0..255. + bind(&done); +} + + +void MacroAssembler::ClampDoubleToUint8(Register result_reg, + DoubleRegister input_reg, + DoubleRegister temp_double_reg) { + Label above_zero; + Label done; + Label in_bounds; + + Move(temp_double_reg, 0.0); + BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg); + + // Double value is less than zero, NaN or Inf, return 0. + mov(result_reg, zero_reg); + Branch(&done); + + // Double value is >= 255, return 255. + bind(&above_zero); + Move(temp_double_reg, 255.0); + BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg); + li(result_reg, Operand(255)); + Branch(&done); + + // In 0-255 range, round and truncate. 
+ bind(&in_bounds); + round_w_d(temp_double_reg, input_reg); + mfc1(result_reg, temp_double_reg); + bind(&done); +} + + +bool AreAliased(Register r1, Register r2, Register r3, Register r4) { + if (r1.is(r2)) return true; + if (r1.is(r3)) return true; + if (r1.is(r4)) return true; + if (r2.is(r3)) return true; + if (r2.is(r4)) return true; + if (r3.is(r4)) return true; + return false; +} + + CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), instructions_(instructions), diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h index 5dd012e93..6f81a4bd6 100644 --- a/deps/v8/src/mips/macro-assembler-mips.h +++ b/deps/v8/src/mips/macro-assembler-mips.h @@ -50,15 +50,16 @@ class JumpTarget; // trying to update gp register for position-independent-code. Whenever // MIPS generated code calls C code, it must be via t9 register. -// Registers aliases + +// Register aliases. // cp is assumed to be a callee saved register. +const Register lithiumScratchReg = s3; // Scratch register. +const Register lithiumScratchReg2 = s4; // Scratch register. +const Register condReg = s5; // Simulated (partial) condition code for mips. const Register roots = s6; // Roots array pointer. const Register cp = s7; // JavaScript context pointer. const Register fp = s8_fp; // Alias for fp. -// Registers used for condition evaluation. -const Register condReg1 = s4; -const Register condReg2 = s5; - +const DoubleRegister lithiumScratchDouble = f30; // Double scratch register. // Flags used for the AllocateInNewSpace functions. enum AllocationFlags { @@ -90,6 +91,43 @@ enum BranchDelaySlot { PROTECT }; + +enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; +enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; +enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved }; + +bool AreAliased(Register r1, Register r2, Register r3, Register r4); + + +// ----------------------------------------------------------------------------- +// Static helper functions. + +static MemOperand ContextOperand(Register context, int index) { + return MemOperand(context, Context::SlotOffset(index)); +} + + +static inline MemOperand GlobalObjectOperand() { + return ContextOperand(cp, Context::GLOBAL_INDEX); +} + + +// Generate a MemOperand for loading a field from an object. +static inline MemOperand FieldMemOperand(Register object, int offset) { + return MemOperand(object, offset - kHeapObjectTag); +} + + +// Generate a MemOperand for storing arguments 5..N on the stack +// when calling CallCFunction(). +static inline MemOperand CFunctionArgumentOperand(int index) { + ASSERT(index > kCArgSlotCount); + // Argument 5 takes the slot just past the four Arg-slots. + int offset = (index - 5) * kPointerSize + kCArgsSlotsSize; + return MemOperand(sp, offset); +} + + // MacroAssembler implements a collection of frequently used macros. 
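The CFunctionArgumentOperand helper added to the header places C arguments 5..N on the stack just past the four reserved O32 argument slots. The offset it computes, restated (4-byte pointers and four 4-byte slots assumed):

    int CFunctionArgumentOffset(int index) {  // index is 1-based, index > 4
      const int kPointerSize = 4;
      const int kCArgsSlotsSize = 4 * kPointerSize;  // the reserved a0..a3 slots
      return (index - 5) * kPointerSize + kCArgsSlotsSize;  // argument 5 -> 16
    }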
class MacroAssembler: public Assembler { public: @@ -138,21 +176,22 @@ class MacroAssembler: public Assembler { void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS); void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS); void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS); - int CallSize(Register target, COND_ARGS); + static int CallSize(Register target, COND_ARGS); void Call(Register target, COND_ARGS); - int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS); + static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS); void Call(Address target, RelocInfo::Mode rmode, COND_ARGS); - int CallSize(Handle<Code> code, - RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, - unsigned ast_id = kNoASTId, - COND_ARGS); + static int CallSize(Handle<Code> code, + RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, + unsigned ast_id = kNoASTId, + COND_ARGS); void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, unsigned ast_id = kNoASTId, COND_ARGS); void Ret(COND_ARGS); - inline void Ret(BranchDelaySlot bd) { - Ret(al, zero_reg, Operand(zero_reg), bd); + inline void Ret(BranchDelaySlot bd, Condition cond = al, + Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) { + Ret(cond, rs, rt, bd); } #undef COND_ARGS @@ -197,6 +236,8 @@ class MacroAssembler: public Assembler { mtc1(src_high, FPURegister::from_code(dst.code() + 1)); } + void Move(FPURegister dst, double imm); + // Jump unconditionally to given label. // We NEED a nop in the branch delay slot, as it used by v8, for example in // CodeGenerator::ProcessDeferred(). @@ -206,6 +247,7 @@ class MacroAssembler: public Assembler { Branch(L); } + // Load an object from the root table. void LoadRoot(Register destination, Heap::RootListIndex index); @@ -221,39 +263,127 @@ class MacroAssembler: public Assembler { Condition cond, Register src1, const Operand& src2); - // Check if object is in new space. - // scratch can be object itself, but it will be clobbered. - void InNewSpace(Register object, - Register scratch, - Condition cc, // eq for new space, ne otherwise. - Label* branch); + // --------------------------------------------------------------------------- + // GC Support + void IncrementalMarkingRecordWriteHelper(Register object, + Register value, + Register address); + + enum RememberedSetFinalAction { + kReturnAtEnd, + kFallThroughAtEnd + }; + + + // Record in the remembered set the fact that we have a pointer to new space + // at the address pointed to by the addr register. Only works if addr is not + // in new space. + void RememberedSetHelper(Register object, // Used for debug code. + Register addr, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetFinalAction and_then); + + void CheckPageFlag(Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met); + + // Check if object is in new space. Jumps if the object is not in new space. + // The register scratch can be object itself, but it will be clobbered. + void JumpIfNotInNewSpace(Register object, + Register scratch, + Label* branch) { + InNewSpace(object, scratch, ne, branch); + } + + // Check if object is in new space. Jumps if the object is in new space. + // The register scratch can be object itself, but scratch will be clobbered. + void JumpIfInNewSpace(Register object, + Register scratch, + Label* branch) { + InNewSpace(object, scratch, eq, branch); + } - // For the page containing |object| mark the region covering [address] - // dirty. 
The object address must be in the first 8K of an allocated page. - void RecordWriteHelper(Register object, - Register address, - Register scratch); - - // For the page containing |object| mark the region covering - // [object+offset] dirty. The object address must be in the first 8K - // of an allocated page. The 'scratch' registers are used in the - // implementation and all 3 registers are clobbered by the - // operation, as well as the 'at' register. RecordWrite updates the - // write barrier even when storing smis. - void RecordWrite(Register object, - Operand offset, + // Check if an object has a given incremental marking color. + void HasColor(Register object, + Register scratch0, + Register scratch1, + Label* has_color, + int first_bit, + int second_bit); + + void JumpIfBlack(Register object, Register scratch0, - Register scratch1); + Register scratch1, + Label* on_black); + + // Checks the color of an object. If the object is already grey or black + // then we just fall through, since it is already live. If it is white and + // we can determine that it doesn't need to be scanned, then we just mark it + // black and fall through. For the rest we jump to the label so the + // incremental marker can fix its assumptions. + void EnsureNotWhite(Register object, + Register scratch1, + Register scratch2, + Register scratch3, + Label* object_is_white_and_not_data); + + // Detects conservatively whether an object is data-only, i.e. it does not need to + // be scanned by the garbage collector. + void JumpIfDataObject(Register value, + Register scratch, + Label* not_data_object); + + // Notify the garbage collector that we wrote a pointer into an object. + // |object| is the object being stored into, |value| is the object being + // stored. value and scratch registers are clobbered by the operation. + // The offset is the offset from the start of the object, not the offset from + // the tagged HeapObject pointer. For use with FieldOperand(reg, off). + void RecordWriteField( + Register object, + int offset, + Register value, + Register scratch, + RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); + + // As above, but the offset has the tag presubtracted. For use with + // MemOperand(reg, off). + inline void RecordWriteContextSlot( + Register context, + int offset, + Register value, + Register scratch, + RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK) { + RecordWriteField(context, + offset + kHeapObjectTag, + value, + scratch, + ra_status, + save_fp, + remembered_set_action, + smi_check); + } - // For the page containing |object| mark the region covering - // [address] dirty. The object address must be in the first 8K of an - // allocated page. All 3 registers are clobbered by the operation, - // as well as the ip register. RecordWrite updates the write barrier - // even when storing smis. - void RecordWrite(Register object, - Register address, - Register scratch); + // For a given |object| notify the garbage collector that the slot |address| + // has been written. |value| is the object being stored. The value and + // address registers are clobbered by the operation. 
+ void RecordWrite( + Register object, + Register address, + Register value, + RAStatus ra_status, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); // --------------------------------------------------------------------------- @@ -517,6 +647,14 @@ class MacroAssembler: public Assembler { Addu(sp, sp, 2 * kPointerSize); } + // Pop three registers. Pops rightmost register first (from lower address). + void Pop(Register src1, Register src2, Register src3) { + lw(src3, MemOperand(sp, 0 * kPointerSize)); + lw(src2, MemOperand(sp, 1 * kPointerSize)); + lw(src1, MemOperand(sp, 2 * kPointerSize)); + Addu(sp, sp, 3 * kPointerSize); + } + void Pop(uint32_t count = 1) { Addu(sp, sp, Operand(count * kPointerSize)); } @@ -535,10 +673,17 @@ class MacroAssembler: public Assembler { // into register dst. void LoadFromSafepointRegisterSlot(Register dst, Register src); + // Flush the I-cache from asm code. You should use CPU::FlushICache from C. + // Does not handle errors. + void FlushICache(Register address, unsigned instructions); + // MIPS32 R2 instruction macro. void Ins(Register rt, Register rs, uint16_t pos, uint16_t size); void Ext(Register rt, Register rs, uint16_t pos, uint16_t size); + // --------------------------------------------------------------------------- + // FPU macros. These do not handle special cases like NaN or +- inf. + // Convert unsigned word to double. void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch); void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch); @@ -547,6 +692,24 @@ class MacroAssembler: public Assembler { void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch); void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch); + // Wrapper function for the different cmp/branch types. + void BranchF(Label* target, + Label* nan, + Condition cc, + FPURegister cmp1, + FPURegister cmp2, + BranchDelaySlot bd = PROTECT); + + // Alternate (inline) version for better readability with USE_DELAY_SLOT. + inline void BranchF(BranchDelaySlot bd, + Label* target, + Label* nan, + Condition cc, + FPURegister cmp1, + FPURegister cmp2) { + BranchF(target, nan, cc, cmp1, cmp2, bd); + }; + // Convert the HeapNumber pointed to by source to a 32bits signed integer // dest. If the HeapNumber does not fit into a 32bits signed integer branch // to not_int32 label. If FPU is available double_scratch is used but not @@ -558,6 +721,18 @@ class MacroAssembler: public Assembler { FPURegister double_scratch, Label *not_int32); + // Truncates a double using a specific rounding mode. + // The except_flag will contain any exceptions caused by the instruction. + // If check_inexact is kDontCheckForInexactConversion, then the inexact + // exception is masked. + void EmitFPUTruncate(FPURoundingMode rounding_mode, + FPURegister result, + DoubleRegister double_input, + Register scratch1, + Register except_flag, + CheckForInexactConversion check_inexact + = kDontCheckForInexactConversion); + // Helper for EmitECMATruncate. // This will truncate a floating-point value outside of the signed 32bit // integer range to a 32bit signed integer. @@ -579,15 +754,6 @@ class MacroAssembler: public Assembler { Register scratch2, Register scratch3); - // ------------------------------------------------------------------------- - // Activation frames. 
- - void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } - void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } - - void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } - void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } - // Enter exit frame. // argc - argument count to be dropped by LeaveExitFrame. // save_doubles - saves FPU registers on stack, currently disabled. @@ -614,6 +780,7 @@ class MacroAssembler: public Assembler { Register map, Register scratch); + // ------------------------------------------------------------------------- // JavaScript invokes. @@ -702,6 +869,13 @@ class MacroAssembler: public Assembler { Register length, Register scratch); + // Initialize fields with filler values. Fields starting at |start_offset| + // not including end_offset are overwritten with the value in |filler|. At + // the end of the loop, |start_offset| takes the value of |end_offset|. + void InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler); + // ------------------------------------------------------------------------- // Support functions. @@ -725,6 +899,31 @@ class MacroAssembler: public Assembler { Register scratch, Label* fail); + // Check if a map for a JSObject indicates that the object can have both smi + // and HeapObject elements. Jump to the specified label if it does not. + void CheckFastObjectElements(Register map, + Register scratch, + Label* fail); + + // Check if a map for a JSObject indicates that the object has fast smi only + // elements. Jump to the specified label if it does not. + void CheckFastSmiOnlyElements(Register map, + Register scratch, + Label* fail); + + // Check to see if maybe_number can be stored as a double in + // FastDoubleElements. If it can, store it at the index specified by key in + // the FastDoubleElements array elements, otherwise jump to fail. + void StoreNumberToDoubleElements(Register value_reg, + Register key_reg, + Register receiver_reg, + Register elements_reg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* fail); + // Check if the map of an object is equal to a specified map (either // given directly or as an index into the root list) and branch to // label if not. Skip the smi check if not required (object is known @@ -754,6 +953,21 @@ class MacroAssembler: public Assembler { // occurred. void IllegalOperation(int num_arguments); + + // Load and check the instance type of an object for being a string. + // Loads the type into the second argument register. + // Returns a condition that will be enabled if the object was a string. + Condition IsObjectStringType(Register obj, + Register type, + Register result) { + lw(type, FieldMemOperand(obj, HeapObject::kMapOffset)); + lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); + And(type, type, Operand(kIsNotStringMask)); + ASSERT_EQ(0, kStringTag); + return eq; + } + + // Picks out an array index from the hash field. // Register use: // hash - holds the index's hash. Clobbered. @@ -879,6 +1093,9 @@ class MacroAssembler: public Assembler { int num_arguments, int result_size); + int CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments); + // Before calling a C-function from generated code, align arguments on stack // and add space for the four mips argument slots. // After aligning the frame, non-register arguments must be stored on the @@ -888,7 +1105,11 @@ class MacroAssembler: public Assembler { // C++ code. 
// Needs a scratch register to do some arithmetic. This register will be // trashed. - void PrepareCallCFunction(int num_arguments, Register scratch); + void PrepareCallCFunction(int num_reg_arguments, + int num_double_registers, + Register scratch); + void PrepareCallCFunction(int num_reg_arguments, + Register scratch); // Arguments 1-4 are placed in registers a0 thru a3 respectively. // Arguments 5..n are stored to stack using following: @@ -900,7 +1121,13 @@ class MacroAssembler: public Assembler { // return address (unless this is somehow accounted for by the called // function). void CallCFunction(ExternalReference function, int num_arguments); - void CallCFunction(Register function, Register scratch, int num_arguments); + void CallCFunction(Register function, int num_arguments); + void CallCFunction(ExternalReference function, + int num_reg_arguments, + int num_double_arguments); + void CallCFunction(Register function, + int num_reg_arguments, + int num_double_arguments); void GetCFunctionDoubleResult(const DoubleRegister dst); // There are two ways of passing double arguments on MIPS, depending on @@ -976,6 +1203,9 @@ class MacroAssembler: public Assembler { bool generating_stub() { return generating_stub_; } void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } bool allow_stub_calls() { return allow_stub_calls_; } + void set_has_frame(bool value) { has_frame_ = value; } + bool has_frame() { return has_frame_; } + inline bool AllowThisStubCall(CodeStub* stub); // --------------------------------------------------------------------------- // Number utilities. @@ -1003,6 +1233,13 @@ class MacroAssembler: public Assembler { Addu(reg, reg, reg); } + // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow(). + void SmiTagCheckOverflow(Register reg, Register overflow) { + mov(overflow, reg); // Save original value. + addu(reg, reg, reg); + xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0. + } + void SmiTag(Register dst, Register src) { Addu(dst, src, src); } @@ -1017,10 +1254,11 @@ class MacroAssembler: public Assembler { // Jump if the register contains a smi. inline void JumpIfSmi(Register value, Label* smi_label, - Register scratch = at) { + Register scratch = at, + BranchDelaySlot bd = PROTECT) { ASSERT_EQ(0, kSmiTag); andi(scratch, value, kSmiTagMask); - Branch(smi_label, eq, scratch, Operand(zero_reg)); + Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); } // Jump if the register contains a non-smi. @@ -1090,13 +1328,29 @@ class MacroAssembler: public Assembler { Register scratch2, Label* failure); + void ClampUint8(Register output_reg, Register input_reg); + + void ClampDoubleToUint8(Register result_reg, + DoubleRegister input_reg, + DoubleRegister temp_double_reg); + + void LoadInstanceDescriptors(Register map, Register descriptors); + + // Activation support. + void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); + + // Patch the relocated value (lui/ori pair). + void PatchRelocatedValue(Register li_location, + Register scratch, + Register new_value); + private: void CallCFunctionHelper(Register function, - ExternalReference function_reference, - Register scratch, - int num_arguments); + int num_reg_arguments, + int num_double_arguments); void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT); void BranchShort(int16_t offset, Condition cond, Register rs, @@ -1132,25 +1386,33 @@ class MacroAssembler: public Assembler { // the function in the 'resolved' flag. 
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved); - // Activation support. - void EnterFrame(StackFrame::Type type); - void LeaveFrame(StackFrame::Type type); - void InitializeNewString(Register string, Register length, Heap::RootListIndex map_index, Register scratch1, Register scratch2); + // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. + void InNewSpace(Register object, + Register scratch, + Condition cond, // eq for new space, ne otherwise. + Label* branch); + + // Helper for finding the mark bits for an address. Afterwards, the + // bitmap register points at the word with the mark bits and the mask + // the position of the first bit. Leaves addr_reg unchanged. + inline void GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg); + // Compute memory operands for safepoint stack slots. static int SafepointRegisterStackIndex(int reg_code); MemOperand SafepointRegisterSlot(Register reg); MemOperand SafepointRegistersAndDoublesSlot(Register reg); - bool UseAbsoluteCodePointers(); - bool generating_stub_; bool allow_stub_calls_; + bool has_frame_; // This handle will be patched with the code object on installation. Handle<Object> code_object_; @@ -1191,34 +1453,6 @@ class CodePatcher { }; -// ----------------------------------------------------------------------------- -// Static helper functions. - -static MemOperand ContextOperand(Register context, int index) { - return MemOperand(context, Context::SlotOffset(index)); -} - - -static inline MemOperand GlobalObjectOperand() { - return ContextOperand(cp, Context::GLOBAL_INDEX); -} - - -// Generate a MemOperand for loading a field from an object. -static inline MemOperand FieldMemOperand(Register object, int offset) { - return MemOperand(object, offset - kHeapObjectTag); -} - - -// Generate a MemOperand for storing arguments 5..N on the stack -// when calling CallCFunction(). -static inline MemOperand CFunctionArgumentOperand(int index) { - ASSERT(index > kCArgSlotCount); - // Argument 5 takes the slot just past the four Arg-slots. - int offset = (index - 5) * kPointerSize + kCArgsSlotsSize; - return MemOperand(sp, offset); -} - #ifdef GENERATED_CODE_COVERAGE #define CODE_COVERAGE_STRINGIFY(x) #x diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc index 63e836f22..9db5c5bed 100644 --- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc +++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc @@ -377,9 +377,12 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase( // Isolate. __ li(a3, Operand(ExternalReference::isolate_address())); - ExternalReference function = - ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); - __ CallCFunction(function, argument_count); + { + AllowExternalCallThatCantCauseGC scope(masm_); + ExternalReference function = + ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + __ CallCFunction(function, argument_count); + } // Restore regexp engine registers. __ MultiPop(regexp_registers_to_retain); @@ -607,6 +610,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) { // Entry code: __ bind(&entry_label_); + + // Tell the system that we have a stack frame. Because the type is MANUAL, + // no code is generated. + FrameScope scope(masm_, StackFrame::MANUAL); + + // Actually emit code to start a new stack frame. // Push arguments // Save callee-save registers. // Start new stack frame. 
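The static MemOperand helpers moved within macro-assembler-mips.h above hide two small pieces of address arithmetic: FieldMemOperand subtracts the heap-object tag from the field offset, and CFunctionArgumentOperand places C arguments 5..N just past the four reserved O32 argument slots. A minimal standalone sketch of that arithmetic, assuming the usual values for this port (a heap-object tag of 1, 4-byte pointers, four argument slots); FieldOffset and CArgumentOffset are invented names for the illustration and are not part of V8:

#include <cassert>
#include <cstdio>

const int kHeapObjectTag = 1;    // Assumed: heap pointers carry a tag of 1.
const int kPointerSize = 4;      // Assumed: 32-bit MIPS.
const int kCArgSlotCount = 4;    // Assumed: O32 reserves four argument slots.
const int kCArgsSlotsSize = kCArgSlotCount * kPointerSize;

// Mirrors FieldMemOperand: a field access must undo the pointer tag, so the
// displacement is the raw field offset minus the tag.
int FieldOffset(int field_offset) {
  return field_offset - kHeapObjectTag;
}

// Mirrors CFunctionArgumentOperand: argument 5 sits just past the four
// reserved slots, and each further argument adds one pointer size.
int CArgumentOffset(int index) {
  assert(index > kCArgSlotCount);
  return (index - 5) * kPointerSize + kCArgsSlotsSize;
}

int main() {
  std::printf("field at offset 8 -> displacement %d\n", FieldOffset(8));  // 7
  std::printf("C argument 5      -> sp + %d\n", CArgumentOffset(5));      // 16
  std::printf("C argument 6      -> sp + %d\n", CArgumentOffset(6));      // 20
  return 0;
}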
@@ -1244,13 +1253,14 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) { if (stack_alignment < kPointerSize) stack_alignment = kPointerSize; // Stack is already aligned for call, so decrement by alignment // to make room for storing the return address. - __ Subu(sp, sp, Operand(stack_alignment)); - __ sw(ra, MemOperand(sp, 0)); - __ mov(a0, sp); + __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize)); + const int return_address_offset = kCArgsSlotsSize; + __ Addu(a0, sp, return_address_offset); + __ sw(ra, MemOperand(a0, 0)); __ mov(t9, t1); __ Call(t9); - __ lw(ra, MemOperand(sp, 0)); - __ Addu(sp, sp, Operand(stack_alignment)); + __ lw(ra, MemOperand(sp, return_address_offset)); + __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize)); __ Jump(ra); } diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc index 5b949734f..4bad0a2cc 100644 --- a/deps/v8/src/mips/stub-cache-mips.cc +++ b/deps/v8/src/mips/stub-cache-mips.cc @@ -432,7 +432,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the now unused name_reg as a scratch register. - __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch); + __ mov(name_reg, a0); + __ RecordWriteField(receiver_reg, + offset, + name_reg, + scratch, + kRAHasNotBeenSaved, + kDontSaveFPRegs); } else { // Write to the properties array. int offset = index * kPointerSize + FixedArray::kHeaderSize; @@ -445,7 +451,13 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Ok to clobber receiver_reg and name_reg, since we return. - __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg); + __ mov(name_reg, a0); + __ RecordWriteField(scratch, + offset, + name_reg, + receiver_reg, + kRAHasNotBeenSaved, + kDontSaveFPRegs); } // Return the value (register v0). @@ -554,9 +566,10 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) { } -static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, - const CallOptimization& optimization, - int argc) { +static MaybeObject* GenerateFastApiDirectCall( + MacroAssembler* masm, + const CallOptimization& optimization, + int argc) { // ----------- S t a t e ------------- // -- sp[0] : holder (set by CheckPrototypes) // -- sp[4] : callee js function @@ -595,6 +608,7 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, const int kApiStackSpace = 4; + FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); // NOTE: the O32 abi requires a0 to hold a special pointer when returning a @@ -626,6 +640,7 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm, ExternalReference(&fun, ExternalReference::DIRECT_API_CALL, masm->isolate()); + AllowExternalCallThatCantCauseGC scope(masm); return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace); } @@ -804,7 +819,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { miss_label); // Call a runtime function to load the interceptor property. - __ EnterInternalFrame(); + FrameScope scope(masm, StackFrame::INTERNAL); // Save the name_ register across the call. __ push(name_); @@ -822,7 +837,8 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Restore the name_ register. __ pop(name_); - __ LeaveInternalFrame(); + + // Leave the internal frame. 
} void LoadWithInterceptor(MacroAssembler* masm, @@ -831,19 +847,20 @@ class CallInterceptorCompiler BASE_EMBEDDED { JSObject* holder_obj, Register scratch, Label* interceptor_succeeded) { - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(holder, name_); + __ Push(holder, name_); - CompileCallLoadPropertyWithInterceptor(masm, - receiver, - holder, - name_, - holder_obj); + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); - __ pop(name_); // Restore the name. - __ pop(receiver); // Restore the holder. - __ LeaveInternalFrame(); + __ pop(name_); // Restore the name. + __ pop(receiver); // Restore the holder. + } // If interceptor returns no-result sentinel, call the constant function. __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex); @@ -1256,7 +1273,9 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, const int kApiStackSpace = 1; + FrameScope frame_scope(masm(), StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); + // Create AccessorInfo instance on the stack above the exit frame with // scratch2 (internal::Object **args_) as the data. __ sw(a2, MemOperand(sp, kPointerSize)); @@ -1317,40 +1336,42 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Save necessary data before invoking an interceptor. // Requires a frame to make GC aware of pushed pointers. - __ EnterInternalFrame(); + { + FrameScope frame_scope(masm(), StackFrame::INTERNAL); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - // CALLBACKS case needs a receiver to be passed into C++ callback. - __ Push(receiver, holder_reg, name_reg); - } else { - __ Push(holder_reg, name_reg); - } + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + // CALLBACKS case needs a receiver to be passed into C++ callback. + __ Push(receiver, holder_reg, name_reg); + } else { + __ Push(holder_reg, name_reg); + } - // Invoke an interceptor. Note: map checks from receiver to - // interceptor's holder has been compiled before (see a caller - // of this method). - CompileCallLoadPropertyWithInterceptor(masm(), - receiver, - holder_reg, - name_reg, - interceptor_holder); - - // Check if interceptor provided a value for property. If it's - // the case, return immediately. - Label interceptor_failed; - __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); - __ Branch(&interceptor_failed, eq, v0, Operand(scratch1)); - __ LeaveInternalFrame(); - __ Ret(); + // Invoke an interceptor. Note: map checks from receiver to + // interceptor's holder has been compiled before (see a caller + // of this method). + CompileCallLoadPropertyWithInterceptor(masm(), + receiver, + holder_reg, + name_reg, + interceptor_holder); + + // Check if interceptor provided a value for property. If it's + // the case, return immediately. + Label interceptor_failed; + __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); + __ Branch(&interceptor_failed, eq, v0, Operand(scratch1)); + frame_scope.GenerateLeaveFrame(); + __ Ret(); - __ bind(&interceptor_failed); - __ pop(name_reg); - __ pop(holder_reg); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - __ pop(receiver); - } + __ bind(&interceptor_failed); + __ pop(name_reg); + __ pop(holder_reg); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + __ pop(receiver); + } - __ LeaveInternalFrame(); + // Leave the internal frame. 
+ } // Check that the maps from interceptor's holder to lookup's holder // haven't changed. And load lookup's holder into |holder| register. @@ -1580,7 +1601,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, DONT_DO_SMI_CHECK); if (argc == 1) { // Otherwise fall through to call the builtin. - Label exit, with_write_barrier, attempt_to_grow_elements; + Label attempt_to_grow_elements; // Get the array's length into v0 and calculate new length. __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1594,29 +1615,51 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, // Check if we could survive without allocation. __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0)); + // Check if value is a smi. + Label with_write_barrier; + __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize)); + __ JumpIfNotSmi(t0, &with_write_barrier); + // Save new length. __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset)); // Push the element. - __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize)); // We may need a register containing the address end_elements below, // so write back the value in end_elements. __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize); __ Addu(end_elements, elements, end_elements); const int kEndElementsOffset = FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; - __ sw(t0, MemOperand(end_elements, kEndElementsOffset)); - __ Addu(end_elements, end_elements, kPointerSize); + __ Addu(end_elements, end_elements, kEndElementsOffset); + __ sw(t0, MemOperand(end_elements)); // Check for a smi. - __ JumpIfNotSmi(t0, &with_write_barrier); - __ bind(&exit); __ Drop(argc + 1); __ Ret(); __ bind(&with_write_barrier); - __ InNewSpace(elements, t0, eq, &exit); - __ RecordWriteHelper(elements, end_elements, t0); + + __ lw(t2, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ CheckFastSmiOnlyElements(t2, t2, &call_builtin); + + // Save new length. + __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + + // Push the element. + // We may need a register containing the address end_elements below, + // so write back the value in end_elements. + __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize); + __ Addu(end_elements, elements, end_elements); + __ Addu(end_elements, end_elements, kEndElementsOffset); + __ sw(t0, MemOperand(end_elements)); + + __ RecordWrite(elements, + end_elements, + t0, + kRAHasNotBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); __ Drop(argc + 1); __ Ret(); @@ -1628,6 +1671,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ Branch(&call_builtin); } + __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize)); + // Growing elements that are SMI-only requires special handling in case + // the new element is non-Smi. For now, delegate to the builtin. + Label no_fast_elements_check; + __ JumpIfSmi(a2, &no_fast_elements_check); + __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ CheckFastObjectElements(t3, t3, &call_builtin); + __ bind(&no_fast_elements_check); + ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address( masm()->isolate()); @@ -1653,8 +1705,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, // Update new_space_allocation_top. __ sw(t2, MemOperand(t3)); // Push the argument. - __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize)); - __ sw(t2, MemOperand(end_elements)); + __ sw(a2, MemOperand(end_elements)); // Fill the rest with holes. 
__ LoadRoot(t2, Heap::kTheHoleValueRootIndex); for (int i = 1; i < kAllocationDelta; i++) { @@ -2551,7 +2602,12 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object, ? CALL_AS_FUNCTION : CALL_AS_METHOD; if (V8::UseCrankshaft()) { - UNIMPLEMENTED_MIPS(); + // TODO(kasperl): For now, we always call indirectly through the + // code field in the function to allow recompilation to take effect + // without changing any of the call sites. + __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); + __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION, + NullCallWrapper(), call_kind); } else { __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET, JUMP_FUNCTION, call_kind); @@ -2718,6 +2774,16 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // Store the value in the cell. __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset)); __ mov(v0, a0); // Stored value must be returned in v0. + + // This trashes a0 but the value is returned in v0 anyway. + __ RecordWriteField(t0, + JSGlobalPropertyCell::kValueOffset, + a0, + a2, + kRAHasNotBeenSaved, + kDontSaveFPRegs, + OMIT_REMEMBERED_SET); + Counters* counters = masm()->isolate()->counters(); __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3); __ Ret(); @@ -3116,7 +3182,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) { } -MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic( +MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic( MapList* receiver_maps, CodeList* handler_ics) { // ----------- S t a t e ------------- @@ -3210,9 +3276,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { } -MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic( +MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic( MapList* receiver_maps, - CodeList* handler_ics) { + CodeList* handler_stubs, + MapList* transitioned_maps) { // ----------- S t a t e ------------- // -- a0 : value // -- a1 : key @@ -3225,10 +3292,18 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic( int receiver_count = receiver_maps->length(); __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); - for (int current = 0; current < receiver_count; ++current) { - Handle<Map> map(receiver_maps->at(current)); - Handle<Code> code(handler_ics->at(current)); - __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map)); + for (int i = 0; i < receiver_count; ++i) { + Handle<Map> map(receiver_maps->at(i)); + Handle<Code> code(handler_stubs->at(i)); + if (transitioned_maps->at(i) == NULL) { + __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map)); + } else { + Label next_map; + __ Branch(&next_map, eq, a3, Operand(map)); + __ li(t0, Operand(Handle<Map>(transitioned_maps->at(i)))); + __ Jump(code, RelocInfo::CODE_TARGET); + __ bind(&next_map); + } } __ bind(&miss); @@ -3457,6 +3532,7 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) { case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: @@ -3553,6 +3629,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( } break; case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3828,7 +3905,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset)); // Check that the index is in range. 
- __ SmiUntag(t0, key); __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset)); // Unsigned comparison catches both negative and too-large values. __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1)); @@ -3836,7 +3912,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // Handle both smis and HeapNumbers in the fast path. Go to the // runtime for all other kinds of values. // a3: external array. - // t0: key (integer). if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) { // Double to pixel conversion is only implemented in the runtime for now. @@ -3848,7 +3923,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset)); // a3: base pointer of external storage. - // t0: key (integer). // t1: value (integer). switch (elements_kind) { @@ -3865,33 +3939,36 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ mov(v0, t1); // Value is in range 0..255. __ bind(&done); __ mov(t1, v0); - __ addu(t8, a3, t0); + + __ srl(t8, key, 1); + __ addu(t8, a3, t8); __ sb(t1, MemOperand(t8, 0)); } break; case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ addu(t8, a3, t0); + __ srl(t8, key, 1); + __ addu(t8, a3, t8); __ sb(t1, MemOperand(t8, 0)); break; case EXTERNAL_SHORT_ELEMENTS: case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ sll(t8, t0, 1); - __ addu(t8, a3, t8); + __ addu(t8, a3, key); __ sh(t1, MemOperand(t8, 0)); break; case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ sll(t8, t0, 2); + __ sll(t8, key, 1); __ addu(t8, a3, t8); __ sw(t1, MemOperand(t8, 0)); break; case EXTERNAL_FLOAT_ELEMENTS: // Perform int-to-float conversion and store to memory. + __ SmiUntag(t0, key); StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4); break; case EXTERNAL_DOUBLE_ELEMENTS: - __ sll(t8, t0, 3); + __ sll(t8, key, 2); __ addu(a3, a3, t8); // a3: effective address of the double element FloatingPointHelper::Destination destination; @@ -3913,6 +3990,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } break; case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3921,12 +3999,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } // Entry registers are intact, a0 holds the value which is the return value. - __ mov(v0, value); + __ mov(v0, a0); __ Ret(); if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) { // a3: external array. - // t0: index (integer). __ bind(&check_heap_number); __ GetObjectType(value, t1, t2); __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE)); @@ -3934,7 +4011,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset)); // a3: base pointer of external storage. - // t0: key (integer). // The WebGL specification leaves the behavior of storing NaN and // +/-Infinity into integer arrays basically undefined. 
For more @@ -3947,11 +4023,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { __ cvt_s_d(f0, f0); - __ sll(t8, t0, 2); + __ sll(t8, key, 1); __ addu(t8, a3, t8); __ swc1(f0, MemOperand(t8, 0)); } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - __ sll(t8, t0, 3); + __ sll(t8, key, 2); __ addu(t8, a3, t8); __ sdc1(f0, MemOperand(t8, 0)); } else { @@ -3960,18 +4036,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ addu(t8, a3, t0); + __ srl(t8, key, 1); + __ addu(t8, a3, t8); __ sb(t3, MemOperand(t8, 0)); break; case EXTERNAL_SHORT_ELEMENTS: case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ sll(t8, t0, 1); - __ addu(t8, a3, t8); + __ addu(t8, a3, key); __ sh(t3, MemOperand(t8, 0)); break; case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ sll(t8, t0, 2); + __ sll(t8, key, 1); __ addu(t8, a3, t8); __ sw(t3, MemOperand(t8, 0)); break; @@ -3979,6 +4055,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3989,7 +4066,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // Entry registers are intact, a0 holds the value // which is the return value. - __ mov(v0, value); + __ mov(v0, a0); __ Ret(); } else { // FPU is not available, do manual conversions. @@ -4044,13 +4121,13 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ or_(t3, t7, t6); __ bind(&done); - __ sll(t9, a1, 2); + __ sll(t9, key, 1); __ addu(t9, a2, t9); __ sw(t3, MemOperand(t9, 0)); // Entry registers are intact, a0 holds the value which is the return // value. - __ mov(v0, value); + __ mov(v0, a0); __ Ret(); __ bind(&nan_or_infinity_or_zero); @@ -4068,6 +4145,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // t8: effective address of destination element. 
__ sw(t4, MemOperand(t8, 0)); __ sw(t3, MemOperand(t8, Register::kSizeInBytes)); + __ mov(v0, a0); __ Ret(); } else { bool is_signed_type = IsElementTypeSigned(elements_kind); @@ -4130,18 +4208,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ addu(t8, a3, t0); + __ srl(t8, key, 1); + __ addu(t8, a3, t8); __ sb(t3, MemOperand(t8, 0)); break; case EXTERNAL_SHORT_ELEMENTS: case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ sll(t8, t0, 1); - __ addu(t8, a3, t8); + __ addu(t8, a3, key); __ sh(t3, MemOperand(t8, 0)); break; case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ sll(t8, t0, 2); + __ sll(t8, key, 1); __ addu(t8, a3, t8); __ sw(t3, MemOperand(t8, 0)); break; @@ -4149,6 +4227,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -4298,8 +4377,10 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( } -void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, - bool is_js_array) { +void KeyedStoreStubCompiler::GenerateStoreFastElement( + MacroAssembler* masm, + bool is_js_array, + ElementsKind elements_kind) { // ----------- S t a t e ------------- // -- a0 : value // -- a1 : key @@ -4308,7 +4389,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, // -- a3 : scratch // -- a4 : scratch (elements) // ----------------------------------- - Label miss_force_generic; + Label miss_force_generic, transition_elements_kind; Register value_reg = a0; Register key_reg = a1; @@ -4342,14 +4423,32 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, // Compare smis. __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch)); - __ Addu(scratch, - elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize); - __ Addu(scratch3, scratch2, scratch); - __ sw(value_reg, MemOperand(scratch3)); - __ RecordWrite(scratch, Operand(scratch2), receiver_reg , elements_reg); - + if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + __ JumpIfNotSmi(value_reg, &transition_elements_kind); + __ Addu(scratch, + elements_reg, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize); + __ Addu(scratch, scratch, scratch2); + __ sw(value_reg, MemOperand(scratch)); + } else { + ASSERT(elements_kind == FAST_ELEMENTS); + __ Addu(scratch, + elements_reg, + Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); + __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize); + __ Addu(scratch, scratch, scratch2); + __ sw(value_reg, MemOperand(scratch)); + __ mov(receiver_reg, value_reg); + ASSERT(elements_kind == FAST_ELEMENTS); + __ RecordWrite(elements_reg, // Object. + scratch, // Address. + receiver_reg, // Value. + kRAHasNotBeenSaved, + kDontSaveFPRegs); + } // value_reg (a0) is preserved. // Done. 
__ Ret(); @@ -4358,6 +4457,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ Jump(ic, RelocInfo::CODE_TARGET); + + __ bind(&transition_elements_kind); + Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); + __ Jump(ic_miss, RelocInfo::CODE_TARGET); } @@ -4375,15 +4478,15 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- t2 : scratch (exponent_reg) // -- t3 : scratch4 // ----------------------------------- - Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value; + Label miss_force_generic, transition_elements_kind; Register value_reg = a0; Register key_reg = a1; Register receiver_reg = a2; - Register scratch = a3; - Register elements_reg = t0; - Register mantissa_reg = t1; - Register exponent_reg = t2; + Register elements_reg = a3; + Register scratch1 = t0; + Register scratch2 = t1; + Register scratch3 = t2; Register scratch4 = t3; // This stub is meant to be tail-jumped to, the receiver must already @@ -4395,90 +4498,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Check that the key is within bounds. if (is_js_array) { - __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); + __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); } else { - __ lw(scratch, + __ lw(scratch1, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); } // Compare smis, unsigned compare catches both negative and out-of-bound // indexes. - __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch)); - - // Handle smi values specially. - __ JumpIfSmi(value_reg, &smi_value); - - // Ensure that the object is a heap number - __ CheckMap(value_reg, - scratch, - masm->isolate()->factory()->heap_number_map(), - &miss_force_generic, - DONT_DO_SMI_CHECK); - - // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 - // in the exponent. - __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32)); - __ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); - __ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch)); - - __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - - __ bind(&have_double_value); - __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize); - __ Addu(scratch, elements_reg, Operand(scratch4)); - __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize)); - uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); - __ sw(exponent_reg, FieldMemOperand(scratch, offset)); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, value_reg); // In delay slot. - - __ bind(&maybe_nan); - // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise - // it's an Infinity, and the non-NaN code path applies. - __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32)); - __ Branch(&is_nan, gt, exponent_reg, Operand(scratch)); - __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - __ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg)); - - __ bind(&is_nan); - // Load canonical NaN for storing into the double array. 
- uint64_t nan_int64 = BitCast<uint64_t>( - FixedDoubleArray::canonical_not_the_hole_nan_as_double()); - __ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64))); - __ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32))); - __ jmp(&have_double_value); - - __ bind(&smi_value); - __ Addu(scratch, elements_reg, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); - __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize); - __ Addu(scratch, scratch, scratch4); - // scratch is now effective address of the double element - - FloatingPointHelper::Destination destination; - if (CpuFeatures::IsSupported(FPU)) { - destination = FloatingPointHelper::kFPURegisters; - } else { - destination = FloatingPointHelper::kCoreRegisters; - } + __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1)); + + __ StoreNumberToDoubleElements(value_reg, + key_reg, + receiver_reg, + elements_reg, + scratch1, + scratch2, + scratch3, + scratch4, + &transition_elements_kind); - Register untagged_value = receiver_reg; - __ SmiUntag(untagged_value, value_reg); - FloatingPointHelper::ConvertIntToDouble( - masm, - untagged_value, - destination, - f0, - mantissa_reg, - exponent_reg, - scratch4, - f2); - if (destination == FloatingPointHelper::kFPURegisters) { - CpuFeatures::Scope scope(FPU); - __ sdc1(f0, MemOperand(scratch, 0)); - } else { - __ sw(mantissa_reg, MemOperand(scratch, 0)); - __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes)); - } __ Ret(USE_DELAY_SLOT); __ mov(v0, value_reg); // In delay slot. @@ -4487,6 +4525,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); __ Jump(ic, RelocInfo::CODE_TARGET); + + __ bind(&transition_elements_kind); + Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); + __ Jump(ic_miss, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc index a791dbba2..7a3fd090d 100644 --- a/deps/v8/src/mksnapshot.cc +++ b/deps/v8/src/mksnapshot.cc @@ -312,7 +312,8 @@ int main(int argc, char** argv) { } // If we don't do this then we end up with a stray root pointing at the // context even after we have disposed of the context. - HEAP->CollectAllGarbage(true); + // TODO(gc): request full compaction? 
+ HEAP->CollectAllGarbage(i::Heap::kNoGCFlags); i::Object* raw_context = *(v8::Utils::OpenHandle(*context)); context.Dispose(); CppByteSink sink(argv[1]); diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index 8de7162ab..6d2cf5f72 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -94,6 +94,9 @@ void HeapObject::HeapObjectVerify() { case BYTE_ARRAY_TYPE: ByteArray::cast(this)->ByteArrayVerify(); break; + case FREE_SPACE_TYPE: + FreeSpace::cast(this)->FreeSpaceVerify(); + break; case EXTERNAL_PIXEL_ARRAY_TYPE: ExternalPixelArray::cast(this)->ExternalPixelArrayVerify(); break; @@ -207,6 +210,11 @@ void ByteArray::ByteArrayVerify() { } +void FreeSpace::FreeSpaceVerify() { + ASSERT(IsFreeSpace()); +} + + void ExternalPixelArray::ExternalPixelArrayVerify() { ASSERT(IsExternalPixelArray()); } @@ -260,7 +268,7 @@ void JSObject::JSObjectVerify() { (map()->inobject_properties() + properties()->length() - map()->NextFreePropertyIndex())); } - ASSERT_EQ(map()->has_fast_elements(), + ASSERT_EQ((map()->has_fast_elements() || map()->has_fast_smi_only_elements()), (elements()->map() == GetHeap()->fixed_array_map() || elements()->map() == GetHeap()->fixed_cow_array_map())); ASSERT(map()->has_fast_elements() == HasFastElements()); @@ -322,7 +330,8 @@ void FixedDoubleArray::FixedDoubleArrayVerify() { double value = get_scalar(i); ASSERT(!isnan(value) || (BitCast<uint64_t>(value) == - BitCast<uint64_t>(canonical_not_the_hole_nan_as_double()))); + BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) || + ((BitCast<uint64_t>(value) & Double::kSignMask) != 0)); } } } @@ -387,6 +396,7 @@ void JSFunction::JSFunctionVerify() { CHECK(IsJSFunction()); VerifyObjectField(kPrototypeOrInitialMapOffset); VerifyObjectField(kNextFunctionLinkOffset); + CHECK(code()->IsCode()); CHECK(next_function_link()->IsUndefined() || next_function_link()->IsJSFunction()); } @@ -446,9 +456,8 @@ void Oddball::OddballVerify() { } else { ASSERT(number->IsSmi()); int value = Smi::cast(number)->value(); - // Hidden oddballs have negative smis. - const int kLeastHiddenOddballNumber = -4; ASSERT(value <= 1); + // Hidden oddballs have negative smis. 
ASSERT(value >= kLeastHiddenOddballNumber); } } @@ -463,6 +472,7 @@ void JSGlobalPropertyCell::JSGlobalPropertyCellVerify() { void Code::CodeVerify() { CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()), kCodeAlignment)); + relocation_info()->Verify(); Address last_gc_pc = NULL; for (RelocIterator it(this); !it.done(); it.next()) { it.rinfo()->Verify(); @@ -488,7 +498,7 @@ void JSWeakMap::JSWeakMapVerify() { CHECK(IsJSWeakMap()); JSObjectVerify(); VerifyHeapPointer(table()); - ASSERT(table()->IsHashTable()); + ASSERT(table()->IsHashTable() || table()->IsUndefined()); } @@ -535,13 +545,14 @@ void JSRegExp::JSRegExpVerify() { void JSProxy::JSProxyVerify() { - ASSERT(IsJSProxy()); + CHECK(IsJSProxy()); VerifyPointer(handler()); + ASSERT(hash()->IsSmi() || hash()->IsUndefined()); } void JSFunctionProxy::JSFunctionProxyVerify() { - ASSERT(IsJSFunctionProxy()); + CHECK(IsJSFunctionProxy()); JSProxyVerify(); VerifyPointer(call_trap()); VerifyPointer(construct_trap()); diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index bb24a2f85..cebf9be07 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -43,8 +43,11 @@ #include "isolate.h" #include "property.h" #include "spaces.h" +#include "store-buffer.h" #include "v8memory.h" +#include "incremental-marking.h" + namespace v8 { namespace internal { @@ -80,16 +83,7 @@ PropertyDetails PropertyDetails::AsDeleted() { type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \ void holder::set_##name(type* value, WriteBarrierMode mode) { \ WRITE_FIELD(this, offset, value); \ - CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode); \ - } - - -// GC-safe accessors do not use HeapObject::GetHeap(), but access TLS instead. -#define ACCESSORS_GCSAFE(holder, name, type, offset) \ - type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \ - void holder::set_##name(type* value, WriteBarrierMode mode) { \ - WRITE_FIELD(this, offset, value); \ - CONDITIONAL_WRITE_BARRIER(HEAP, this, offset, mode); \ + CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \ } @@ -147,6 +141,12 @@ bool Object::IsHeapObject() { } +bool Object::NonFailureIsHeapObject() { + ASSERT(!this->IsFailure()); + return (reinterpret_cast<intptr_t>(this) & kSmiTagMask) != 0; +} + + bool Object::IsHeapNumber() { return Object::IsHeapObject() && HeapObject::cast(this)->map()->instance_type() == HEAP_NUMBER_TYPE; @@ -165,6 +165,13 @@ bool Object::IsSpecObject() { } +bool Object::IsSpecFunction() { + if (!Object::IsHeapObject()) return false; + InstanceType type = HeapObject::cast(this)->map()->instance_type(); + return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE; +} + + bool Object::IsSymbol() { if (!this->IsHeapObject()) return false; uint32_t type = HeapObject::cast(this)->map()->instance_type(); @@ -402,6 +409,19 @@ bool Object::IsByteArray() { } +bool Object::IsFreeSpace() { + return Object::IsHeapObject() + && HeapObject::cast(this)->map()->instance_type() == FREE_SPACE_TYPE; +} + + +bool Object::IsFiller() { + if (!Object::IsHeapObject()) return false; + InstanceType instance_type = HeapObject::cast(this)->map()->instance_type(); + return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE; +} + + bool Object::IsExternalPixelArray() { return Object::IsHeapObject() && HeapObject::cast(this)->map()->instance_type() == @@ -509,20 +529,23 @@ Failure* Failure::cast(MaybeObject* obj) { bool Object::IsJSReceiver() { + STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); return 
IsHeapObject() && HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE; } bool Object::IsJSObject() { - return IsJSReceiver() && !IsJSProxy(); + STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE); + return IsHeapObject() && + HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE; } bool Object::IsJSProxy() { - return Object::IsHeapObject() && - (HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE || - HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE); + if (!Object::IsHeapObject()) return false; + InstanceType type = HeapObject::cast(this)->map()->instance_type(); + return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE; } @@ -642,7 +665,6 @@ bool Object::IsCode() { bool Object::IsOddball() { - ASSERT(HEAP->is_safe_to_read_maps()); return Object::IsHeapObject() && HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE; } @@ -939,21 +961,20 @@ MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) { #define WRITE_FIELD(p, offset, value) \ (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value) -// TODO(isolates): Pass heap in to these macros. -#define WRITE_BARRIER(object, offset) \ - object->GetHeap()->RecordWrite(object->address(), offset); - -// CONDITIONAL_WRITE_BARRIER must be issued after the actual -// write due to the assert validating the written value. -#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, mode) \ - if (mode == UPDATE_WRITE_BARRIER) { \ - heap->RecordWrite(object->address(), offset); \ - } else { \ - ASSERT(mode == SKIP_WRITE_BARRIER); \ - ASSERT(heap->InNewSpace(object) || \ - !heap->InNewSpace(READ_FIELD(object, offset)) || \ - Page::FromAddress(object->address())-> \ - IsRegionDirty(object->address() + offset)); \ +#define WRITE_BARRIER(heap, object, offset, value) \ + heap->incremental_marking()->RecordWrite( \ + object, HeapObject::RawField(object, offset), value); \ + if (heap->InNewSpace(value)) { \ + heap->RecordWrite(object->address(), offset); \ + } + +#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \ + if (mode == UPDATE_WRITE_BARRIER) { \ + heap->incremental_marking()->RecordWrite( \ + object, HeapObject::RawField(object, offset), value); \ + if (heap->InNewSpace(value)) { \ + heap->RecordWrite(object->address(), offset); \ + } \ } #ifndef V8_TARGET_ARCH_MIPS @@ -974,7 +995,6 @@ MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) { #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset) #endif // V8_TARGET_ARCH_MIPS - #ifndef V8_TARGET_ARCH_MIPS #define WRITE_DOUBLE_FIELD(p, offset, value) \ (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value) @@ -1169,91 +1189,6 @@ HeapObject* MapWord::ToForwardingAddress() { } -bool MapWord::IsMarked() { - return (value_ & kMarkingMask) == 0; -} - - -void MapWord::SetMark() { - value_ &= ~kMarkingMask; -} - - -void MapWord::ClearMark() { - value_ |= kMarkingMask; -} - - -bool MapWord::IsOverflowed() { - return (value_ & kOverflowMask) != 0; -} - - -void MapWord::SetOverflow() { - value_ |= kOverflowMask; -} - - -void MapWord::ClearOverflow() { - value_ &= ~kOverflowMask; -} - - -MapWord MapWord::EncodeAddress(Address map_address, int offset) { - // Offset is the distance in live bytes from the first live object in the - // same page. The offset between two objects in the same page should not - // exceed the object area size of a page. 
- ASSERT(0 <= offset && offset < Page::kObjectAreaSize); - - uintptr_t compact_offset = offset >> kObjectAlignmentBits; - ASSERT(compact_offset < (1 << kForwardingOffsetBits)); - - Page* map_page = Page::FromAddress(map_address); - ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index); - - uintptr_t map_page_offset = - map_page->Offset(map_address) >> kMapAlignmentBits; - - uintptr_t encoding = - (compact_offset << kForwardingOffsetShift) | - (map_page_offset << kMapPageOffsetShift) | - (map_page->mc_page_index << kMapPageIndexShift); - return MapWord(encoding); -} - - -Address MapWord::DecodeMapAddress(MapSpace* map_space) { - int map_page_index = - static_cast<int>((value_ & kMapPageIndexMask) >> kMapPageIndexShift); - ASSERT_MAP_PAGE_INDEX(map_page_index); - - int map_page_offset = static_cast<int>( - ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) << - kMapAlignmentBits); - - return (map_space->PageAddress(map_page_index) + map_page_offset); -} - - -int MapWord::DecodeOffset() { - // The offset field is represented in the kForwardingOffsetBits - // most-significant bits. - uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits; - ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize)); - return static_cast<int>(offset); -} - - -MapWord MapWord::FromEncodedAddress(Address address) { - return MapWord(reinterpret_cast<uintptr_t>(address)); -} - - -Address MapWord::ToEncodedAddress() { - return reinterpret_cast<Address>(value_); -} - - #ifdef DEBUG void HeapObject::VerifyObjectField(int offset) { VerifyPointer(READ_FIELD(this, offset)); @@ -1266,12 +1201,11 @@ void HeapObject::VerifySmiField(int offset) { Heap* HeapObject::GetHeap() { - // During GC, the map pointer in HeapObject is used in various ways that - // prevent us from retrieving Heap from the map. - // Assert that we are not in GC, implement GC code in a way that it doesn't - // pull heap from the map. - ASSERT(HEAP->is_safe_to_read_maps()); - return map()->heap(); + Heap* heap = + MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap(); + ASSERT(heap != NULL); + ASSERT(heap->isolate() == Isolate::Current()); + return heap; } @@ -1287,6 +1221,17 @@ Map* HeapObject::map() { void HeapObject::set_map(Map* value) { set_map_word(MapWord::FromMap(value)); + if (value != NULL) { + // TODO(1600) We are passing NULL as a slot because maps can never be on + // evacuation candidate. + value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value); + } +} + + +// Unsafe accessor omitting write barrier. 
+void HeapObject::set_map_unsafe(Map* value) { + set_map_word(MapWord::FromMap(value)); } @@ -1329,47 +1274,6 @@ void HeapObject::IteratePointer(ObjectVisitor* v, int offset) { } -bool HeapObject::IsMarked() { - return map_word().IsMarked(); -} - - -void HeapObject::SetMark() { - ASSERT(!IsMarked()); - MapWord first_word = map_word(); - first_word.SetMark(); - set_map_word(first_word); -} - - -void HeapObject::ClearMark() { - ASSERT(IsMarked()); - MapWord first_word = map_word(); - first_word.ClearMark(); - set_map_word(first_word); -} - - -bool HeapObject::IsOverflowed() { - return map_word().IsOverflowed(); -} - - -void HeapObject::SetOverflow() { - MapWord first_word = map_word(); - first_word.SetOverflow(); - set_map_word(first_word); -} - - -void HeapObject::ClearOverflow() { - ASSERT(IsOverflowed()); - MapWord first_word = map_word(); - first_word.ClearOverflow(); - set_map_word(first_word); -} - - double HeapNumber::value() { return READ_DOUBLE_FIELD(this, kValueOffset); } @@ -1400,16 +1304,77 @@ FixedArrayBase* JSObject::elements() { return static_cast<FixedArrayBase*>(array); } +void JSObject::ValidateSmiOnlyElements() { +#if DEBUG + if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) { + Heap* heap = GetHeap(); + // Don't use elements, since integrity checks will fail if there + // are filler pointers in the array. + FixedArray* fixed_array = + reinterpret_cast<FixedArray*>(READ_FIELD(this, kElementsOffset)); + Map* map = fixed_array->map(); + // Arrays that have been shifted in place can't be verified. + if (map != heap->raw_unchecked_one_pointer_filler_map() && + map != heap->raw_unchecked_two_pointer_filler_map() && + map != heap->free_space_map()) { + for (int i = 0; i < fixed_array->length(); i++) { + Object* current = fixed_array->get(i); + ASSERT(current->IsSmi() || current == heap->the_hole_value()); + } + } + } +#endif +} + + +MaybeObject* JSObject::EnsureCanContainNonSmiElements() { +#if DEBUG + ValidateSmiOnlyElements(); +#endif + if ((map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS)) { + Object* obj; + MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + set_map(Map::cast(obj)); + } + return this; +} + + +MaybeObject* JSObject::EnsureCanContainElements(Object** objects, + uint32_t count) { + if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) { + for (uint32_t i = 0; i < count; ++i) { + Object* current = *objects++; + if (!current->IsSmi() && current != GetHeap()->the_hole_value()) { + return EnsureCanContainNonSmiElements(); + } + } + } + return this; +} + + +MaybeObject* JSObject::EnsureCanContainElements(FixedArray* elements) { + Object** objects = reinterpret_cast<Object**>( + FIELD_ADDR(elements, elements->OffsetOfElementAt(0))); + return EnsureCanContainElements(objects, elements->length()); +} + void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) { - ASSERT(map()->has_fast_elements() == + ASSERT((map()->has_fast_elements() || + map()->has_fast_smi_only_elements()) == (value->map() == GetHeap()->fixed_array_map() || value->map() == GetHeap()->fixed_cow_array_map())); ASSERT(map()->has_fast_double_elements() == value->IsFixedDoubleArray()); ASSERT(value->HasValidElements()); +#ifdef DEBUG + ValidateSmiOnlyElements(); +#endif WRITE_FIELD(this, kElementsOffset, value); - CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode); + CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode); } @@ -1420,7 +1385,7 @@ void JSObject::initialize_properties() 
{ void JSObject::initialize_elements() { - ASSERT(map()->has_fast_elements()); + ASSERT(map()->has_fast_elements() || map()->has_fast_smi_only_elements()); ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array())); WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array()); } @@ -1428,9 +1393,11 @@ void JSObject::initialize_elements() { MaybeObject* JSObject::ResetElements() { Object* obj; - { MaybeObject* maybe_obj = map()->GetFastElementsMap(); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + ElementsKind elements_kind = FLAG_smi_only_arrays + ? FAST_SMI_ONLY_ELEMENTS + : FAST_ELEMENTS; + MaybeObject* maybe_obj = GetElementsTransitionMap(elements_kind); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; set_map(Map::cast(obj)); initialize_elements(); return this; @@ -1442,12 +1409,12 @@ ACCESSORS(Oddball, to_number, Object, kToNumberOffset) byte Oddball::kind() { - return READ_BYTE_FIELD(this, kKindOffset); + return Smi::cast(READ_FIELD(this, kKindOffset))->value(); } void Oddball::set_kind(byte value) { - WRITE_BYTE_FIELD(this, kKindOffset, value); + WRITE_FIELD(this, kKindOffset, Smi::FromInt(value)); } @@ -1460,6 +1427,8 @@ void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) { // The write barrier is not used for global property cells. ASSERT(!val->IsJSGlobalPropertyCell()); WRITE_FIELD(this, kValueOffset, val); + GetHeap()->incremental_marking()->RecordWrite( + this, HeapObject::RawField(this, kValueOffset), val); } @@ -1528,7 +1497,7 @@ void JSObject::SetInternalField(int index, Object* value) { // to adjust the index here. int offset = GetHeaderSize() + (kPointerSize * index); WRITE_FIELD(this, offset, value); - WRITE_BARRIER(this, offset); + WRITE_BARRIER(GetHeap(), this, offset, value); } @@ -1554,7 +1523,7 @@ Object* JSObject::FastPropertyAtPut(int index, Object* value) { if (index < 0) { int offset = map()->instance_size() + (index * kPointerSize); WRITE_FIELD(this, offset, value); - WRITE_BARRIER(this, offset); + WRITE_BARRIER(GetHeap(), this, offset, value); } else { ASSERT(index < properties()->length()); properties()->set(index, value); @@ -1588,16 +1557,32 @@ Object* JSObject::InObjectPropertyAtPut(int index, ASSERT(index < 0); int offset = map()->instance_size() + (index * kPointerSize); WRITE_FIELD(this, offset, value); - CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode); + CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); return value; } -void JSObject::InitializeBody(int object_size, Object* value) { - ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value)); - for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) { - WRITE_FIELD(this, offset, value); +void JSObject::InitializeBody(Map* map, + Object* pre_allocated_value, + Object* filler_value) { + ASSERT(!filler_value->IsHeapObject() || + !GetHeap()->InNewSpace(filler_value)); + ASSERT(!pre_allocated_value->IsHeapObject() || + !GetHeap()->InNewSpace(pre_allocated_value)); + int size = map->instance_size(); + int offset = kHeaderSize; + if (filler_value != pre_allocated_value) { + int pre_allocated = map->pre_allocated_property_fields(); + ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size); + for (int i = 0; i < pre_allocated; i++) { + WRITE_FIELD(this, offset, pre_allocated_value); + offset += kPointerSize; + } + } + while (offset < size) { + WRITE_FIELD(this, offset, filler_value); + offset += kPointerSize; } } @@ -1683,7 +1668,7 @@ void FixedArray::set(int index, Object* value) { ASSERT(index >= 0 && index < 
this->length()); int offset = kHeaderSize + index * kPointerSize; WRITE_FIELD(this, offset, value); - WRITE_BARRIER(this, offset); + WRITE_BARRIER(GetHeap(), this, offset, value); } @@ -1768,7 +1753,7 @@ void FixedDoubleArray::Initialize(FixedDoubleArray* from) { void FixedDoubleArray::Initialize(FixedArray* from) { int old_length = from->length(); - ASSERT(old_length < length()); + ASSERT(old_length <= length()); for (int i = 0; i < old_length; i++) { Object* hole_or_object = from->get(i); if (hole_or_object->IsTheHole()) { @@ -1802,7 +1787,9 @@ void FixedDoubleArray::Initialize(NumberDictionary* from) { WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) { - if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER; + Heap* heap = GetHeap(); + if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER; + if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER; return UPDATE_WRITE_BARRIER; } @@ -1814,7 +1801,7 @@ void FixedArray::set(int index, ASSERT(index >= 0 && index < this->length()); int offset = kHeaderSize + index * kPointerSize; WRITE_FIELD(this, offset, value); - CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode); + CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); } @@ -1823,6 +1810,10 @@ void FixedArray::fast_set(FixedArray* array, int index, Object* value) { ASSERT(index >= 0 && index < array->length()); ASSERT(!HEAP->InNewSpace(value)); WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value); + array->GetHeap()->incremental_marking()->RecordWrite( + array, + HeapObject::RawField(array, kHeaderSize + index * kPointerSize), + value); } @@ -1875,7 +1866,7 @@ void FixedArray::set_unchecked(Heap* heap, WriteBarrierMode mode) { int offset = kHeaderSize + index * kPointerSize; WRITE_FIELD(this, offset, value); - CONDITIONAL_WRITE_BARRIER(heap, this, offset, mode); + CONDITIONAL_WRITE_BARRIER(heap, this, offset, value, mode); } @@ -2154,6 +2145,7 @@ CAST_ACCESSOR(JSFunctionProxy) CAST_ACCESSOR(JSWeakMap) CAST_ACCESSOR(Foreign) CAST_ACCESSOR(ByteArray) +CAST_ACCESSOR(FreeSpace) CAST_ACCESSOR(ExternalArray) CAST_ACCESSOR(ExternalByteArray) CAST_ACCESSOR(ExternalUnsignedByteArray) @@ -2180,6 +2172,7 @@ HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) { SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset) +SMI_ACCESSORS(FreeSpace, size, kSizeOffset) SMI_ACCESSORS(String, length, kLengthOffset) @@ -2336,7 +2329,7 @@ String* SlicedString::parent() { void SlicedString::set_parent(String* parent) { - ASSERT(parent->IsSeqString()); + ASSERT(parent->IsSeqString() || parent->IsExternalString()); WRITE_FIELD(this, kParentOffset, parent); } @@ -2356,7 +2349,7 @@ Object* ConsString::unchecked_first() { void ConsString::set_first(String* value, WriteBarrierMode mode) { WRITE_FIELD(this, kFirstOffset, value); - CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, mode); + CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode); } @@ -2372,29 +2365,31 @@ Object* ConsString::unchecked_second() { void ConsString::set_second(String* value, WriteBarrierMode mode) { WRITE_FIELD(this, kSecondOffset, value); - CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, mode); + CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode); } -ExternalAsciiString::Resource* ExternalAsciiString::resource() { +const ExternalAsciiString::Resource* ExternalAsciiString::resource() { return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)); } void ExternalAsciiString::set_resource( - 
ExternalAsciiString::Resource* resource) { - *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource; + const ExternalAsciiString::Resource* resource) { + *reinterpret_cast<const Resource**>( + FIELD_ADDR(this, kResourceOffset)) = resource; } -ExternalTwoByteString::Resource* ExternalTwoByteString::resource() { +const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() { return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)); } void ExternalTwoByteString::set_resource( - ExternalTwoByteString::Resource* resource) { - *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource; + const ExternalTwoByteString::Resource* resource) { + *reinterpret_cast<const Resource**>( + FIELD_ADDR(this, kResourceOffset)) = resource; } @@ -2694,6 +2689,9 @@ int HeapObject::SizeFromMap(Map* map) { if (instance_type == BYTE_ARRAY_TYPE) { return reinterpret_cast<ByteArray*>(this)->ByteArraySize(); } + if (instance_type == FREE_SPACE_TYPE) { + return reinterpret_cast<FreeSpace*>(this)->size(); + } if (instance_type == STRING_TYPE) { return SeqTwoByteString::SizeFor( reinterpret_cast<SeqTwoByteString*>(this)->length()); @@ -2855,12 +2853,6 @@ JSFunction* Map::unchecked_constructor() { } -FixedArray* Map::unchecked_prototype_transitions() { - return reinterpret_cast<FixedArray*>( - READ_FIELD(this, kPrototypeTransitionsOffset)); -} - - Code::Flags Code::flags() { return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset)); } @@ -2932,6 +2924,19 @@ void Code::set_major_key(int major) { } +bool Code::is_pregenerated() { + return kind() == STUB && IsPregeneratedField::decode(flags()); +} + + +void Code::set_is_pregenerated(bool value) { + ASSERT(kind() == STUB); + Flags f = flags(); + f = static_cast<Flags>(IsPregeneratedField::update(f, value)); + set_flags(f); +} + + bool Code::optimizable() { ASSERT(kind() == FUNCTION); return READ_BYTE_FIELD(this, kOptimizableOffset) == 1; @@ -3097,6 +3102,19 @@ void Code::set_to_boolean_state(byte value) { WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value); } + +bool Code::has_function_cache() { + ASSERT(kind() == STUB); + return READ_BYTE_FIELD(this, kHasFunctionCacheOffset) != 0; +} + + +void Code::set_has_function_cache(bool flag) { + ASSERT(kind() == STUB); + WRITE_BYTE_FIELD(this, kHasFunctionCacheOffset, flag); +} + + bool Code::is_inline_cache_stub() { Kind kind = this->kind(); return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND; @@ -3182,48 +3200,6 @@ Code* Code::GetCodeFromTargetAddress(Address address) { } -Isolate* Map::isolate() { - return heap()->isolate(); -} - - -Heap* Map::heap() { - // NOTE: address() helper is not used to save one instruction. - Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_; - ASSERT(heap != NULL); - ASSERT(heap->isolate() == Isolate::Current()); - return heap; -} - - -Heap* Code::heap() { - // NOTE: address() helper is not used to save one instruction. - Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_; - ASSERT(heap != NULL); - ASSERT(heap->isolate() == Isolate::Current()); - return heap; -} - - -Isolate* Code::isolate() { - return heap()->isolate(); -} - - -Heap* JSGlobalPropertyCell::heap() { - // NOTE: address() helper is not used to save one instruction. 
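The new Code::is_pregenerated()/set_is_pregenerated() accessors above decode and update a single bit packed into the flags word through IsPregeneratedField. A minimal reimplementation of that decode/update pattern is sketched below; the field position and the *Sketch names are made up and this is not V8's BitField class.

#include <cstdint>
#include <cstdio>

template <class T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t word) { return static_cast<T>((word & kMask) >> shift); }
  static uint32_t update(uint32_t word, T value) {
    // Clear the field's window, then OR in the freshly encoded value.
    return (word & ~kMask) | encode(value);
  }
};

// Hypothetical bit position, chosen only for the example.
typedef BitFieldSketch<bool, 5, 1> IsPregeneratedFieldSketch;

int main() {
  uint32_t flags = 0;
  flags = IsPregeneratedFieldSketch::update(flags, true);
  std::printf("%d\n", IsPregeneratedFieldSketch::decode(flags));  // prints 1
  return 0;
}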
- Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_; - ASSERT(heap != NULL); - ASSERT(heap->isolate() == Isolate::Current()); - return heap; -} - - -Isolate* JSGlobalPropertyCell::isolate() { - return heap()->isolate(); -} - - Object* Code::GetObjectFromEntryAddress(Address location_of_address) { return HeapObject:: FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize); @@ -3238,46 +3214,7 @@ Object* Map::prototype() { void Map::set_prototype(Object* value, WriteBarrierMode mode) { ASSERT(value->IsNull() || value->IsJSReceiver()); WRITE_FIELD(this, kPrototypeOffset, value); - CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode); -} - - -MaybeObject* Map::GetFastElementsMap() { - if (has_fast_elements()) return this; - Object* obj; - { MaybeObject* maybe_obj = CopyDropTransitions(); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - Map* new_map = Map::cast(obj); - new_map->set_elements_kind(FAST_ELEMENTS); - isolate()->counters()->map_to_fast_elements()->Increment(); - return new_map; -} - - -MaybeObject* Map::GetFastDoubleElementsMap() { - if (has_fast_double_elements()) return this; - Object* obj; - { MaybeObject* maybe_obj = CopyDropTransitions(); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - Map* new_map = Map::cast(obj); - new_map->set_elements_kind(FAST_DOUBLE_ELEMENTS); - isolate()->counters()->map_to_fast_double_elements()->Increment(); - return new_map; -} - - -MaybeObject* Map::GetSlowElementsMap() { - if (!has_fast_elements() && !has_fast_double_elements()) return this; - Object* obj; - { MaybeObject* maybe_obj = CopyDropTransitions(); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - Map* new_map = Map::cast(obj); - new_map->set_elements_kind(DICTIONARY_ELEMENTS); - isolate()->counters()->map_to_slow_elements()->Increment(); - return new_map; + CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode); } @@ -3312,7 +3249,8 @@ void Map::set_instance_descriptors(DescriptorArray* value, WriteBarrierMode mode) { Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset); - if (value == isolate()->heap()->empty_descriptor_array()) { + Heap* heap = GetHeap(); + if (value == heap->empty_descriptor_array()) { clear_instance_descriptors(); return; } else { @@ -3325,10 +3263,8 @@ void Map::set_instance_descriptors(DescriptorArray* value, } ASSERT(!is_shared()); WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value); - CONDITIONAL_WRITE_BARRIER(GetHeap(), - this, - kInstanceDescriptorsOrBitField3Offset, - mode); + CONDITIONAL_WRITE_BARRIER( + heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode); } @@ -3357,14 +3293,22 @@ void Map::set_bit_field3(int value) { } +FixedArray* Map::unchecked_prototype_transitions() { + return reinterpret_cast<FixedArray*>( + READ_FIELD(this, kPrototypeTransitionsOffset)); +} + + ACCESSORS(Map, code_cache, Object, kCodeCacheOffset) ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset) ACCESSORS(Map, constructor, Object, kConstructorOffset) ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset) ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset) -ACCESSORS_GCSAFE(JSFunction, next_function_link, Object, - kNextFunctionLinkOffset) +ACCESSORS(JSFunction, + next_function_link, + Object, + kNextFunctionLinkOffset) ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset) ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset) @@ -3453,8 +3397,8 @@ 
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex) #endif ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset) -ACCESSORS_GCSAFE(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset) -ACCESSORS_GCSAFE(SharedFunctionInfo, initial_map, Object, kInitialMapOffset) +ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset) +ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset) ACCESSORS(SharedFunctionInfo, instance_class_name, Object, kInstanceClassNameOffset) ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset) @@ -3660,7 +3604,7 @@ Code* SharedFunctionInfo::unchecked_code() { void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) { WRITE_FIELD(this, kCodeOffset, value); - ASSERT(!Isolate::Current()->heap()->InNewSpace(value)); + CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode); } @@ -3673,7 +3617,11 @@ SerializedScopeInfo* SharedFunctionInfo::scope_info() { void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value, WriteBarrierMode mode) { WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value)); - CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset, mode); + CONDITIONAL_WRITE_BARRIER(GetHeap(), + this, + kScopeInfoOffset, + reinterpret_cast<Object*>(value), + mode); } @@ -3770,10 +3718,13 @@ Code* JSFunction::unchecked_code() { void JSFunction::set_code(Code* value) { - // Skip the write barrier because code is never in new space. ASSERT(!HEAP->InNewSpace(value)); Address entry = value->entry(); WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry)); + GetHeap()->incremental_marking()->RecordWriteOfCodeEntry( + this, + HeapObject::RawField(this, kCodeEntryOffset), + value); } @@ -3813,7 +3764,7 @@ SharedFunctionInfo* JSFunction::unchecked_shared() { void JSFunction::set_context(Object* value) { ASSERT(value->IsUndefined() || value->IsContext()); WRITE_FIELD(this, kContextOffset, value); - WRITE_BARRIER(this, kContextOffset); + WRITE_BARRIER(GetHeap(), this, kContextOffset, value); } ACCESSORS(JSFunction, prototype_or_initial_map, Object, @@ -3887,7 +3838,7 @@ void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id, Object* value) { ASSERT(id < kJSBuiltinsCount); // id is unsigned. 
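Several hunks earlier in this file (EnsureCanContainNonSmiElements, EnsureCanContainElements, ResetElements) implement the new FAST_SMI_ONLY_ELEMENTS kind: a backing store keeps its smi-only map until a value that is neither a Smi nor the hole is about to be stored, and only then transitions to plain FAST_ELEMENTS. The self-contained sketch below imitates that decision with simplified tagging; the sentinel, constants and names are invented.

#include <cstdint>
#include <cstdio>

enum ElementsKindSketch { FAST_SMI_ONLY, FAST, DICTIONARY };

// Tagged-word stand-ins: low bit clear means Smi, one odd sentinel plays the
// role of the hole value (which is itself a heap object in V8).
static const uintptr_t kHoleSentinel = 0x7;

inline bool IsSmiWord(uintptr_t word) { return (word & 1) == 0; }

// Mirrors EnsureCanContainElements: scan the values about to be stored and
// decide whether the smi-only representation can be kept.
inline ElementsKindSketch RequiredKind(ElementsKindSketch current,
                                       const uintptr_t* values, int count) {
  if (current != FAST_SMI_ONLY) return current;
  for (int i = 0; i < count; i++) {
    if (!IsSmiWord(values[i]) && values[i] != kHoleSentinel) {
      return FAST;  // a heap object is being stored: transition
    }
  }
  return FAST_SMI_ONLY;
}

int main() {
  uintptr_t only_smis[] = { 2u << 1, 3u << 1, kHoleSentinel };
  uintptr_t with_object[] = { 2u << 1, 0x1001 };  // 0x1001: tagged heap pointer
  std::printf("%d\n", RequiredKind(FAST_SMI_ONLY, only_smis, 3));    // stays 0
  std::printf("%d\n", RequiredKind(FAST_SMI_ONLY, with_object, 2));  // becomes 1
  return 0;
}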
WRITE_FIELD(this, OffsetOfFunctionWithId(id), value); - WRITE_BARRIER(this, OffsetOfFunctionWithId(id)); + WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value); } @@ -3906,6 +3857,7 @@ void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id, ACCESSORS(JSProxy, handler, Object, kHandlerOffset) +ACCESSORS(JSProxy, hash, Object, kHashOffset) ACCESSORS(JSFunctionProxy, call_trap, Object, kCallTrapOffset) ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset) @@ -3918,8 +3870,8 @@ void JSProxy::InitializeBody(int object_size, Object* value) { } -ACCESSORS(JSWeakMap, table, ObjectHashTable, kTableOffset) -ACCESSORS_GCSAFE(JSWeakMap, next, Object, kNextOffset) +ACCESSORS(JSWeakMap, table, Object, kTableOffset) +ACCESSORS(JSWeakMap, next, Object, kNextOffset) ObjectHashTable* JSWeakMap::unchecked_table() { @@ -4011,9 +3963,8 @@ byte* Code::entry() { } -bool Code::contains(byte* pc) { - return (instruction_start() <= pc) && - (pc <= instruction_start() + instruction_size()); +bool Code::contains(byte* inner_pointer) { + return (address() <= inner_pointer) && (inner_pointer <= address() + Size()); } @@ -4092,6 +4043,7 @@ void JSRegExp::SetDataAtUnchecked(int index, Object* value, Heap* heap) { if (value->IsSmi()) { fa->set_unchecked(index, Smi::cast(value)); } else { + // We only do this during GC, so we don't need to notify the write barrier. fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER); } } @@ -4099,15 +4051,20 @@ void JSRegExp::SetDataAtUnchecked(int index, Object* value, Heap* heap) { ElementsKind JSObject::GetElementsKind() { ElementsKind kind = map()->elements_kind(); - ASSERT((kind == FAST_ELEMENTS && - (elements()->map() == GetHeap()->fixed_array_map() || - elements()->map() == GetHeap()->fixed_cow_array_map())) || +#if DEBUG + FixedArrayBase* fixed_array = + reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset)); + Map* map = fixed_array->map(); + ASSERT(((kind == FAST_ELEMENTS || kind == FAST_SMI_ONLY_ELEMENTS) && + (map == GetHeap()->fixed_array_map() || + map == GetHeap()->fixed_cow_array_map())) || (kind == FAST_DOUBLE_ELEMENTS && - elements()->IsFixedDoubleArray()) || + fixed_array->IsFixedDoubleArray()) || (kind == DICTIONARY_ELEMENTS && - elements()->IsFixedArray() && - elements()->IsDictionary()) || + fixed_array->IsFixedArray() && + fixed_array->IsDictionary()) || (kind > DICTIONARY_ELEMENTS)); +#endif return kind; } @@ -4122,6 +4079,18 @@ bool JSObject::HasFastElements() { } +bool JSObject::HasFastSmiOnlyElements() { + return GetElementsKind() == FAST_SMI_ONLY_ELEMENTS; +} + + +bool JSObject::HasFastTypeElements() { + ElementsKind elements_kind = GetElementsKind(); + return elements_kind == FAST_SMI_ONLY_ELEMENTS || + elements_kind == FAST_ELEMENTS; +} + + bool JSObject::HasFastDoubleElements() { return GetElementsKind() == FAST_DOUBLE_ELEMENTS; } @@ -4132,6 +4101,11 @@ bool JSObject::HasDictionaryElements() { } +bool JSObject::HasNonStrictArgumentsElements() { + return GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS; +} + + bool JSObject::HasExternalArrayElements() { HeapObject* array = elements(); ASSERT(array != NULL); @@ -4183,7 +4157,7 @@ bool JSObject::AllowsSetElementsLength() { MaybeObject* JSObject::EnsureWritableFastElements() { - ASSERT(HasFastElements()); + ASSERT(HasFastTypeElements()); FixedArray* elems = FixedArray::cast(elements()); Isolate* isolate = GetIsolate(); if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems; @@ -4359,44 +4333,18 @@ Object* 
JSObject::BypassGlobalProxy() { } -bool JSObject::HasHiddenPropertiesObject() { - ASSERT(!IsJSGlobalProxy()); - return GetPropertyAttributePostInterceptor(this, - GetHeap()->hidden_symbol(), - false) != ABSENT; +MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) { + return IsJSProxy() + ? JSProxy::cast(this)->GetIdentityHash(flag) + : JSObject::cast(this)->GetIdentityHash(flag); } -Object* JSObject::GetHiddenPropertiesObject() { - ASSERT(!IsJSGlobalProxy()); - PropertyAttributes attributes; - // You can't install a getter on a property indexed by the hidden symbol, - // so we can be sure that GetLocalPropertyPostInterceptor returns a real - // object. - Object* result = - GetLocalPropertyPostInterceptor(this, - GetHeap()->hidden_symbol(), - &attributes)->ToObjectUnchecked(); - return result; -} - - -MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) { - ASSERT(!IsJSGlobalProxy()); - return SetPropertyPostInterceptor(GetHeap()->hidden_symbol(), - hidden_obj, - DONT_ENUM, - kNonStrictMode); -} - - -bool JSObject::HasHiddenProperties() { - return !GetHiddenProperties(OMIT_CREATION)->ToObjectChecked()->IsUndefined(); -} - - -bool JSObject::HasElement(uint32_t index) { - return HasElementWithReceiver(this, index); +bool JSReceiver::HasElement(uint32_t index) { + if (IsJSProxy()) { + return JSProxy::cast(this)->HasElementWithHandler(index); + } + return JSObject::cast(this)->HasElementWithReceiver(this, index); } @@ -4508,27 +4456,27 @@ MaybeObject* StringDictionaryShape::AsObject(String* key) { } -bool ObjectHashTableShape::IsMatch(JSObject* key, Object* other) { - return key == JSObject::cast(other); +bool ObjectHashTableShape::IsMatch(JSReceiver* key, Object* other) { + return key == JSReceiver::cast(other); } -uint32_t ObjectHashTableShape::Hash(JSObject* key) { - MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION); +uint32_t ObjectHashTableShape::Hash(JSReceiver* key) { + MaybeObject* maybe_hash = key->GetIdentityHash(OMIT_CREATION); ASSERT(!maybe_hash->IsFailure()); return Smi::cast(maybe_hash->ToObjectUnchecked())->value(); } -uint32_t ObjectHashTableShape::HashForObject(JSObject* key, Object* other) { - MaybeObject* maybe_hash = JSObject::cast(other)->GetIdentityHash( - JSObject::OMIT_CREATION); +uint32_t ObjectHashTableShape::HashForObject(JSReceiver* key, Object* other) { + MaybeObject* maybe_hash = + JSReceiver::cast(other)->GetIdentityHash(OMIT_CREATION); ASSERT(!maybe_hash->IsFailure()); return Smi::cast(maybe_hash->ToObjectUnchecked())->value(); } -MaybeObject* ObjectHashTableShape::AsObject(JSObject* key) { +MaybeObject* ObjectHashTableShape::AsObject(JSReceiver* key) { return key; } @@ -4548,7 +4496,7 @@ void Map::ClearCodeCache(Heap* heap) { void JSArray::EnsureSize(int required_size) { - ASSERT(HasFastElements()); + ASSERT(HasFastTypeElements()); FixedArray* elts = FixedArray::cast(elements()); const int kArraySizeThatFitsComfortablyInNewSpace = 128; if (elts->length() < required_size) { @@ -4566,13 +4514,17 @@ void JSArray::EnsureSize(int required_size) { void JSArray::set_length(Smi* length) { + // Don't need a write barrier for a Smi. 
set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER); } -void JSArray::SetContent(FixedArray* storage) { +MaybeObject* JSArray::SetContent(FixedArray* storage) { + MaybeObject* maybe_object = EnsureCanContainElements(storage); + if (maybe_object->IsFailure()) return maybe_object; set_length(Smi::FromInt(storage->length())); set_elements(storage); + return this; } diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc index 0398572f9..fc7573241 100644 --- a/deps/v8/src/objects-printer.cc +++ b/deps/v8/src/objects-printer.cc @@ -82,12 +82,18 @@ void HeapObject::HeapObjectPrint(FILE* out) { case HEAP_NUMBER_TYPE: HeapNumber::cast(this)->HeapNumberPrint(out); break; + case FIXED_DOUBLE_ARRAY_TYPE: + FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out); + break; case FIXED_ARRAY_TYPE: FixedArray::cast(this)->FixedArrayPrint(out); break; case BYTE_ARRAY_TYPE: ByteArray::cast(this)->ByteArrayPrint(out); break; + case FREE_SPACE_TYPE: + FreeSpace::cast(this)->FreeSpacePrint(out); + break; case EXTERNAL_PIXEL_ARRAY_TYPE: ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out); break; @@ -189,6 +195,11 @@ void ByteArray::ByteArrayPrint(FILE* out) { } +void FreeSpace::FreeSpacePrint(FILE* out) { + PrintF(out, "free space, size %d", Size()); +} + + void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) { PrintF(out, "external pixel array"); } @@ -234,6 +245,54 @@ void ExternalDoubleArray::ExternalDoubleArrayPrint(FILE* out) { } +static void PrintElementsKind(FILE* out, ElementsKind kind) { + switch (kind) { + case FAST_SMI_ONLY_ELEMENTS: + PrintF(out, "FAST_SMI_ONLY_ELEMENTS"); + break; + case FAST_ELEMENTS: + PrintF(out, "FAST_ELEMENTS"); + break; + case FAST_DOUBLE_ELEMENTS: + PrintF(out, "FAST_DOUBLE_ELEMENTS"); + break; + case DICTIONARY_ELEMENTS: + PrintF(out, "DICTIONARY_ELEMENTS"); + break; + case NON_STRICT_ARGUMENTS_ELEMENTS: + PrintF(out, "NON_STRICT_ARGUMENTS_ELEMENTS"); + break; + case EXTERNAL_BYTE_ELEMENTS: + PrintF(out, "EXTERNAL_BYTE_ELEMENTS"); + break; + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + PrintF(out, "EXTERNAL_UNSIGNED_BYTE_ELEMENTS"); + break; + case EXTERNAL_SHORT_ELEMENTS: + PrintF(out, "EXTERNAL_SHORT_ELEMENTS"); + break; + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + PrintF(out, "EXTERNAL_UNSIGNED_SHORT_ELEMENTS"); + break; + case EXTERNAL_INT_ELEMENTS: + PrintF(out, "EXTERNAL_INT_ELEMENTS"); + break; + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + PrintF(out, "EXTERNAL_UNSIGNED_INT_ELEMENTS"); + break; + case EXTERNAL_FLOAT_ELEMENTS: + PrintF(out, "EXTERNAL_FLOAT_ELEMENTS"); + break; + case EXTERNAL_DOUBLE_ELEMENTS: + PrintF(out, "EXTERNAL_DOUBLE_ELEMENTS"); + break; + case EXTERNAL_PIXEL_ELEMENTS: + PrintF(out, "EXTERNAL_DOUBLE_ELEMENTS"); + break; + } +} + + void JSObject::PrintProperties(FILE* out) { if (HasFastProperties()) { DescriptorArray* descs = map()->instance_descriptors(); @@ -256,14 +315,33 @@ void JSObject::PrintProperties(FILE* out) { descs->GetCallbacksObject(i)->ShortPrint(out); PrintF(out, " (callback)\n"); break; + case ELEMENTS_TRANSITION: { + PrintF(out, "(elements transition to "); + Object* descriptor_contents = descs->GetValue(i); + if (descriptor_contents->IsMap()) { + Map* map = Map::cast(descriptor_contents); + PrintElementsKind(out, map->elements_kind()); + } else { + FixedArray* map_array = FixedArray::cast(descriptor_contents); + for (int i = 0; i < map_array->length(); ++i) { + Map* map = Map::cast(map_array->get(i)); + if (i != 0) { + PrintF(out, ", "); + } + PrintElementsKind(out, map->elements_kind()); + } 
+ } + PrintF(out, ")\n"); + break; + } case MAP_TRANSITION: - PrintF(out, " (map transition)\n"); + PrintF(out, "(map transition)\n"); break; case CONSTANT_TRANSITION: - PrintF(out, " (constant transition)\n"); + PrintF(out, "(constant transition)\n"); break; case NULL_DESCRIPTOR: - PrintF(out, " (null descriptor)\n"); + PrintF(out, "(null descriptor)\n"); break; default: UNREACHABLE(); @@ -277,7 +355,10 @@ void JSObject::PrintProperties(FILE* out) { void JSObject::PrintElements(FILE* out) { - switch (GetElementsKind()) { + // Don't call GetElementsKind, its validation code can cause the printer to + // fail when debugging. + switch (map()->elements_kind()) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: { // Print in array notation for non-sparse arrays. FixedArray* p = FixedArray::cast(elements()); @@ -385,8 +466,13 @@ void JSObject::PrintElements(FILE* out) { void JSObject::JSObjectPrint(FILE* out) { PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this)); - PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - prototype = %p\n", reinterpret_cast<void*>(GetPrototype())); + PrintF(out, " - map = %p [", reinterpret_cast<void*>(map())); + // Don't call GetElementsKind, its validation code can cause the printer to + // fail when debugging. + PrintElementsKind(out, this->map()->elements_kind()); + PrintF(out, + "]\n - prototype = %p\n", + reinterpret_cast<void*>(GetPrototype())); PrintF(out, " {\n"); PrintProperties(out); PrintElements(out); @@ -415,6 +501,7 @@ static const char* TypeToString(InstanceType type) { case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING"; case FIXED_ARRAY_TYPE: return "FIXED_ARRAY"; case BYTE_ARRAY_TYPE: return "BYTE_ARRAY"; + case FREE_SPACE_TYPE: return "FREE_SPACE"; case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY"; case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY"; case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE: @@ -458,7 +545,9 @@ void Map::MapPrint(FILE* out) { PrintF(out, " - type: %s\n", TypeToString(instance_type())); PrintF(out, " - instance size: %d\n", instance_size()); PrintF(out, " - inobject properties: %d\n", inobject_properties()); - PrintF(out, " - pre-allocated property fields: %d\n", + PrintF(out, " - elements kind: "); + PrintElementsKind(out, elements_kind()); + PrintF(out, "\n - pre-allocated property fields: %d\n", pre_allocated_property_fields()); PrintF(out, " - unused property fields: %d\n", unused_property_fields()); if (is_hidden_prototype()) { @@ -516,6 +605,16 @@ void FixedArray::FixedArrayPrint(FILE* out) { } +void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) { + HeapObject::PrintHeader(out, "FixedDoubleArray"); + PrintF(out, " - length: %d", length()); + for (int i = 0; i < length(); i++) { + PrintF(out, "\n [%d]: %g", i, get_scalar(i)); + } + PrintF(out, "\n"); +} + + void JSValue::JSValuePrint(FILE* out) { HeapObject::PrintHeader(out, "ValueObject"); value()->Print(out); @@ -587,6 +686,8 @@ void JSProxy::JSProxyPrint(FILE* out) { PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map())); PrintF(out, " - handler = "); handler()->Print(out); + PrintF(out, " - hash = "); + hash()->Print(out); PrintF(out, "\n"); } @@ -607,7 +708,6 @@ void JSFunctionProxy::JSFunctionProxyPrint(FILE* out) { void JSWeakMap::JSWeakMapPrint(FILE* out) { HeapObject::PrintHeader(out, "JSWeakMap"); PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map())); - PrintF(out, " - number of elements = %d\n", table()->NumberOfElements()); PrintF(out, " - table = "); table()->ShortPrint(out); 
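The JSProxy::hash field and the ObjectHashTableShape changes above key object hash tables (including the JSWeakMap backing store) by a lazily created identity hash: OMIT_CREATION only reads an existing hash, while creation installs one on first use. The rough illustration below captures that contract only; the class, the zero-means-absent convention and the counter are stand-ins, not V8 code.

#include <cstdio>

enum CreationFlagSketch { OMIT_CREATION_SKETCH, ALLOW_CREATION_SKETCH };

// Stand-in for a JSReceiver carrying a lazily created identity hash.
class ReceiverSketch {
 public:
  ReceiverSketch() : hash_(0) {}            // 0 means "no hash yet"
  int GetIdentityHash(CreationFlagSketch flag) {
    if (hash_ == 0 && flag == ALLOW_CREATION_SKETCH) {
      hash_ = next_hash_++;                 // any stable nonzero value works
    }
    return hash_;                           // 0 when absent and creation omitted
  }
 private:
  int hash_;
  static int next_hash_;
};

int ReceiverSketch::next_hash_ = 1;

int main() {
  ReceiverSketch a, b;
  std::printf("%d\n", a.GetIdentityHash(OMIT_CREATION_SKETCH));   // 0: nothing yet
  std::printf("%d\n", a.GetIdentityHash(ALLOW_CREATION_SKETCH));  // hash installed
  std::printf("%d\n", a.GetIdentityHash(OMIT_CREATION_SKETCH));   // same hash again
  std::printf("%d\n", b.GetIdentityHash(ALLOW_CREATION_SKETCH));  // distinct object
  return 0;
}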
PrintF(out, "\n"); @@ -802,10 +902,15 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(FILE* out) { void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) { HeapObject::PrintHeader(out, "ObjectTemplateInfo"); + PrintF(out, " - tag: "); + tag()->ShortPrint(out); + PrintF(out, "\n - property_list: "); + property_list()->ShortPrint(out); PrintF(out, "\n - constructor: "); constructor()->ShortPrint(out); PrintF(out, "\n - internal_field_count: "); internal_field_count()->ShortPrint(out); + PrintF(out, "\n"); } diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h new file mode 100644 index 000000000..6f0f61d35 --- /dev/null +++ b/deps/v8/src/objects-visiting-inl.h @@ -0,0 +1,143 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
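The new objects-visiting-inl.h that starts below moves StaticNewSpaceVisitor<...>::Initialize() out of the class body; the underlying mechanism is a table of callbacks indexed by visitor id, filled once at startup and then dispatched through for every object. A compact sketch of that dispatch-table pattern follows, with an illustrative two-entry id enum and a toy object type rather than V8's map-driven lookup.

#include <cstdio>

enum VisitorIdSketch { kVisitByteArraySketch, kVisitFixedArraySketch,
                       kVisitorIdCountSketch };

struct ObjectSketch { VisitorIdSketch id; int size; };

typedef int (*VisitCallback)(ObjectSketch* object);

class DispatchTableSketch {
 public:
  void Register(VisitorIdSketch id, VisitCallback callback) {
    callbacks_[id] = callback;
  }
  int Visit(ObjectSketch* object) {
    // Dispatch on the id cached for the object, as GetVisitor(map) does with
    // the visitor id stored on the object's map.
    return callbacks_[object->id](object);
  }
 private:
  VisitCallback callbacks_[kVisitorIdCountSketch];
};

static int VisitByteArraySketch(ObjectSketch* object) { return object->size; }
static int VisitFixedArraySketch(ObjectSketch* object) { return object->size; }

int main() {
  DispatchTableSketch table;
  table.Register(kVisitByteArraySketch, &VisitByteArraySketch);
  table.Register(kVisitFixedArraySketch, &VisitFixedArraySketch);
  ObjectSketch byte_array = { kVisitByteArraySketch, 24 };
  std::printf("%d\n", table.Visit(&byte_array));  // 24
  return 0;
}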
+ +#ifndef V8_OBJECTS_VISITING_INL_H_ +#define V8_OBJECTS_VISITING_INL_H_ + + +namespace v8 { +namespace internal { + +template<typename StaticVisitor> +void StaticNewSpaceVisitor<StaticVisitor>::Initialize() { + table_.Register(kVisitShortcutCandidate, + &FixedBodyVisitor<StaticVisitor, + ConsString::BodyDescriptor, + int>::Visit); + + table_.Register(kVisitConsString, + &FixedBodyVisitor<StaticVisitor, + ConsString::BodyDescriptor, + int>::Visit); + + table_.Register(kVisitSlicedString, + &FixedBodyVisitor<StaticVisitor, + SlicedString::BodyDescriptor, + int>::Visit); + + table_.Register(kVisitFixedArray, + &FlexibleBodyVisitor<StaticVisitor, + FixedArray::BodyDescriptor, + int>::Visit); + + table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray); + + table_.Register(kVisitGlobalContext, + &FixedBodyVisitor<StaticVisitor, + Context::ScavengeBodyDescriptor, + int>::Visit); + + table_.Register(kVisitByteArray, &VisitByteArray); + + table_.Register(kVisitSharedFunctionInfo, + &FixedBodyVisitor<StaticVisitor, + SharedFunctionInfo::BodyDescriptor, + int>::Visit); + + table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString); + + table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString); + + table_.Register(kVisitJSFunction, + &JSObjectVisitor:: + template VisitSpecialized<JSFunction::kSize>); + + table_.Register(kVisitFreeSpace, &VisitFreeSpace); + + table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit); + + table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit); + + table_.template RegisterSpecializations<DataObjectVisitor, + kVisitDataObject, + kVisitDataObjectGeneric>(); + + table_.template RegisterSpecializations<JSObjectVisitor, + kVisitJSObject, + kVisitJSObjectGeneric>(); + table_.template RegisterSpecializations<StructVisitor, + kVisitStruct, + kVisitStructGeneric>(); +} + + +void Code::CodeIterateBody(ObjectVisitor* v) { + int mode_mask = RelocInfo::kCodeTargetMask | + RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | + RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) | + RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) | + RelocInfo::ModeMask(RelocInfo::JS_RETURN) | + RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) | + RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); + + IteratePointer(v, kRelocationInfoOffset); + IteratePointer(v, kDeoptimizationDataOffset); + + RelocIterator it(this, mode_mask); + for (; !it.done(); it.next()) { + it.rinfo()->Visit(v); + } +} + + +template<typename StaticVisitor> +void Code::CodeIterateBody(Heap* heap) { + int mode_mask = RelocInfo::kCodeTargetMask | + RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | + RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) | + RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) | + RelocInfo::ModeMask(RelocInfo::JS_RETURN) | + RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) | + RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); + + StaticVisitor::VisitPointer( + heap, + reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset)); + StaticVisitor::VisitPointer( + heap, + reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset)); + + RelocIterator it(this, mode_mask); + for (; !it.done(); it.next()) { + it.rinfo()->template Visit<StaticVisitor>(heap); + } +} + + +} } // namespace v8::internal + +#endif // V8_OBJECTS_VISITING_INL_H_ diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc index 0aa21dd6e..20a7b3170 100644 --- a/deps/v8/src/objects-visiting.cc +++ b/deps/v8/src/objects-visiting.cc @@ -73,6 +73,9 @@ StaticVisitorBase::VisitorId 
StaticVisitorBase::GetVisitorId( case BYTE_ARRAY_TYPE: return kVisitByteArray; + case FREE_SPACE_TYPE: + return kVisitFreeSpace; + case FIXED_ARRAY_TYPE: return kVisitFixedArray; diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h index 4ce1bd077..e6ddfed4a 100644 --- a/deps/v8/src/objects-visiting.h +++ b/deps/v8/src/objects-visiting.h @@ -30,22 +30,6 @@ #include "allocation.h" -#if V8_TARGET_ARCH_IA32 -#include "ia32/assembler-ia32.h" -#include "ia32/assembler-ia32-inl.h" -#elif V8_TARGET_ARCH_X64 -#include "x64/assembler-x64.h" -#include "x64/assembler-x64-inl.h" -#elif V8_TARGET_ARCH_ARM -#include "arm/assembler-arm.h" -#include "arm/assembler-arm-inl.h" -#elif V8_TARGET_ARCH_MIPS -#include "mips/assembler-mips.h" -#include "mips/assembler-mips-inl.h" -#else -#error Unsupported target architecture. -#endif - // This file provides base classes and auxiliary methods for defining // static object visitors used during GC. // Visiting HeapObject body with a normal ObjectVisitor requires performing @@ -67,6 +51,7 @@ class StaticVisitorBase : public AllStatic { kVisitSeqTwoByteString, kVisitShortcutCandidate, kVisitByteArray, + kVisitFreeSpace, kVisitFixedArray, kVisitFixedDoubleArray, kVisitGlobalContext, @@ -172,6 +157,10 @@ class VisitorDispatchTable { } } + inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) { + return reinterpret_cast<Callback>(callbacks_[id]); + } + inline Callback GetVisitor(Map* map) { return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]); } @@ -236,7 +225,7 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> { static inline ReturnType Visit(Map* map, HeapObject* object) { int object_size = BodyDescriptor::SizeOf(map, object); BodyVisitorBase<StaticVisitor>::IteratePointers( - map->heap(), + map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size); @@ -247,7 +236,7 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> { static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) { ASSERT(BodyDescriptor::SizeOf(map, object) == object_size); BodyVisitorBase<StaticVisitor>::IteratePointers( - map->heap(), + map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size); @@ -261,7 +250,7 @@ class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> { public: static inline ReturnType Visit(Map* map, HeapObject* object) { BodyVisitorBase<StaticVisitor>::IteratePointers( - map->heap(), + map->GetHeap(), object, BodyDescriptor::kStartOffset, BodyDescriptor::kEndOffset); @@ -289,63 +278,7 @@ class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> { template<typename StaticVisitor> class StaticNewSpaceVisitor : public StaticVisitorBase { public: - static void Initialize() { - table_.Register(kVisitShortcutCandidate, - &FixedBodyVisitor<StaticVisitor, - ConsString::BodyDescriptor, - int>::Visit); - - table_.Register(kVisitConsString, - &FixedBodyVisitor<StaticVisitor, - ConsString::BodyDescriptor, - int>::Visit); - - table_.Register(kVisitSlicedString, - &FixedBodyVisitor<StaticVisitor, - SlicedString::BodyDescriptor, - int>::Visit); - - table_.Register(kVisitFixedArray, - &FlexibleBodyVisitor<StaticVisitor, - FixedArray::BodyDescriptor, - int>::Visit); - - table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray); - - table_.Register(kVisitGlobalContext, - &FixedBodyVisitor<StaticVisitor, - Context::ScavengeBodyDescriptor, - int>::Visit); - - table_.Register(kVisitByteArray, &VisitByteArray); - - table_.Register(kVisitSharedFunctionInfo, - 
&FixedBodyVisitor<StaticVisitor, - SharedFunctionInfo::BodyDescriptor, - int>::Visit); - - table_.Register(kVisitJSWeakMap, &VisitJSObject); - - table_.Register(kVisitJSRegExp, &VisitJSObject); - - table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString); - - table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString); - - table_.Register(kVisitJSFunction, - &JSObjectVisitor:: - template VisitSpecialized<JSFunction::kSize>); - - table_.RegisterSpecializations<DataObjectVisitor, - kVisitDataObject, - kVisitDataObjectGeneric>(); - table_.RegisterSpecializations<JSObjectVisitor, - kVisitJSObject, - kVisitJSObjectGeneric>(); - table_.RegisterSpecializations<StructVisitor, - kVisitStruct, - kVisitStructGeneric>(); - } + static void Initialize(); static inline int IterateBody(Map* map, HeapObject* obj) { return table_.GetVisitor(map)(map, obj); @@ -379,6 +312,10 @@ class StaticNewSpaceVisitor : public StaticVisitorBase { SeqTwoByteStringSize(map->instance_type()); } + static inline int VisitFreeSpace(Map* map, HeapObject* object) { + return FreeSpace::cast(object)->Size(); + } + class DataObjectVisitor { public: template<int object_size> @@ -410,55 +347,6 @@ VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback> StaticNewSpaceVisitor<StaticVisitor>::table_; -void Code::CodeIterateBody(ObjectVisitor* v) { - int mode_mask = RelocInfo::kCodeTargetMask | - RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | - RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) | - RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) | - RelocInfo::ModeMask(RelocInfo::JS_RETURN) | - RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) | - RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); - - // Use the relocation info pointer before it is visited by - // the heap compaction in the next statement. - RelocIterator it(this, mode_mask); - - IteratePointer(v, kRelocationInfoOffset); - IteratePointer(v, kDeoptimizationDataOffset); - - for (; !it.done(); it.next()) { - it.rinfo()->Visit(v); - } -} - - -template<typename StaticVisitor> -void Code::CodeIterateBody(Heap* heap) { - int mode_mask = RelocInfo::kCodeTargetMask | - RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | - RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) | - RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) | - RelocInfo::ModeMask(RelocInfo::JS_RETURN) | - RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) | - RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); - - // Use the relocation info pointer before it is visited by - // the heap compaction in the next statement. 
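Code::CodeIterateBody, removed from this header and reinstated in objects-visiting-inl.h, selects which relocation entries to visit by OR-ing per-mode bit masks and letting RelocIterator skip everything else. The small sketch below reproduces only that mask-filtered iteration; the mode list, entry struct and function names are invented.

#include <cstdio>

enum RelocModeSketch { CODE_TARGET_SKETCH, EMBEDDED_OBJECT_SKETCH,
                       EXTERNAL_REFERENCE_SKETCH, RUNTIME_ENTRY_SKETCH };

inline int ModeMaskSketch(RelocModeSketch mode) { return 1 << mode; }

struct RelocEntrySketch { RelocModeSketch mode; int data; };

// Visit only entries whose mode bit is set in mode_mask, the way RelocIterator
// filters entries for CodeIterateBody.
static void IterateReloc(const RelocEntrySketch* entries, int count,
                         int mode_mask) {
  for (int i = 0; i < count; i++) {
    if ((ModeMaskSketch(entries[i].mode) & mode_mask) == 0) continue;
    std::printf("visit mode=%d data=%d\n", entries[i].mode, entries[i].data);
  }
}

int main() {
  const RelocEntrySketch entries[] = {
    { CODE_TARGET_SKETCH, 1 },
    { EXTERNAL_REFERENCE_SKETCH, 2 },
    { EMBEDDED_OBJECT_SKETCH, 3 },
  };
  int mode_mask = ModeMaskSketch(CODE_TARGET_SKETCH) |
                  ModeMaskSketch(EMBEDDED_OBJECT_SKETCH);
  IterateReloc(entries, 3, mode_mask);  // visits the first and third entry only
  return 0;
}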
- RelocIterator it(this, mode_mask); - - StaticVisitor::VisitPointer( - heap, - reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset)); - StaticVisitor::VisitPointer( - heap, - reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset)); - - for (; !it.done(); it.next()) { - it.rinfo()->template Visit<StaticVisitor>(heap); - } -} - - } } // namespace v8::internal #endif // V8_OBJECTS_VISITING_H_ diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 41b4fd4db..561273230 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -39,7 +39,9 @@ #include "hydrogen.h" #include "objects-inl.h" #include "objects-visiting.h" +#include "objects-visiting-inl.h" #include "macro-assembler.h" +#include "mark-compact.h" #include "safepoint-table.h" #include "string-stream.h" #include "utils.h" @@ -132,27 +134,20 @@ Object* Object::ToBoolean() { void Object::Lookup(String* name, LookupResult* result) { Object* holder = NULL; - if (IsSmi()) { - Context* global_context = Isolate::Current()->context()->global_context(); - holder = global_context->number_function()->instance_prototype(); + if (IsJSReceiver()) { + holder = this; } else { - HeapObject* heap_object = HeapObject::cast(this); - if (heap_object->IsJSObject()) { - return JSObject::cast(this)->Lookup(name, result); - } else if (heap_object->IsJSProxy()) { - return result->HandlerResult(); - } Context* global_context = Isolate::Current()->context()->global_context(); - if (heap_object->IsString()) { - holder = global_context->string_function()->instance_prototype(); - } else if (heap_object->IsHeapNumber()) { + if (IsNumber()) { holder = global_context->number_function()->instance_prototype(); - } else if (heap_object->IsBoolean()) { + } else if (IsString()) { + holder = global_context->string_function()->instance_prototype(); + } else if (IsBoolean()) { holder = global_context->boolean_function()->instance_prototype(); } } ASSERT(holder != NULL); // Cannot handle null or undefined. - JSObject::cast(holder)->Lookup(name, result); + JSReceiver::cast(holder)->Lookup(name, result); } @@ -167,10 +162,9 @@ MaybeObject* Object::GetPropertyWithReceiver(Object* receiver, } -MaybeObject* Object::GetPropertyWithCallback(Object* receiver, - Object* structure, - String* name, - Object* holder) { +MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver, + Object* structure, + String* name) { Isolate* isolate = name->GetIsolate(); // To accommodate both the old and the new api we switch on the // data structure used to store the callbacks. 
Eventually foreign @@ -191,10 +185,9 @@ MaybeObject* Object::GetPropertyWithCallback(Object* receiver, v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj); HandleScope scope(isolate); JSObject* self = JSObject::cast(receiver); - JSObject* holder_handle = JSObject::cast(holder); Handle<String> key(name); LOG(isolate, ApiNamedPropertyAccess("load", self, name)); - CustomArguments args(isolate, data->data(), self, holder_handle); + CustomArguments args(isolate, data->data(), self, this); v8::AccessorInfo info(args.end()); v8::Handle<v8::Value> result; { @@ -212,9 +205,9 @@ MaybeObject* Object::GetPropertyWithCallback(Object* receiver, // __defineGetter__ callback if (structure->IsFixedArray()) { Object* getter = FixedArray::cast(structure)->get(kGetterIndex); - if (getter->IsJSFunction()) { - return Object::GetPropertyWithDefinedGetter(receiver, - JSFunction::cast(getter)); + if (getter->IsSpecFunction()) { + // TODO(rossberg): nicer would be to cast to some JSCallable here... + return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter)); } // Getter is not a function. return isolate->heap()->undefined_value(); @@ -225,47 +218,64 @@ MaybeObject* Object::GetPropertyWithCallback(Object* receiver, } -MaybeObject* Object::GetPropertyWithHandler(Object* receiver_raw, - String* name_raw, - Object* handler_raw) { - Isolate* isolate = name_raw->GetIsolate(); +MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw, + String* name_raw) { + Isolate* isolate = GetIsolate(); HandleScope scope(isolate); Handle<Object> receiver(receiver_raw); Handle<Object> name(name_raw); - Handle<Object> handler(handler_raw); - // Extract trap function. - Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("get"); - Handle<Object> trap(v8::internal::GetProperty(handler, trap_name)); + Handle<Object> args[] = { receiver, name }; + Handle<Object> result = CallTrap( + "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args); if (isolate->has_pending_exception()) return Failure::Exception(); - if (trap->IsUndefined()) { - // Get the derived `get' property. - trap = isolate->derived_get_trap(); - } - - // Call trap function. 
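JSProxy::GetPropertyWithHandler above now funnels through CallTrap: look the named trap up on the handler, substitute the default ("derived") trap when it is absent, then invoke it with (receiver, name). The same control flow is sketched below with standard containers and strings instead of V8 handles; all names and types here are invented for illustration.

#include <cstdio>
#include <functional>
#include <map>
#include <string>

typedef std::function<std::string(const std::string& receiver,
                                  const std::string& name)> TrapSketch;

// Resolve a trap: prefer the handler's own entry, else the derived default.
static TrapSketch ResolveTrap(const std::map<std::string, TrapSketch>& handler,
                              const std::string& trap_name,
                              const TrapSketch& derived_trap) {
  std::map<std::string, TrapSketch>::const_iterator it = handler.find(trap_name);
  return it == handler.end() ? derived_trap : it->second;
}

int main() {
  std::map<std::string, TrapSketch> handler;
  handler["get"] = [](const std::string&, const std::string& name) {
    return "trapped:" + name;
  };
  TrapSketch derived_get = [](const std::string&, const std::string& name) {
    return "default:" + name;
  };
  // "get" is present on the handler; "has" falls back to the derived trap.
  std::printf("%s\n", ResolveTrap(handler, "get", derived_get)("obj", "x").c_str());
  std::printf("%s\n", ResolveTrap(handler, "has", derived_get)("obj", "x").c_str());
  return 0;
}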
- Object** args[] = { receiver.location(), name.location() }; - bool has_exception; - Handle<Object> result = - Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception); - if (has_exception) return Failure::Exception(); return *result; } +MaybeObject* JSProxy::GetElementWithHandler(Object* receiver, + uint32_t index) { + String* name; + MaybeObject* maybe = GetHeap()->Uint32ToString(index); + if (!maybe->To<String>(&name)) return maybe; + return GetPropertyWithHandler(receiver, name); +} + + +MaybeObject* JSProxy::SetElementWithHandler(uint32_t index, + Object* value, + StrictModeFlag strict_mode) { + String* name; + MaybeObject* maybe = GetHeap()->Uint32ToString(index); + if (!maybe->To<String>(&name)) return maybe; + return SetPropertyWithHandler(name, value, NONE, strict_mode); +} + + +bool JSProxy::HasElementWithHandler(uint32_t index) { + String* name; + MaybeObject* maybe = GetHeap()->Uint32ToString(index); + if (!maybe->To<String>(&name)) return maybe; + return HasPropertyWithHandler(name); +} + + MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver, - JSFunction* getter) { + JSReceiver* getter) { HandleScope scope; - Handle<JSFunction> fun(JSFunction::cast(getter)); + Handle<JSReceiver> fun(getter); Handle<Object> self(receiver); #ifdef ENABLE_DEBUGGER_SUPPORT Debug* debug = fun->GetHeap()->isolate()->debug(); // Handle stepping into a getter if step into is active. - if (debug->StepInActive()) { - debug->HandleStepIn(fun, Handle<Object>::null(), 0, false); + // TODO(rossberg): should this apply to getters that are function proxies? + if (debug->StepInActive() && fun->IsJSFunction()) { + debug->HandleStepIn( + Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false); } #endif + bool has_pending_exception; Handle<Object> result = Execution::Call(fun, self, 0, NULL, &has_pending_exception); @@ -290,10 +300,8 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck( AccessorInfo* info = AccessorInfo::cast(obj); if (info->all_can_read()) { *attributes = result->GetAttributes(); - return GetPropertyWithCallback(receiver, - result->GetCallbackObject(), - name, - result->holder()); + return result->holder()->GetPropertyWithCallback( + receiver, result->GetCallbackObject(), name); } } break; @@ -486,7 +494,7 @@ MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) { } JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(dictionary->ValueAt(entry)); - cell->set_value(cell->heap()->the_hole_value()); + cell->set_value(cell->GetHeap()->the_hole_value()); dictionary->DetailsAtPut(entry, details.AsDeleted()); } else { Object* deleted = dictionary->DeleteProperty(entry, mode); @@ -537,7 +545,9 @@ MaybeObject* Object::GetProperty(Object* receiver, // holder in the prototype chain. // Proxy handlers do not use the proxy's prototype, so we can skip this. if (!result->IsHandler()) { - Object* last = result->IsProperty() ? result->holder() : heap->null_value(); + Object* last = result->IsProperty() + ? 
result->holder() + : Object::cast(heap->null_value()); ASSERT(this != this->GetPrototype()); for (Object* current = this; true; current = current->GetPrototype()) { if (current->IsAccessCheckNeeded()) { @@ -566,30 +576,26 @@ MaybeObject* Object::GetProperty(Object* receiver, } *attributes = result->GetAttributes(); Object* value; - JSObject* holder = result->holder(); switch (result->type()) { case NORMAL: - value = holder->GetNormalizedProperty(result); + value = result->holder()->GetNormalizedProperty(result); ASSERT(!value->IsTheHole() || result->IsReadOnly()); return value->IsTheHole() ? heap->undefined_value() : value; case FIELD: - value = holder->FastPropertyAt(result->GetFieldIndex()); + value = result->holder()->FastPropertyAt(result->GetFieldIndex()); ASSERT(!value->IsTheHole() || result->IsReadOnly()); return value->IsTheHole() ? heap->undefined_value() : value; case CONSTANT_FUNCTION: return result->GetConstantFunction(); case CALLBACKS: - return GetPropertyWithCallback(receiver, - result->GetCallbackObject(), - name, - holder); - case HANDLER: { - JSProxy* proxy = JSProxy::cast(this); - return GetPropertyWithHandler(receiver, name, proxy->handler()); - } + return result->holder()->GetPropertyWithCallback( + receiver, result->GetCallbackObject(), name); + case HANDLER: + return result->proxy()->GetPropertyWithHandler(receiver, name); case INTERCEPTOR: { JSObject* recvr = JSObject::cast(receiver); - return holder->GetPropertyWithInterceptor(recvr, name, attributes); + return result->holder()->GetPropertyWithInterceptor( + recvr, name, attributes); } case MAP_TRANSITION: case ELEMENTS_TRANSITION: @@ -613,28 +619,21 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) { for (holder = this; holder != heap->null_value(); holder = holder->GetPrototype()) { - if (holder->IsSmi()) { - Context* global_context = Isolate::Current()->context()->global_context(); - holder = global_context->number_function()->instance_prototype(); - } else { - HeapObject* heap_object = HeapObject::cast(holder); - if (!heap_object->IsJSObject()) { - Isolate* isolate = heap->isolate(); - Context* global_context = isolate->context()->global_context(); - if (heap_object->IsString()) { - holder = global_context->string_function()->instance_prototype(); - } else if (heap_object->IsHeapNumber()) { - holder = global_context->number_function()->instance_prototype(); - } else if (heap_object->IsBoolean()) { - holder = global_context->boolean_function()->instance_prototype(); - } else if (heap_object->IsJSProxy()) { - // TODO(rossberg): do something - return heap->undefined_value(); // For now... - } else { - // Undefined and null have no indexed properties. - ASSERT(heap_object->IsUndefined() || heap_object->IsNull()); - return heap->undefined_value(); - } + if (!holder->IsJSObject()) { + Isolate* isolate = heap->isolate(); + Context* global_context = isolate->context()->global_context(); + if (holder->IsNumber()) { + holder = global_context->number_function()->instance_prototype(); + } else if (holder->IsString()) { + holder = global_context->string_function()->instance_prototype(); + } else if (holder->IsBoolean()) { + holder = global_context->boolean_function()->instance_prototype(); + } else if (holder->IsJSProxy()) { + return JSProxy::cast(holder)->GetElementWithHandler(receiver, index); + } else { + // Undefined and null have no indexed properties. 
+ ASSERT(holder->IsUndefined() || holder->IsNull()); + return heap->undefined_value(); } } @@ -877,6 +876,9 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) { // Fill the remainder of the string with dead wood. int new_size = this->Size(); // Byte size of the external String object. heap->CreateFillerObjectAt(this->address() + new_size, size - new_size); + if (Marking::IsBlack(Marking::MarkBitFrom(this))) { + MemoryChunk::IncrementLiveBytes(this->address(), new_size - size); + } return true; } @@ -923,6 +925,10 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) { // Fill the remainder of the string with dead wood. int new_size = this->Size(); // Byte size of the external String object. heap->CreateFillerObjectAt(this->address() + new_size, size - new_size); + if (Marking::IsBlack(Marking::MarkBitFrom(this))) { + MemoryChunk::IncrementLiveBytes(this->address(), new_size - size); + } + return true; } @@ -998,8 +1004,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) { break; } case JS_WEAK_MAP_TYPE: { - int elements = JSWeakMap::cast(this)->table()->NumberOfElements(); - accumulator->Add("<JS WeakMap[%d]>", elements); + accumulator->Add("<JS WeakMap>"); break; } case JS_REGEXP_TYPE: { @@ -1027,7 +1032,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) { // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue). default: { Map* map_of_this = map(); - Heap* heap = map_of_this->heap(); + Heap* heap = GetHeap(); Object* constructor = map_of_this->constructor(); bool printed = false; if (constructor->IsHeapObject() && @@ -1049,7 +1054,6 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) { global_object ? "Global Object: " : "", vowel ? "n" : ""); accumulator->Put(str); - accumulator->Put('>'); printed = true; } } @@ -1071,7 +1075,6 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) { void HeapObject::HeapObjectShortPrint(StringStream* accumulator) { - // if (!HEAP->InNewSpace(this)) PrintF("*", this); Heap* heap = GetHeap(); if (!heap->Contains(this)) { accumulator->Add("!!!INVALID POINTER!!!"); @@ -1094,7 +1097,7 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) { } switch (map()->instance_type()) { case MAP_TYPE: - accumulator->Add("<Map>"); + accumulator->Add("<Map(elements=%u)>", Map::cast(this)->elements_kind()); break; case FIXED_ARRAY_TYPE: accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length()); @@ -1102,6 +1105,9 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) { case BYTE_ARRAY_TYPE: accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length()); break; + case FREE_SPACE_TYPE: + accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size()); + break; case EXTERNAL_PIXEL_ARRAY_TYPE: accumulator->Add("<ExternalPixelArray[%u]>", ExternalPixelArray::cast(this)->length()); @@ -1277,6 +1283,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size, case HEAP_NUMBER_TYPE: case FILLER_TYPE: case BYTE_ARRAY_TYPE: + case FREE_SPACE_TYPE: case EXTERNAL_PIXEL_ARRAY_TYPE: case EXTERNAL_BYTE_ARRAY_TYPE: case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE: @@ -1533,7 +1540,7 @@ MaybeObject* JSObject::AddConstantFunctionProperty( // If the old map is the global object map (from new Object()), // then transitions are not added to it, so we are done. 
- Heap* heap = old_map->heap(); + Heap* heap = GetHeap(); if (old_map == heap->isolate()->context()->global_context()-> object_function()->map()) { return function; @@ -1609,7 +1616,7 @@ MaybeObject* JSObject::AddProperty(String* name, StrictModeFlag strict_mode) { ASSERT(!IsJSGlobalProxy()); Map* map_of_this = map(); - Heap* heap = map_of_this->heap(); + Heap* heap = GetHeap(); if (!map_of_this->is_extensible()) { if (strict_mode == kNonStrictMode) { return heap->undefined_value(); @@ -1658,6 +1665,14 @@ MaybeObject* JSObject::SetPropertyPostInterceptor( // found. Use set property to handle all these cases. return SetProperty(&result, name, value, attributes, strict_mode); } + bool found = false; + MaybeObject* result_object; + result_object = SetPropertyWithCallbackSetterInPrototypes(name, + value, + attributes, + &found, + strict_mode); + if (found) return result_object; // Add a new real property. return AddProperty(name, value, attributes, strict_mode); } @@ -1696,7 +1711,7 @@ MaybeObject* JSObject::ConvertDescriptorToFieldAndMapTransition( return result; } // Do not add transitions to the map of "new Object()". - if (map() == old_map->heap()->isolate()->context()->global_context()-> + if (map() == GetIsolate()->context()->global_context()-> object_function()->map()) { return result; } @@ -1880,8 +1895,9 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, if (structure->IsFixedArray()) { Object* setter = FixedArray::cast(structure)->get(kSetterIndex); - if (setter->IsJSFunction()) { - return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value); + if (setter->IsSpecFunction()) { + // TODO(rossberg): nicer would be to cast to some JSCallable here... + return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value); } else { if (strict_mode == kNonStrictMode) { return value; @@ -1900,22 +1916,24 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, } -MaybeObject* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter, - Object* value) { +MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter, + Object* value) { Isolate* isolate = GetIsolate(); Handle<Object> value_handle(value, isolate); - Handle<JSFunction> fun(JSFunction::cast(setter), isolate); - Handle<JSObject> self(this, isolate); + Handle<JSReceiver> fun(setter, isolate); + Handle<JSReceiver> self(this, isolate); #ifdef ENABLE_DEBUGGER_SUPPORT Debug* debug = isolate->debug(); // Handle stepping into a setter if step into is active. - if (debug->StepInActive()) { - debug->HandleStepIn(fun, Handle<Object>::null(), 0, false); + // TODO(rossberg): should this apply to getters that are function proxies? + if (debug->StepInActive() && fun->IsJSFunction()) { + debug->HandleStepIn( + Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false); } #endif bool has_pending_exception; - Object** argv[] = { value_handle.location() }; - Execution::Call(fun, self, 1, argv, &has_pending_exception); + Handle<Object> argv[] = { value_handle }; + Execution::Call(fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception); // Check for pending exception and return the result. 
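A recurring change in this hunk and the ones that follow is the switch from raw Object** argument arrays to Handle<Object> argv[] passed with ARRAY_SIZE(argv), so the argument count is always derived from the array itself. A standalone sketch of that shape with a toy handle type (the ARRAY_SIZE macro here is the usual sizeof-based form, shown for illustration):

#include <cstddef>
#include <cstdio>

// Toy stand-in for a GC-safe handle; only the "array of handles" shape matters.
template <typename T>
struct Handle {
  T value;
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void Call(int argc, const Handle<int> argv[]) {
  for (int i = 0; i < argc; ++i) std::printf("arg %d = %d\n", i, argv[i].value);
}

int main() {
  Handle<int> argv[] = { {1}, {2}, {3} };
  Call(ARRAY_SIZE(argv), argv);  // argc can never drift from the array length
  return 0;
}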
if (has_pending_exception) return Failure::Exception(); return *value_handle; @@ -1928,6 +1946,9 @@ void JSObject::LookupCallbackSetterInPrototypes(String* name, for (Object* pt = GetPrototype(); pt != heap->null_value(); pt = pt->GetPrototype()) { + if (pt->IsJSProxy()) { + return result->HandlerResult(JSProxy::cast(pt)); + } JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result); if (result->IsProperty()) { if (result->type() == CALLBACKS && !result->IsReadOnly()) return; @@ -1948,6 +1969,16 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes( for (Object* pt = GetPrototype(); pt != heap->null_value(); pt = pt->GetPrototype()) { + if (pt->IsJSProxy()) { + String* name; + MaybeObject* maybe = GetHeap()->Uint32ToString(index); + if (!maybe->To<String>(&name)) { + *found = true; // Force abort + return maybe; + } + return JSProxy::cast(pt)->SetPropertyWithHandlerIfDefiningSetter( + name, value, NONE, strict_mode, found); + } if (!JSObject::cast(pt)->HasDictionaryElements()) { continue; } @@ -1969,6 +2000,60 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes( return heap->the_hole_value(); } +MaybeObject* JSObject::SetPropertyWithCallbackSetterInPrototypes( + String* name, + Object* value, + PropertyAttributes attributes, + bool* found, + StrictModeFlag strict_mode) { + LookupResult result; + LookupCallbackSetterInPrototypes(name, &result); + Heap* heap = GetHeap(); + if (result.IsFound()) { + *found = true; + if (result.type() == CALLBACKS) { + return SetPropertyWithCallback(result.GetCallbackObject(), + name, + value, + result.holder(), + strict_mode); + } else if (result.type() == HANDLER) { + // We could not find a local property so let's check whether there is an + // accessor that wants to handle the property. + LookupResult accessor_result; + LookupCallbackSetterInPrototypes(name, &accessor_result); + if (accessor_result.IsFound()) { + if (accessor_result.type() == CALLBACKS) { + return SetPropertyWithCallback(accessor_result.GetCallbackObject(), + name, + value, + accessor_result.holder(), + strict_mode); + } else if (accessor_result.type() == HANDLER) { + // There is a proxy in the prototype chain. Invoke its + // getPropertyDescriptor trap. + bool found = false; + // SetPropertyWithHandlerIfDefiningSetter can cause GC, + // make sure to use the handlified references after calling + // the function. + Handle<JSObject> self(this); + Handle<String> hname(name); + Handle<Object> hvalue(value); + MaybeObject* result = + accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter( + name, value, attributes, strict_mode, &found); + if (found) return result; + // The proxy does not define the property as an accessor. + // Consequently, it has no effect on setting the receiver. 
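SetPropertyWithCallbackSetterInPrototypes, introduced above (its AddProperty fall-back follows just below), walks the prototype chain and lets a callback setter or proxy handler found there intercept the store; if nothing in the chain claims the property, the receiver simply adds a plain own property. A rough standalone model of that walk, with ToyObject standing in for the real receiver types:

#include <map>
#include <string>

// Toy receiver: a map of own properties plus a prototype link.
struct ToyObject {
  std::map<std::string, int> own;
  ToyObject* prototype = nullptr;

  // Returns true if this object (a callback setter or proxy handler in the
  // real code) intercepts the store.
  virtual bool InterceptSet(const std::string& name, int value) {
    (void)name; (void)value;
    return false;
  }
  virtual ~ToyObject() = default;

  void Set(const std::string& name, int value) {
    for (ToyObject* p = prototype; p != nullptr; p = p->prototype) {
      if (p->InterceptSet(name, value)) return;  // the chain claimed the store
    }
    own[name] = value;  // nothing claimed it: add a plain own property
  }
};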
+ return self->AddProperty(*hname, *hvalue, attributes, strict_mode); + } + } + } + } + *found = false; + return heap->the_hole_value(); +} + void JSObject::LookupInDescriptor(String* name, LookupResult* result) { DescriptorArray* descriptors = map()->instance_descriptors(); @@ -1985,7 +2070,8 @@ void Map::LookupInDescriptors(JSObject* holder, String* name, LookupResult* result) { DescriptorArray* descriptors = instance_descriptors(); - DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache(); + DescriptorLookupCache* cache = + GetHeap()->isolate()->descriptor_lookup_cache(); int number = cache->Lookup(descriptors, name); if (number == DescriptorLookupCache::kAbsent) { number = descriptors->Search(name); @@ -1999,75 +2085,239 @@ void Map::LookupInDescriptors(JSObject* holder, } -MaybeObject* Map::GetElementsTransitionMap(ElementsKind elements_kind, - bool safe_to_add_transition) { - Heap* current_heap = heap(); +static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents, + ElementsKind elements_kind) { + if (descriptor_contents->IsMap()) { + Map* map = Map::cast(descriptor_contents); + if (map->elements_kind() == elements_kind) { + return map; + } + return NULL; + } + + FixedArray* map_array = FixedArray::cast(descriptor_contents); + for (int i = 0; i < map_array->length(); ++i) { + Object* current = map_array->get(i); + // Skip undefined slots, they are sentinels for reclaimed maps. + if (!current->IsUndefined()) { + Map* current_map = Map::cast(map_array->get(i)); + if (current_map->elements_kind() == elements_kind) { + return current_map; + } + } + } + + return NULL; +} + + +static MaybeObject* AddElementsTransitionMapToDescriptor( + Object* descriptor_contents, + Map* new_map) { + // Nothing was in the descriptor for an ELEMENTS_TRANSITION, + // simply add the map. + if (descriptor_contents == NULL) { + return new_map; + } + + // There was already a map in the descriptor, create a 2-element FixedArray + // to contain the existing map plus the new one. + FixedArray* new_array; + Heap* heap = new_map->GetHeap(); + if (descriptor_contents->IsMap()) { + // Must tenure, DescriptorArray expects no new-space objects. + MaybeObject* maybe_new_array = heap->AllocateFixedArray(2, TENURED); + if (!maybe_new_array->To<FixedArray>(&new_array)) { + return maybe_new_array; + } + new_array->set(0, descriptor_contents); + new_array->set(1, new_map); + return new_array; + } + + // The descriptor already contained a list of maps for different ElementKinds + // of ELEMENTS_TRANSITION, first check the existing array for an undefined + // slot, and if that's not available, create a FixedArray to hold the existing + // maps plus the new one and fill it in. + FixedArray* array = FixedArray::cast(descriptor_contents); + for (int i = 0; i < array->length(); ++i) { + if (array->get(i)->IsUndefined()) { + array->set(i, new_map); + return array; + } + } + + // Must tenure, DescriptorArray expects no new-space objects. 
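Map::LookupInDescriptors and the new GetDescriptorContents both consult the DescriptorLookupCache before searching the descriptor array: on a miss they do the linear search and record the answer, including negative results. A generic sketch of that memoized-search shape with illustrative container types:

#include <map>
#include <string>
#include <utility>
#include <vector>

const int kNotFound = -1;

// Search a descriptor-like array for a name, consulting a cache first and
// recording the answer (even a miss) so repeated lookups stay cheap.
int CachedSearch(std::map<std::pair<const void*, std::string>, int>& cache,
                 const std::vector<std::string>& descriptors,
                 const std::string& name) {
  auto key = std::make_pair(static_cast<const void*>(&descriptors), name);
  auto it = cache.find(key);
  if (it != cache.end()) return it->second;  // cache hit
  int index = kNotFound;
  for (int i = 0; i < static_cast<int>(descriptors.size()); ++i) {
    if (descriptors[i] == name) { index = i; break; }
  }
  cache[key] = index;
  return index;
}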
+ MaybeObject* maybe_new_array = + heap->AllocateFixedArray(array->length() + 1, TENURED); + if (!maybe_new_array->To<FixedArray>(&new_array)) { + return maybe_new_array; + } + int i = 0; + while (i < array->length()) { + new_array->set(i, array->get(i)); + ++i; + } + new_array->set(i, new_map); + return new_array; +} + + +String* Map::elements_transition_sentinel_name() { + return GetHeap()->empty_symbol(); +} + + +Object* Map::GetDescriptorContents(String* sentinel_name, + bool* safe_to_add_transition) { + // Get the cached index for the descriptors lookup, or find and cache it. DescriptorArray* descriptors = instance_descriptors(); - String* elements_transition_sentinel_name = current_heap->empty_symbol(); + DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache(); + int index = cache->Lookup(descriptors, sentinel_name); + if (index == DescriptorLookupCache::kAbsent) { + index = descriptors->Search(sentinel_name); + cache->Update(descriptors, sentinel_name, index); + } + // If the transition already exists, return its descriptor. + if (index != DescriptorArray::kNotFound) { + PropertyDetails details(descriptors->GetDetails(index)); + if (details.type() == ELEMENTS_TRANSITION) { + return descriptors->GetValue(index); + } else { + *safe_to_add_transition = false; + } + } + return NULL; +} + + +Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind, + bool* safe_to_add_transition) { + // Special case: indirect SMI->FAST transition (cf. comment in + // AddElementsTransition()). + if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS && + elements_kind == FAST_ELEMENTS) { + Map* double_map = this->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, + safe_to_add_transition); + if (double_map == NULL) return double_map; + return double_map->LookupElementsTransitionMap(FAST_ELEMENTS, + safe_to_add_transition); + } + Object* descriptor_contents = GetDescriptorContents( + elements_transition_sentinel_name(), safe_to_add_transition); + if (descriptor_contents != NULL) { + Map* maybe_transition_map = + GetElementsTransitionMapFromDescriptor(descriptor_contents, + elements_kind); + ASSERT(maybe_transition_map == NULL || maybe_transition_map->IsMap()); + return maybe_transition_map; + } + return NULL; +} + + +MaybeObject* Map::AddElementsTransition(ElementsKind elements_kind, + Map* transitioned_map) { + // The map transition graph should be a tree, therefore the transition + // from SMI to FAST elements is not done directly, but by going through + // DOUBLE elements first. + if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS && + elements_kind == FAST_ELEMENTS) { + bool safe_to_add = true; + Map* double_map = this->LookupElementsTransitionMap( + FAST_DOUBLE_ELEMENTS, &safe_to_add); + // This method is only called when safe_to_add_transition has been found + // to be true earlier. 
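AddElementsTransitionMapToDescriptor, completed above, stores either a single map or a fixed array of maps in the transition descriptor, reusing undefined (reclaimed) slots before growing the array. A simplified sketch with nullptr playing the role of the undefined sentinel and a std::vector standing in for the tenured FixedArray:

#include <vector>

struct ToyMap { int elements_kind; };

// Record a transition, reusing a reclaimed (nullptr) slot before growing.
void AddTransition(std::vector<ToyMap*>& slots, ToyMap* new_map) {
  for (ToyMap*& slot : slots) {
    if (slot == nullptr) {  // slot left behind by a reclaimed map
      slot = new_map;
      return;
    }
  }
  slots.push_back(new_map);  // no free slot: grow by one
}

// Find the transition for a given kind, skipping reclaimed slots.
ToyMap* FindTransition(const std::vector<ToyMap*>& slots, int kind) {
  for (ToyMap* slot : slots) {
    if (slot != nullptr && slot->elements_kind == kind) return slot;
  }
  return nullptr;
}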
+ ASSERT(safe_to_add); + + if (double_map == NULL) { + MaybeObject* maybe_map = this->CopyDropTransitions(); + if (!maybe_map->To(&double_map)) return maybe_map; + double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS); + MaybeObject* maybe_double_transition = this->AddElementsTransition( + FAST_DOUBLE_ELEMENTS, double_map); + if (maybe_double_transition->IsFailure()) return maybe_double_transition; + } + return double_map->AddElementsTransition(FAST_ELEMENTS, transitioned_map); + } + + bool safe_to_add_transition = true; + Object* descriptor_contents = GetDescriptorContents( + elements_transition_sentinel_name(), &safe_to_add_transition); + // This method is only called when safe_to_add_transition has been found + // to be true earlier. + ASSERT(safe_to_add_transition); + MaybeObject* maybe_new_contents = + AddElementsTransitionMapToDescriptor(descriptor_contents, + transitioned_map); + Object* new_contents; + if (!maybe_new_contents->ToObject(&new_contents)) { + return maybe_new_contents; + } + + ElementsTransitionDescriptor desc(elements_transition_sentinel_name(), + new_contents); + Object* new_descriptors; + MaybeObject* maybe_new_descriptors = + instance_descriptors()->CopyInsert(&desc, KEEP_TRANSITIONS); + if (!maybe_new_descriptors->ToObject(&new_descriptors)) { + return maybe_new_descriptors; + } + set_instance_descriptors(DescriptorArray::cast(new_descriptors)); + return this; +} + + +MaybeObject* JSObject::GetElementsTransitionMap(ElementsKind to_kind) { + Map* current_map = map(); + ElementsKind from_kind = current_map->elements_kind(); + + if (from_kind == to_kind) return current_map; + + // Only objects with FastProperties can have DescriptorArrays and can track + // element-related maps. Also don't add descriptors to maps that are shared. + bool safe_to_add_transition = HasFastProperties() && + !current_map->IsUndefined() && + !current_map->is_shared(); + + // Prevent long chains of DICTIONARY -> FAST_ELEMENTS maps caused by objects + // with elements that switch back and forth between dictionary and fast + // element mode. + if (from_kind == DICTIONARY_ELEMENTS && to_kind == FAST_ELEMENTS) { + safe_to_add_transition = false; + } if (safe_to_add_transition) { // It's only safe to manipulate the descriptor array if it would be // safe to add a transition. - - ASSERT(!is_shared()); // no transitions can be added to shared maps. - // Check if the elements transition already exists. - DescriptorLookupCache* cache = - current_heap->isolate()->descriptor_lookup_cache(); - int index = cache->Lookup(descriptors, elements_transition_sentinel_name); - if (index == DescriptorLookupCache::kAbsent) { - index = descriptors->Search(elements_transition_sentinel_name); - cache->Update(descriptors, - elements_transition_sentinel_name, - index); - } - - // If the transition already exists, check the type. If there is a match, - // return it. - if (index != DescriptorArray::kNotFound) { - PropertyDetails details(PropertyDetails(descriptors->GetDetails(index))); - if (details.type() == ELEMENTS_TRANSITION && - details.elements_kind() == elements_kind) { - return descriptors->GetValue(index); - } else { - safe_to_add_transition = false; - } + Map* maybe_transition_map = current_map->LookupElementsTransitionMap( + to_kind, &safe_to_add_transition); + if (maybe_transition_map != NULL) { + return maybe_transition_map; } } + Map* new_map = NULL; + // No transition to an existing map for the given ElementsKind. Make a new // one. 
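AddElementsTransition and LookupElementsTransitionMap keep the map transition graph a tree: a FAST_SMI_ONLY map never points directly at a FAST map, the edge is routed through a FAST_DOUBLE map created on demand. A compact sketch of that routing decision, with a single-child node type as an illustrative stand-in for the real transition structure:

#include <memory>

enum ToyKind { SMI_ONLY, DOUBLE, FAST };

// Single-child node: enough to show how the SMI_ONLY -> FAST edge is split.
struct ToyTransitionNode {
  ToyKind kind;
  std::unique_ptr<ToyTransitionNode> next;
  explicit ToyTransitionNode(ToyKind k) : kind(k) {}
};

ToyTransitionNode* AddTransition(ToyTransitionNode* from, ToyKind to) {
  if (from->kind == SMI_ONLY && to == FAST) {
    // Indirect: make sure the DOUBLE step exists, then hang FAST off it.
    if (from->next == nullptr || from->next->kind != DOUBLE) {
      from->next = std::make_unique<ToyTransitionNode>(DOUBLE);
    }
    return AddTransition(from->next.get(), FAST);
  }
  from->next = std::make_unique<ToyTransitionNode>(to);
  return from->next.get();
}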
- Object* obj; - { MaybeObject* maybe_map = CopyDropTransitions(); - if (!maybe_map->ToObject(&obj)) return maybe_map; + { MaybeObject* maybe_map = current_map->CopyDropTransitions(); + if (!maybe_map->To(&new_map)) return maybe_map; } - Map* new_map = Map::cast(obj); - new_map->set_elements_kind(elements_kind); - GetIsolate()->counters()->map_to_external_array_elements()->Increment(); + new_map->set_elements_kind(to_kind); // Only remember the map transition if the object's map is NOT equal to the // global object_function's map and there is not an already existing // non-matching element transition. - bool allow_map_transition = - safe_to_add_transition && + bool allow_map_transition = safe_to_add_transition && (GetIsolate()->context()->global_context()->object_function()->map() != map()); if (allow_map_transition) { - // Allocate new instance descriptors for the old map with map transition. - ElementsTransitionDescriptor desc(elements_transition_sentinel_name, - Map::cast(new_map), - elements_kind); - Object* new_descriptors; - MaybeObject* maybe_new_descriptors = descriptors->CopyInsert( - &desc, - KEEP_TRANSITIONS); - if (!maybe_new_descriptors->ToObject(&new_descriptors)) { - return maybe_new_descriptors; - } - descriptors = DescriptorArray::cast(new_descriptors); - set_instance_descriptors(descriptors); + MaybeObject* maybe_transition = + current_map->AddElementsTransition(to_kind, new_map); + if (maybe_transition->IsFailure()) return maybe_transition; } - return new_map; } @@ -2078,6 +2328,7 @@ void JSObject::LocalLookupRealNamedProperty(String* name, Object* proto = GetPrototype(); if (proto->IsNull()) return result->NotFound(); ASSERT(proto->IsJSGlobalObject()); + // A GlobalProxy's prototype should always be a proper JSObject. return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result); } @@ -2204,7 +2455,7 @@ MaybeObject* JSReceiver::SetProperty(LookupResult* result, PropertyAttributes attributes, StrictModeFlag strict_mode) { if (result->IsFound() && result->type() == HANDLER) { - return JSProxy::cast(this)->SetPropertyWithHandler( + return result->proxy()->SetPropertyWithHandler( key, value, attributes, strict_mode); } else { return JSObject::cast(this)->SetPropertyForResult( @@ -2218,22 +2469,11 @@ bool JSProxy::HasPropertyWithHandler(String* name_raw) { HandleScope scope(isolate); Handle<Object> receiver(this); Handle<Object> name(name_raw); - Handle<Object> handler(this->handler()); - // Extract trap function. - Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("has"); - Handle<Object> trap(v8::internal::GetProperty(handler, trap_name)); + Handle<Object> args[] = { name }; + Handle<Object> result = CallTrap( + "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args); if (isolate->has_pending_exception()) return Failure::Exception(); - if (trap->IsUndefined()) { - trap = isolate->derived_has_trap(); - } - - // Call trap function. - Object** args[] = { name.location() }; - bool has_exception; - Handle<Object> result = - Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception); - if (has_exception) return Failure::Exception(); return result->ToBoolean()->IsTrue(); } @@ -2249,24 +2489,82 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler( Handle<Object> receiver(this); Handle<Object> name(name_raw); Handle<Object> value(value_raw); - Handle<Object> handler(this->handler()); - // Extract trap function. 
- Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("set"); - Handle<Object> trap(v8::internal::GetProperty(handler, trap_name)); + Handle<Object> args[] = { receiver, name, value }; + CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args); + if (isolate->has_pending_exception()) return Failure::Exception(); + + return *value; +} + + +MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandlerIfDefiningSetter( + String* name_raw, + Object* value_raw, + PropertyAttributes attributes, + StrictModeFlag strict_mode, + bool* found) { + *found = true; // except where defined otherwise... + Isolate* isolate = GetHeap()->isolate(); + Handle<JSProxy> proxy(this); + Handle<String> name(name_raw); + Handle<Object> value(value_raw); + Handle<Object> args[] = { name }; + Handle<Object> result = proxy->CallTrap( + "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args); if (isolate->has_pending_exception()) return Failure::Exception(); - if (trap->IsUndefined()) { - trap = isolate->derived_set_trap(); - } - // Call trap function. - Object** args[] = { - receiver.location(), name.location(), value.location() - }; - bool has_exception; - Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception); - if (has_exception) return Failure::Exception(); + if (!result->IsUndefined()) { + // The proxy handler cares about this property. + // Check whether it is virtualized as an accessor. + // Emulate [[GetProperty]] semantics for proxies. + bool has_pending_exception; + Handle<Object> argv[] = { result }; + Handle<Object> desc = + Execution::Call(isolate->to_complete_property_descriptor(), result, + ARRAY_SIZE(argv), argv, &has_pending_exception); + if (has_pending_exception) return Failure::Exception(); + + Handle<String> conf_name = + isolate->factory()->LookupAsciiSymbol("configurable_"); + Handle<Object> configurable(v8::internal::GetProperty(desc, conf_name)); + ASSERT(!isolate->has_pending_exception()); + if (configurable->IsFalse()) { + Handle<Object> args[] = { Handle<Object>(proxy->handler()), proxy, name }; + Handle<Object> error = isolate->factory()->NewTypeError( + "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args))); + return isolate->Throw(*error); + } + ASSERT(configurable->IsTrue()); + + // Check for AccessorDescriptor. + Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_"); + Handle<Object> setter(v8::internal::GetProperty(desc, set_name)); + ASSERT(!isolate->has_pending_exception()); + if (!setter->IsUndefined()) { + // We have a setter -- invoke it. + // TODO(rossberg): nicer would be to cast to some JSCallable here... + return proxy->SetPropertyWithDefinedSetter( + JSReceiver::cast(*setter), *value); + } else { + Handle<String> get_name = isolate->factory()->LookupAsciiSymbol("get_"); + Handle<Object> getter(v8::internal::GetProperty(desc, get_name)); + ASSERT(!isolate->has_pending_exception()); + if (!getter->IsUndefined()) { + // We have a getter but no setter -- the property may not be + // written. In strict mode, throw an error. + if (strict_mode == kNonStrictMode) return *value; + Handle<Object> args[] = { name, proxy }; + Handle<Object> error = isolate->factory()->NewTypeError( + "no_setter_in_callback", HandleVector(args, ARRAY_SIZE(args))); + return isolate->Throw(*error); + } + } + // Fall-through. + } + // The proxy does not define the property as an accessor. 
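The new SetPropertyWithHandlerIfDefiningSetter boils down to a small decision ladder over the descriptor returned by the getPropertyDescriptor trap (the found-flag fall-through appears just below). A side-effect-free sketch of that ladder, with ToyDescriptor and SetOutcome as illustrative types:

#include <optional>

// What the getPropertyDescriptor trap reported (already normalized).
struct ToyDescriptor {
  bool configurable;
  bool has_setter;
  bool has_getter;
};

enum class SetOutcome {
  kNotFound,                // handler does not care; receiver adds the property
  kThrowNotConfigurable,    // "proxy_prop_not_configurable"
  kCallSetter,              // virtualized accessor: invoke the setter
  kThrowNoSetterStrict,     // getter only, strict mode: "no_setter_in_callback"
  kIgnoreWrite              // getter only, sloppy mode: store is dropped
};

SetOutcome DecideSet(const std::optional<ToyDescriptor>& desc, bool strict_mode) {
  if (!desc) return SetOutcome::kNotFound;
  if (!desc->configurable) return SetOutcome::kThrowNotConfigurable;
  if (desc->has_setter) return SetOutcome::kCallSetter;
  if (desc->has_getter) {
    return strict_mode ? SetOutcome::kThrowNoSetterStrict
                       : SetOutcome::kIgnoreWrite;
  }
  return SetOutcome::kNotFound;  // data descriptor: fall through to the receiver
}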
+ *found = false; return *value; } @@ -2277,31 +2575,16 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler( HandleScope scope(isolate); Handle<Object> receiver(this); Handle<Object> name(name_raw); - Handle<Object> handler(this->handler()); - // Extract trap function. - Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete"); - Handle<Object> trap(v8::internal::GetProperty(handler, trap_name)); + Handle<Object> args[] = { name }; + Handle<Object> result = CallTrap( + "delete", Handle<Object>(), ARRAY_SIZE(args), args); if (isolate->has_pending_exception()) return Failure::Exception(); - if (trap->IsUndefined()) { - Handle<Object> args[] = { handler, trap_name }; - Handle<Object> error = isolate->factory()->NewTypeError( - "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args))); - isolate->Throw(*error); - return Failure::Exception(); - } - - // Call trap function. - Object** args[] = { name.location() }; - bool has_exception; - Handle<Object> result = - Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception); - if (has_exception) return Failure::Exception(); Object* bool_result = result->ToBoolean(); - if (mode == STRICT_DELETION && - bool_result == isolate->heap()->false_value()) { - Handle<Object> args[] = { handler, trap_name }; + if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) { + Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete"); + Handle<Object> args[] = { Handle<Object>(handler()), trap_name }; Handle<Object> error = isolate->factory()->NewTypeError( "handler_failed", HandleVector(args, ARRAY_SIZE(args))); isolate->Throw(*error); @@ -2311,39 +2594,73 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler( } +MUST_USE_RESULT MaybeObject* JSProxy::DeleteElementWithHandler( + uint32_t index, + DeleteMode mode) { + Isolate* isolate = GetIsolate(); + HandleScope scope(isolate); + Handle<String> name = isolate->factory()->Uint32ToString(index); + return JSProxy::DeletePropertyWithHandler(*name, mode); +} + + MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler( JSReceiver* receiver_raw, - String* name_raw, - bool* has_exception) { + String* name_raw) { Isolate* isolate = GetIsolate(); HandleScope scope(isolate); + Handle<JSProxy> proxy(this); Handle<JSReceiver> receiver(receiver_raw); Handle<Object> name(name_raw); - Handle<Object> handler(this->handler()); - // Extract trap function. - Handle<String> trap_name = - isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor"); - Handle<Object> trap(v8::internal::GetProperty(handler, trap_name)); + Handle<Object> args[] = { name }; + Handle<Object> result = CallTrap( + "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args); if (isolate->has_pending_exception()) return NONE; - if (trap->IsUndefined()) { - Handle<Object> args[] = { handler, trap_name }; + + if (result->IsUndefined()) return ABSENT; + + bool has_pending_exception; + Handle<Object> argv[] = { result }; + Handle<Object> desc = + Execution::Call(isolate->to_complete_property_descriptor(), result, + ARRAY_SIZE(argv), argv, &has_pending_exception); + if (has_pending_exception) return NONE; + + // Convert result to PropertyAttributes. 
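DeletePropertyWithHandler now routes through CallTrap and keeps only the strict-mode policy locally: a falsy trap result under STRICT_DELETION becomes a TypeError, otherwise the boolean result is passed back. A small sketch of that policy, with a generic exception standing in for the "handler_failed" error:

#include <stdexcept>

enum ToyDeleteMode { NORMAL_DELETION, STRICT_DELETION };

// Apply the strict-deletion policy to a trap result.
bool HandleDeleteTrapResult(bool trap_result, ToyDeleteMode mode) {
  if (mode == STRICT_DELETION && !trap_result) {
    // The real code throws a "handler_failed" TypeError here.
    throw std::runtime_error("delete trap returned false in strict mode");
  }
  return trap_result;
}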
+ Handle<String> enum_n = isolate->factory()->LookupAsciiSymbol("enumerable"); + Handle<Object> enumerable(v8::internal::GetProperty(desc, enum_n)); + if (isolate->has_pending_exception()) return NONE; + Handle<String> conf_n = isolate->factory()->LookupAsciiSymbol("configurable"); + Handle<Object> configurable(v8::internal::GetProperty(desc, conf_n)); + if (isolate->has_pending_exception()) return NONE; + Handle<String> writ_n = isolate->factory()->LookupAsciiSymbol("writable"); + Handle<Object> writable(v8::internal::GetProperty(desc, writ_n)); + if (isolate->has_pending_exception()) return NONE; + + if (configurable->IsFalse()) { + Handle<Object> args[] = { Handle<Object>(proxy->handler()), proxy, name }; Handle<Object> error = isolate->factory()->NewTypeError( - "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args))); + "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args))); isolate->Throw(*error); - *has_exception = true; return NONE; } - // Call trap function. - Object** args[] = { name.location() }; - Handle<Object> result = - Execution::Call(trap, handler, ARRAY_SIZE(args), args, has_exception); - if (has_exception) return NONE; + int attributes = NONE; + if (enumerable->ToBoolean()->IsFalse()) attributes |= DONT_ENUM; + if (configurable->ToBoolean()->IsFalse()) attributes |= DONT_DELETE; + if (writable->ToBoolean()->IsFalse()) attributes |= READ_ONLY; + return static_cast<PropertyAttributes>(attributes); +} - // TODO(rossberg): convert result to PropertyAttributes - USE(result); - return NONE; + +MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler( + JSReceiver* receiver, + uint32_t index) { + Isolate* isolate = GetIsolate(); + HandleScope scope(isolate); + Handle<String> name = isolate->factory()->Uint32ToString(index); + return GetPropertyAttributeWithHandler(receiver, *name); } @@ -2352,6 +2669,9 @@ void JSProxy::Fix() { HandleScope scope(isolate); Handle<JSProxy> self(this); + // Save identity hash. + MaybeObject* maybe_hash = GetIdentityHash(OMIT_CREATION); + if (IsJSFunctionProxy()) { isolate->factory()->BecomeJSFunction(self); // Code will be set on the JavaScript side. @@ -2359,9 +2679,42 @@ void JSProxy::Fix() { isolate->factory()->BecomeJSObject(self); } ASSERT(self->IsJSObject()); + + // Inherit identity, if it was present. + Object* hash; + if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) { + Handle<JSObject> new_self(JSObject::cast(*self)); + isolate->factory()->SetIdentityHash(new_self, hash); + } } +MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name, + Handle<Object> derived, + int argc, + Handle<Object> argv[]) { + Isolate* isolate = GetIsolate(); + Handle<Object> handler(this->handler()); + + Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol(name); + Handle<Object> trap(v8::internal::GetProperty(handler, trap_name)); + if (isolate->has_pending_exception()) return trap; + + if (trap->IsUndefined()) { + if (derived.is_null()) { + Handle<Object> args[] = { handler, trap_name }; + Handle<Object> error = isolate->factory()->NewTypeError( + "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args))); + isolate->Throw(*error); + return Handle<Object>(); + } + trap = Handle<Object>(derived); + } + + bool threw; + return Execution::Call(trap, handler, argc, argv, &threw); +} + MaybeObject* JSObject::SetPropertyForResult(LookupResult* result, String* name, @@ -2386,48 +2739,46 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result, } // Check access rights if needed. 
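The new JSProxy::CallTrap centralizes trap lookup: fetch handler[name]; if it is undefined, substitute the derived default trap, or raise "handler_trap_missing" when no default was supplied. A standalone sketch using std::function in place of JS callables (all names here are illustrative):

#include <functional>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

using ToyTrap = std::function<int(const std::vector<int>&)>;

// Fetch handler[name]; fall back to the derived trap, or fail if none exists.
int CallTrap(const std::map<std::string, ToyTrap>& handler,
             const std::string& name,
             const ToyTrap& derived,            // empty => no fallback allowed
             const std::vector<int>& args) {
  auto it = handler.find(name);
  ToyTrap trap = (it != handler.end()) ? it->second : ToyTrap();
  if (!trap) {
    if (!derived) {
      // Mirrors the "handler_trap_missing" TypeError.
      throw std::runtime_error("handler trap missing: " + name);
    }
    trap = derived;
  }
  return trap(args);
}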
- if (IsAccessCheckNeeded() - && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) { - return SetPropertyWithFailedAccessCheck(result, - name, - value, - true, - strict_mode); + if (IsAccessCheckNeeded()) { + if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) { + return SetPropertyWithFailedAccessCheck( + result, name, value, true, strict_mode); + } } if (IsJSGlobalProxy()) { Object* proto = GetPrototype(); if (proto->IsNull()) return value; ASSERT(proto->IsJSGlobalObject()); - return JSObject::cast(proto)->SetProperty( + return JSObject::cast(proto)->SetPropertyForResult( result, name, value, attributes, strict_mode); } if (!result->IsProperty() && !IsJSContextExtensionObject()) { - // We could not find a local property so let's check whether there is an - // accessor that wants to handle the property. - LookupResult accessor_result; - LookupCallbackSetterInPrototypes(name, &accessor_result); - if (accessor_result.IsProperty()) { - return SetPropertyWithCallback(accessor_result.GetCallbackObject(), - name, - value, - accessor_result.holder(), - strict_mode); - } + bool found = false; + MaybeObject* result_object; + result_object = SetPropertyWithCallbackSetterInPrototypes(name, + value, + attributes, + &found, + strict_mode); + if (found) return result_object; } + + // At this point, no GC should have happened, as this would invalidate + // 'result', which we cannot handlify! + if (!result->IsFound()) { // Neither properties nor transitions found. return AddProperty(name, value, attributes, strict_mode); } if (result->IsReadOnly() && result->IsProperty()) { if (strict_mode == kStrictMode) { - HandleScope scope(heap->isolate()); - Handle<String> key(name); - Handle<Object> holder(this); - Handle<Object> args[2] = { key, holder }; + Handle<JSObject> self(this); + Handle<String> hname(name); + Handle<Object> args[] = { hname, self }; return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError( - "strict_read_only_property", HandleVector(args, 2))); + "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)))); } else { return value; } @@ -2656,9 +3007,8 @@ PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver( String* key) { uint32_t index = 0; if (IsJSObject() && key->AsArrayIndex(&index)) { - if (JSObject::cast(this)->HasElementWithReceiver(receiver, index)) - return NONE; - return ABSENT; + return JSObject::cast(this)->HasElementWithReceiver(receiver, index) + ? NONE : ABSENT; } // Named property. LookupResult result; @@ -2688,10 +3038,8 @@ PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver, case CALLBACKS: return result->GetAttributes(); case HANDLER: { - // TODO(rossberg): propagate exceptions properly. - bool has_exception = false; - return JSProxy::cast(this)->GetPropertyAttributeWithHandler( - receiver, name, &has_exception); + return JSProxy::cast(result->proxy())->GetPropertyAttributeWithHandler( + receiver, name); } case INTERCEPTOR: return result->holder()->GetPropertyAttributeWithInterceptor( @@ -2857,7 +3205,7 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode, } } - Heap* current_heap = map_of_this->heap(); + Heap* current_heap = GetHeap(); // Copy the next enumeration index from instance descriptor. 
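SetPropertyForResult keeps the long-standing read-only policy while building the error arguments with ARRAY_SIZE: writing a read-only property throws "strict_read_only_property" in strict mode and is silently ignored otherwise. A tiny sketch of just that policy decision, with a generic exception in place of the real TypeError:

#include <stdexcept>
#include <string>

enum ToyStrictModeFlag { kToyNonStrictMode, kToyStrictMode };

// Store to a read-only property: throw in strict mode, silently ignore otherwise.
int StoreToReadOnly(const std::string& name, int value, ToyStrictModeFlag mode) {
  if (mode == kToyStrictMode) {
    // The real code throws a "strict_read_only_property" TypeError.
    throw std::runtime_error("cannot assign to read only property '" + name + "'");
  }
  return value;  // non-strict: the write is dropped and the value returned
}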
int index = map_of_this->instance_descriptors()->NextEnumerationIndex(); @@ -2879,6 +3227,10 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode, ASSERT(instance_size_delta >= 0); current_heap->CreateFillerObjectAt(this->address() + new_instance_size, instance_size_delta); + if (Marking::IsBlack(Marking::MarkBitFrom(this))) { + MemoryChunk::IncrementLiveBytes(this->address(), -instance_size_delta); + } + set_map(new_map); new_map->clear_instance_descriptors(); @@ -2912,13 +3264,14 @@ MaybeObject* JSObject::NormalizeElements() { FixedArrayBase* array = FixedArrayBase::cast(elements()); Map* old_map = array->map(); bool is_arguments = - (old_map == old_map->heap()->non_strict_arguments_elements_map()); + (old_map == old_map->GetHeap()->non_strict_arguments_elements_map()); if (is_arguments) { array = FixedArrayBase::cast(FixedArray::cast(array)->get(1)); } if (array->IsDictionary()) return array; ASSERT(HasFastElements() || + HasFastSmiOnlyElements() || HasFastDoubleElements() || HasFastArgumentsElements()); // Compute the effective length and allocate a new backing store. @@ -2953,7 +3306,8 @@ MaybeObject* JSObject::NormalizeElements() { if (!maybe_value_object->ToObject(&value)) return maybe_value_object; } } else { - ASSERT(old_map->has_fast_elements()); + ASSERT(old_map->has_fast_elements() || + old_map->has_fast_smi_only_elements()); value = FixedArray::cast(array)->get(i); } PropertyDetails details = PropertyDetails(NONE, NORMAL); @@ -2973,13 +3327,14 @@ MaybeObject* JSObject::NormalizeElements() { // Set the new map first to satify the elements type assert in // set_elements(). Object* new_map; - MaybeObject* maybe = map()->GetSlowElementsMap(); + MaybeObject* maybe = GetElementsTransitionMap(DICTIONARY_ELEMENTS); if (!maybe->ToObject(&new_map)) return maybe; set_map(Map::cast(new_map)); set_elements(dictionary); } - old_map->isolate()->counters()->elements_to_dictionary()->Increment(); + old_map->GetHeap()->isolate()->counters()->elements_to_dictionary()-> + Increment(); #ifdef DEBUG if (FLAG_trace_normalization) { @@ -2993,69 +3348,8 @@ MaybeObject* JSObject::NormalizeElements() { } -MaybeObject* JSObject::GetHiddenProperties(HiddenPropertiesFlag flag) { +Smi* JSReceiver::GenerateIdentityHash() { Isolate* isolate = GetIsolate(); - Heap* heap = isolate->heap(); - Object* holder = BypassGlobalProxy(); - if (holder->IsUndefined()) return heap->undefined_value(); - JSObject* obj = JSObject::cast(holder); - if (obj->HasFastProperties()) { - // If the object has fast properties, check whether the first slot - // in the descriptor array matches the hidden symbol. Since the - // hidden symbols hash code is zero (and no other string has hash - // code zero) it will always occupy the first entry if present. - DescriptorArray* descriptors = obj->map()->instance_descriptors(); - if ((descriptors->number_of_descriptors() > 0) && - (descriptors->GetKey(0) == heap->hidden_symbol()) && - descriptors->IsProperty(0)) { - ASSERT(descriptors->GetType(0) == FIELD); - return obj->FastPropertyAt(descriptors->GetFieldIndex(0)); - } - } - - // Only attempt to find the hidden properties in the local object and not - // in the prototype chain. - if (!obj->HasHiddenPropertiesObject()) { - // Hidden properties object not found. Allocate a new hidden properties - // object if requested. Otherwise return the undefined value. 
- if (flag == ALLOW_CREATION) { - Object* hidden_obj; - { MaybeObject* maybe_obj = heap->AllocateJSObject( - isolate->context()->global_context()->object_function()); - if (!maybe_obj->ToObject(&hidden_obj)) return maybe_obj; - } - return obj->SetHiddenPropertiesObject(hidden_obj); - } else { - return heap->undefined_value(); - } - } - return obj->GetHiddenPropertiesObject(); -} - - -MaybeObject* JSObject::GetIdentityHash(HiddenPropertiesFlag flag) { - Isolate* isolate = GetIsolate(); - Object* hidden_props_obj; - { MaybeObject* maybe_obj = GetHiddenProperties(flag); - if (!maybe_obj->ToObject(&hidden_props_obj)) return maybe_obj; - } - if (!hidden_props_obj->IsJSObject()) { - // We failed to create hidden properties. That's a detached - // global proxy. - ASSERT(hidden_props_obj->IsUndefined()); - return Smi::FromInt(0); - } - JSObject* hidden_props = JSObject::cast(hidden_props_obj); - String* hash_symbol = isolate->heap()->identity_hash_symbol(); - { - // Note that HasLocalProperty() can cause a GC in the general case in the - // presence of interceptors. - AssertNoAllocation no_alloc; - if (hidden_props->HasLocalProperty(hash_symbol)) { - MaybeObject* hash = hidden_props->GetProperty(hash_symbol); - return Smi::cast(hash->ToObjectChecked()); - } - } int hash_value; int attempts = 0; @@ -3067,17 +3361,209 @@ MaybeObject* JSObject::GetIdentityHash(HiddenPropertiesFlag flag) { } while (hash_value == 0 && attempts < 30); hash_value = hash_value != 0 ? hash_value : 1; // never return 0 - Smi* hash = Smi::FromInt(hash_value); - { MaybeObject* result = hidden_props->SetLocalPropertyIgnoreAttributes( - hash_symbol, - hash, - static_cast<PropertyAttributes>(None)); - if (result->IsFailure()) return result; + return Smi::FromInt(hash_value); +} + + +MaybeObject* JSObject::SetIdentityHash(Object* hash, CreationFlag flag) { + MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(), + hash); + if (maybe->IsFailure()) return maybe; + return this; +} + + +MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) { + Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_symbol()); + if (stored_value->IsSmi()) return stored_value; + + Smi* hash = GenerateIdentityHash(); + MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_symbol(), + hash); + if (result->IsFailure()) return result; + if (result->ToObjectUnchecked()->IsUndefined()) { + // Trying to get hash of detached proxy. + return Smi::FromInt(0); } return hash; } +MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) { + Object* hash = this->hash(); + if (!hash->IsSmi() && flag == ALLOW_CREATION) { + hash = GenerateIdentityHash(); + set_hash(hash); + } + return hash; +} + + +Object* JSObject::GetHiddenProperty(String* key) { + if (IsJSGlobalProxy()) { + // For a proxy, use the prototype as target object. + Object* proxy_parent = GetPrototype(); + // If the proxy is detached, return undefined. + if (proxy_parent->IsNull()) return GetHeap()->undefined_value(); + ASSERT(proxy_parent->IsJSGlobalObject()); + return JSObject::cast(proxy_parent)->GetHiddenProperty(key); + } + ASSERT(!IsJSGlobalProxy()); + MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false); + ASSERT(!hidden_lookup->IsFailure()); // No failure when passing false as arg. 
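JSReceiver::GenerateIdentityHash, shown above, draws a random hash, retries a bounded number of times if it gets zero, and finally maps zero to one so that zero can keep meaning "no hash yet". The same loop in standalone form, with std::mt19937 standing in for the runtime's random source and an illustrative Smi-sized mask:

#include <random>

// Draw a non-zero hash, retrying a bounded number of times before forcing 1.
int GenerateIdentityHash(std::mt19937& rng) {
  int hash_value = 0;
  int attempts = 0;
  do {
    hash_value = static_cast<int>(rng() & 0x3fffffffu);  // illustrative mask
    attempts++;
  } while (hash_value == 0 && attempts < 30);
  return hash_value != 0 ? hash_value : 1;  // never hand out 0
}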
+ if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) { + return GetHeap()->undefined_value(); + } + StringDictionary* dictionary = + StringDictionary::cast(hidden_lookup->ToObjectUnchecked()); + int entry = dictionary->FindEntry(key); + if (entry == StringDictionary::kNotFound) return GetHeap()->undefined_value(); + return dictionary->ValueAt(entry); +} + + +MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) { + if (IsJSGlobalProxy()) { + // For a proxy, use the prototype as target object. + Object* proxy_parent = GetPrototype(); + // If the proxy is detached, return undefined. + if (proxy_parent->IsNull()) return GetHeap()->undefined_value(); + ASSERT(proxy_parent->IsJSGlobalObject()); + return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value); + } + ASSERT(!IsJSGlobalProxy()); + MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(true); + StringDictionary* dictionary; + if (!hidden_lookup->To<StringDictionary>(&dictionary)) return hidden_lookup; + + // If it was found, check if the key is already in the dictionary. + int entry = dictionary->FindEntry(key); + if (entry != StringDictionary::kNotFound) { + // If key was found, just update the value. + dictionary->ValueAtPut(entry, value); + return this; + } + // Key was not already in the dictionary, so add the entry. + MaybeObject* insert_result = dictionary->Add(key, + value, + PropertyDetails(NONE, NORMAL)); + StringDictionary* new_dict; + if (!insert_result->To<StringDictionary>(&new_dict)) return insert_result; + if (new_dict != dictionary) { + // If adding the key expanded the dictionary (i.e., Add returned a new + // dictionary), store it back to the object. + MaybeObject* store_result = SetHiddenPropertiesDictionary(new_dict); + if (store_result->IsFailure()) return store_result; + } + // Return this to mark success. + return this; +} + + +void JSObject::DeleteHiddenProperty(String* key) { + if (IsJSGlobalProxy()) { + // For a proxy, use the prototype as target object. + Object* proxy_parent = GetPrototype(); + // If the proxy is detached, return immediately. + if (proxy_parent->IsNull()) return; + ASSERT(proxy_parent->IsJSGlobalObject()); + JSObject::cast(proxy_parent)->DeleteHiddenProperty(key); + return; + } + MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false); + ASSERT(!hidden_lookup->IsFailure()); // No failure when passing false as arg. + if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) return; + StringDictionary* dictionary = + StringDictionary::cast(hidden_lookup->ToObjectUnchecked()); + int entry = dictionary->FindEntry(key); + if (entry == StringDictionary::kNotFound) { + // Key wasn't in dictionary. Deletion is a success. + return; + } + // Key was in the dictionary. Remove it. + dictionary->DeleteProperty(entry, JSReceiver::FORCE_DELETION); +} + + +bool JSObject::HasHiddenProperties() { + return GetPropertyAttributePostInterceptor(this, + GetHeap()->hidden_symbol(), + false) != ABSENT; +} + + +MaybeObject* JSObject::GetHiddenPropertiesDictionary(bool create_if_absent) { + ASSERT(!IsJSGlobalProxy()); + if (HasFastProperties()) { + // If the object has fast properties, check whether the first slot + // in the descriptor array matches the hidden symbol. Since the + // hidden symbols hash code is zero (and no other string has hash + // code zero) it will always occupy the first entry if present. 
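SetHiddenProperty follows the usual copy-on-grow dictionary contract: Add() may hand back a brand new backing store, and only in that case is the new store written back onto the object. A loose sketch of that contract with a toy dictionary; here the "grew" case is modeled by always allocating a copy on insert, which the real StringDictionary only does when it needs more space:

#include <string>
#include <unordered_map>

struct ToyDictionary {
  std::unordered_map<std::string, int> entries;
};

// Returns the dictionary the caller must keep; a different pointer means the
// backing store "grew" and has to be stored back on the object.
ToyDictionary* AddOrUpdate(ToyDictionary* dict, const std::string& key, int value) {
  auto it = dict->entries.find(key);
  if (it != dict->entries.end()) {
    it->second = value;        // key already present: update in place
    return dict;
  }
  ToyDictionary* grown = new ToyDictionary(*dict);  // model "Add returned a new store"
  grown->entries[key] = value;
  return grown;                // caller compares pointers and writes this back
}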
+ DescriptorArray* descriptors = this->map()->instance_descriptors(); + if ((descriptors->number_of_descriptors() > 0) && + (descriptors->GetKey(0) == GetHeap()->hidden_symbol()) && + descriptors->IsProperty(0)) { + ASSERT(descriptors->GetType(0) == FIELD); + Object* hidden_store = + this->FastPropertyAt(descriptors->GetFieldIndex(0)); + return StringDictionary::cast(hidden_store); + } + } else { + PropertyAttributes attributes; + // You can't install a getter on a property indexed by the hidden symbol, + // so we can be sure that GetLocalPropertyPostInterceptor returns a real + // object. + Object* lookup = + GetLocalPropertyPostInterceptor(this, + GetHeap()->hidden_symbol(), + &attributes)->ToObjectUnchecked(); + if (!lookup->IsUndefined()) { + return StringDictionary::cast(lookup); + } + } + if (!create_if_absent) return GetHeap()->undefined_value(); + const int kInitialSize = 5; + MaybeObject* dict_alloc = StringDictionary::Allocate(kInitialSize); + StringDictionary* dictionary; + if (!dict_alloc->To<StringDictionary>(&dictionary)) return dict_alloc; + MaybeObject* store_result = + SetPropertyPostInterceptor(GetHeap()->hidden_symbol(), + dictionary, + DONT_ENUM, + kNonStrictMode); + if (store_result->IsFailure()) return store_result; + return dictionary; +} + + +MaybeObject* JSObject::SetHiddenPropertiesDictionary( + StringDictionary* dictionary) { + ASSERT(!IsJSGlobalProxy()); + ASSERT(HasHiddenProperties()); + if (HasFastProperties()) { + // If the object has fast properties, check whether the first slot + // in the descriptor array matches the hidden symbol. Since the + // hidden symbols hash code is zero (and no other string has hash + // code zero) it will always occupy the first entry if present. + DescriptorArray* descriptors = this->map()->instance_descriptors(); + if ((descriptors->number_of_descriptors() > 0) && + (descriptors->GetKey(0) == GetHeap()->hidden_symbol()) && + descriptors->IsProperty(0)) { + ASSERT(descriptors->GetType(0) == FIELD); + this->FastPropertyAtPut(descriptors->GetFieldIndex(0), dictionary); + return this; + } + } + MaybeObject* store_result = + SetPropertyPostInterceptor(GetHeap()->hidden_symbol(), + dictionary, + DONT_ENUM, + kNonStrictMode); + if (store_result->IsFailure()) return store_result; + return this; +} + + MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name, DeleteMode mode) { // Check local property, ignore interceptor. @@ -3194,9 +3680,16 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) { MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) { if (IsJSProxy()) { return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode); - } else { - return JSObject::cast(this)->DeleteProperty(name, mode); } + return JSObject::cast(this)->DeleteProperty(name, mode); +} + + +MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) { + if (IsJSProxy()) { + return JSProxy::cast(this)->DeleteElementWithHandler(index, mode); + } + return JSObject::cast(this)->DeleteElement(index, mode); } @@ -3260,7 +3753,8 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) { bool JSObject::ReferencesObjectFromElements(FixedArray* elements, ElementsKind kind, Object* object) { - ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS); + ASSERT(kind == FAST_ELEMENTS || + kind == DICTIONARY_ELEMENTS); if (kind == FAST_ELEMENTS) { int length = IsJSArray() ? 
Smi::cast(JSArray::cast(this)->length())->value() @@ -3280,7 +3774,7 @@ bool JSObject::ReferencesObjectFromElements(FixedArray* elements, // Check whether this object references another object. bool JSObject::ReferencesObject(Object* obj) { Map* map_of_this = map(); - Heap* heap = map_of_this->heap(); + Heap* heap = GetHeap(); AssertNoAllocation no_alloc; // Is the object the constructor for this object? @@ -3315,6 +3809,8 @@ bool JSObject::ReferencesObject(Object* obj) { // Raw pixels and external arrays do not reference other // objects. break; + case FAST_SMI_ONLY_ELEMENTS: + break; case FAST_ELEMENTS: case DICTIONARY_ELEMENTS: { FixedArray* elements = FixedArray::cast(this->elements()); @@ -3502,15 +3998,6 @@ AccessorDescriptor* Map::FindAccessor(String* name) { void JSReceiver::LocalLookup(String* name, LookupResult* result) { - if (IsJSProxy()) { - result->HandlerResult(); - } else { - JSObject::cast(this)->LocalLookup(name, result); - } -} - - -void JSObject::LocalLookup(String* name, LookupResult* result) { ASSERT(name->IsString()); Heap* heap = GetHeap(); @@ -3519,28 +4006,36 @@ void JSObject::LocalLookup(String* name, LookupResult* result) { Object* proto = GetPrototype(); if (proto->IsNull()) return result->NotFound(); ASSERT(proto->IsJSGlobalObject()); - return JSObject::cast(proto)->LocalLookup(name, result); + return JSReceiver::cast(proto)->LocalLookup(name, result); + } + + if (IsJSProxy()) { + result->HandlerResult(JSProxy::cast(this)); + return; } // Do not use inline caching if the object is a non-global object // that requires access checks. - if (!IsJSGlobalProxy() && IsAccessCheckNeeded()) { + if (IsAccessCheckNeeded()) { result->DisallowCaching(); } + JSObject* js_object = JSObject::cast(this); + // Check __proto__ before interceptor. if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) { - result->ConstantResult(this); + result->ConstantResult(js_object); return; } // Check for lookup interceptor except when bootstrapping. 
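JSReceiver::LocalLookup now handles every receiver kind in one place: a global proxy forwards to its prototype (or reports not-found when detached), a proxy reports its handler, and only plain objects perform a real own-property lookup. A condensed sketch of that ordering with illustrative types:

#include <string>

enum class ToyReceiverKind { kGlobalProxy, kProxy, kPlainObject };

struct ToyReceiver {
  ToyReceiverKind kind;
  ToyReceiver* prototype = nullptr;   // only meaningful for the global proxy
  bool HasOwn(const std::string&) const { return false; }  // stub
};

enum class LookupKind { kNotFound, kHandler, kOwnProperty };

LookupKind LocalLookup(const ToyReceiver* self, const std::string& name) {
  if (self->kind == ToyReceiverKind::kGlobalProxy) {
    if (self->prototype == nullptr) return LookupKind::kNotFound;  // detached
    return LocalLookup(self->prototype, name);
  }
  if (self->kind == ToyReceiverKind::kProxy) return LookupKind::kHandler;
  return self->HasOwn(name) ? LookupKind::kOwnProperty : LookupKind::kNotFound;
}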
- if (HasNamedInterceptor() && !heap->isolate()->bootstrapper()->IsActive()) { - result->InterceptorResult(this); + if (js_object->HasNamedInterceptor() && + !heap->isolate()->bootstrapper()->IsActive()) { + result->InterceptorResult(js_object); return; } - LocalLookupRealNamedProperty(name, result); + js_object->LocalLookupRealNamedProperty(name, result); } @@ -3550,7 +4045,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) { for (Object* current = this; current != heap->null_value(); current = JSObject::cast(current)->GetPrototype()) { - JSObject::cast(current)->LocalLookup(name, result); + JSReceiver::cast(current)->LocalLookup(name, result); if (result->IsProperty()) return; } result->NotFound(); @@ -3561,7 +4056,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) { void JSObject::LookupCallback(String* name, LookupResult* result) { Heap* heap = GetHeap(); for (Object* current = this; - current != heap->null_value(); + current != heap->null_value() && current->IsJSObject(); current = JSObject::cast(current)->GetPrototype()) { JSObject::cast(current)->LocalLookupRealNamedProperty(name, result); if (result->IsProperty() && result->type() == CALLBACKS) return; @@ -3607,6 +4102,7 @@ MaybeObject* JSObject::DefineGetterSetter(String* name, if (is_element) { switch (GetElementsKind()) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: break; @@ -3793,7 +4289,7 @@ MaybeObject* JSObject::DefineAccessor(String* name, bool is_getter, Object* fun, PropertyAttributes attributes) { - ASSERT(fun->IsJSFunction() || fun->IsUndefined()); + ASSERT(fun->IsSpecFunction() || fun->IsUndefined()); Isolate* isolate = GetIsolate(); // Check access rights if needed. if (IsAccessCheckNeeded() && @@ -3856,6 +4352,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) { // Accessors overwrite previous callbacks (cf. with getters/setters). switch (GetElementsKind()) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: break; @@ -4079,7 +4576,7 @@ MaybeObject* Map::UpdateCodeCache(String* name, Code* code) { // Allocate the code cache if not present. if (code_cache()->IsFixedArray()) { Object* result; - { MaybeObject* maybe_result = code->heap()->AllocateCodeCache(); + { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache(); if (!maybe_result->ToObject(&result)) return maybe_result; } set_code_cache(result); @@ -4121,7 +4618,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) { // Traverse the transition tree without using a stack. We do this by // reversing the pointers in the maps and descriptor arrays. Map* current = this; - Map* meta_map = heap()->meta_map(); + Map* meta_map = GetHeap()->meta_map(); Object** map_or_index_field = NULL; while (current != meta_map) { DescriptorArray* d = reinterpret_cast<DescriptorArray*>( @@ -4142,7 +4639,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) { // of the next map and recording the index in the transition array in // the map field of the array. 
Map* next = Map::cast(contents->get(i)); - next->set_map(current); + next->set_map_unsafe(current); *map_or_index_field = Smi::FromInt(i + 2); current = next; map_done = false; @@ -4167,23 +4664,23 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) { Object* perhaps_map = prototype_transitions->get(i); if (perhaps_map->IsMap()) { Map* next = Map::cast(perhaps_map); - next->set_map(current); + next->set_map_unsafe(current); *proto_map_or_index_field = Smi::FromInt(i + kProtoTransitionElementsPerEntry); current = next; continue; } } - *proto_map_or_index_field = heap()->fixed_array_map(); + *proto_map_or_index_field = GetHeap()->fixed_array_map(); if (map_or_index_field != NULL) { - *map_or_index_field = heap()->fixed_array_map(); + *map_or_index_field = GetHeap()->fixed_array_map(); } // The callback expects a map to have a real map as its map, so we save // the map field, which is being used to track the traversal and put the // correct map (the meta_map) in place while we do the callback. Map* prev = current->map(); - current->set_map(meta_map); + current->set_map_unsafe(meta_map); callback(current, data); current = prev; } @@ -4399,7 +4896,7 @@ class CodeCacheHashTableKey : public HashTableKey { MUST_USE_RESULT MaybeObject* AsObject() { ASSERT(code_ != NULL); Object* obj; - { MaybeObject* maybe_obj = code_->heap()->AllocateFixedArray(2); + { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } FixedArray* pair = FixedArray::cast(obj); @@ -5988,7 +6485,7 @@ bool String::MarkAsUndetectable() { if (StringShape(this).IsSymbol()) return false; Map* map = this->map(); - Heap* heap = map->heap(); + Heap* heap = GetHeap(); if (map == heap->string_map()) { this->set_map(heap->undetectable_string_map()); return true; @@ -6191,29 +6688,45 @@ void String::PrintOn(FILE* file) { } +void Map::CreateOneBackPointer(Map* target) { +#ifdef DEBUG + // Verify target. + Object* source_prototype = prototype(); + Object* target_prototype = target->prototype(); + ASSERT(source_prototype->IsJSReceiver() || + source_prototype->IsMap() || + source_prototype->IsNull()); + ASSERT(target_prototype->IsJSReceiver() || + target_prototype->IsNull()); + ASSERT(source_prototype->IsMap() || + source_prototype == target_prototype); +#endif + // Point target back to source. set_prototype() will not let us set + // the prototype to a map, as we do here. + *RawField(target, kPrototypeOffset) = this; +} + + void Map::CreateBackPointers() { DescriptorArray* descriptors = instance_descriptors(); for (int i = 0; i < descriptors->number_of_descriptors(); i++) { if (descriptors->GetType(i) == MAP_TRANSITION || descriptors->GetType(i) == ELEMENTS_TRANSITION || descriptors->GetType(i) == CONSTANT_TRANSITION) { - // Get target. - Map* target = Map::cast(descriptors->GetValue(i)); -#ifdef DEBUG - // Verify target. - Object* source_prototype = prototype(); - Object* target_prototype = target->prototype(); - ASSERT(source_prototype->IsJSObject() || - source_prototype->IsMap() || - source_prototype->IsNull()); - ASSERT(target_prototype->IsJSObject() || - target_prototype->IsNull()); - ASSERT(source_prototype->IsMap() || - source_prototype == target_prototype); -#endif - // Point target back to source. set_prototype() will not let us set - // the prototype to a map, as we do here. 
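Map::TraverseTransitionTree walks the whole transition tree without an explicit stack by temporarily reversing each child's map pointer to point back at its parent (via set_map_unsafe) and stashing the resume index in the descriptor slot. The same idea in standalone form, except that the scratch parent pointer and resume index live in dedicated fields rather than being squeezed into existing ones:

#include <cstddef>
#include <vector>

struct ToyTreeNode {
  std::vector<ToyTreeNode*> children;
  ToyTreeNode* back = nullptr;    // scratch: temporarily points at the parent
  std::size_t next_child = 0;     // scratch: where to resume among the children
};

// Post-order traversal with no stack: descending reverses a pointer, ascending
// restores it, much like the map/descriptor fields are reused in the real code.
void TraverseNoStack(ToyTreeNode* root, void (*visit)(ToyTreeNode*)) {
  ToyTreeNode* current = root;
  while (current != nullptr) {
    if (current->next_child < current->children.size()) {
      ToyTreeNode* child = current->children[current->next_child++];
      child->back = current;      // reverse the pointer instead of pushing
      current = child;
    } else {
      visit(current);             // all children done: visit, then go back up
      current->next_child = 0;    // restore scratch state
      ToyTreeNode* parent = current->back;
      current->back = nullptr;
      current = parent;
    }
  }
}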
- *RawField(target, kPrototypeOffset) = this; + Object* object = reinterpret_cast<Object*>(descriptors->GetValue(i)); + if (object->IsMap()) { + CreateOneBackPointer(reinterpret_cast<Map*>(object)); + } else { + ASSERT(object->IsFixedArray()); + ASSERT(descriptors->GetType(i) == ELEMENTS_TRANSITION); + FixedArray* array = reinterpret_cast<FixedArray*>(object); + for (int i = 0; i < array->length(); ++i) { + Map* target = reinterpret_cast<Map*>(array->get(i)); + if (!target->IsUndefined()) { + CreateOneBackPointer(target); + } + } + } } } } @@ -6240,16 +6753,46 @@ void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) { if (details.type() == MAP_TRANSITION || details.type() == ELEMENTS_TRANSITION || details.type() == CONSTANT_TRANSITION) { - Map* target = reinterpret_cast<Map*>(contents->get(i)); - ASSERT(target->IsHeapObject()); - if (!target->IsMarked()) { - ASSERT(target->IsMap()); - contents->set_unchecked(i + 1, NullDescriptorDetails); - contents->set_null_unchecked(heap, i); - ASSERT(target->prototype() == this || - target->prototype() == real_prototype); - // Getter prototype() is read-only, set_prototype() has side effects. - *RawField(target, Map::kPrototypeOffset) = real_prototype; + Object* object = reinterpret_cast<Object*>(contents->get(i)); + if (object->IsMap()) { + Map* target = reinterpret_cast<Map*>(object); + ASSERT(target->IsHeapObject()); + MarkBit map_mark = Marking::MarkBitFrom(target); + if (!map_mark.Get()) { + ASSERT(target->IsMap()); + contents->set_unchecked(i + 1, NullDescriptorDetails); + contents->set_null_unchecked(heap, i); + ASSERT(target->prototype() == this || + target->prototype() == real_prototype); + // Getter prototype() is read-only, set_prototype() has side effects. + *RawField(target, Map::kPrototypeOffset) = real_prototype; + } + } else { + ASSERT(object->IsFixedArray()); + ASSERT(details.type() == ELEMENTS_TRANSITION); + FixedArray* array = reinterpret_cast<FixedArray*>(object); + bool reachable_map_found = false; + for (int j = 0; j < array->length(); ++j) { + Map* target = reinterpret_cast<Map*>(array->get(j)); + ASSERT(target->IsHeapObject()); + MarkBit map_mark = Marking::MarkBitFrom(target); + if (!map_mark.Get()) { + ASSERT(target->IsMap()); + array->set_undefined(j); + ASSERT(target->prototype() == this || + target->prototype() == real_prototype); + // Getter prototype() is read-only, set_prototype() has side + // effects. + *RawField(target, Map::kPrototypeOffset) = real_prototype; + } else if (target->IsMap()) { + reachable_map_found = true; + } + } + // If no map was found, make sure the FixedArray also gets collected. 
+ if (!reachable_map_found) { + contents->set_unchecked(i + 1, NullDescriptorDetails); + contents->set_null_unchecked(heap, i); + } } } } @@ -6355,7 +6898,7 @@ MaybeObject* JSFunction::SetPrototype(Object* value) { if (!maybe_new_map->ToObject(&new_object)) return maybe_new_map; } Map* new_map = Map::cast(new_object); - Heap* heap = new_map->heap(); + Heap* heap = new_map->GetHeap(); set_map(new_map); new_map->set_constructor(value); new_map->set_non_instance_prototype(true); @@ -6386,7 +6929,7 @@ Object* JSFunction::RemovePrototype() { ASSERT(shared()->strict_mode() || map() == global_context->function_map()); set_map(no_prototype_map); - set_prototype_or_initial_map(no_prototype_map->heap()->the_hole_value()); + set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value()); return this; } @@ -6679,6 +7222,8 @@ bool SharedFunctionInfo::VerifyBailoutId(int id) { void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) { ASSERT(!IsInobjectSlackTrackingInProgress()); + if (!FLAG_clever_optimizations) return; + // Only initiate the tracking the first time. if (live_objects_may_exist()) return; set_live_objects_may_exist(true); @@ -6694,7 +7239,7 @@ void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) { set_construction_count(kGenerousAllocationCount); } set_initial_map(map); - Builtins* builtins = map->heap()->isolate()->builtins(); + Builtins* builtins = map->GetHeap()->isolate()->builtins(); ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric), construct_stub()); set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown)); @@ -6714,8 +7259,9 @@ void SharedFunctionInfo::DetachInitialMap() { // then StartInobjectTracking will be called again the next time the // constructor is called. The countdown will continue and (possibly after // several more GCs) CompleteInobjectSlackTracking will eventually be called. - set_initial_map(map->heap()->raw_unchecked_undefined_value()); - Builtins* builtins = map->heap()->isolate()->builtins(); + Heap* heap = map->GetHeap(); + set_initial_map(heap->raw_unchecked_undefined_value()); + Builtins* builtins = heap->isolate()->builtins(); ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown), *RawField(this, kConstructStubOffset)); set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric)); @@ -6731,7 +7277,7 @@ void SharedFunctionInfo::AttachInitialMap(Map* map) { // Resume inobject slack tracking. 
set_initial_map(map); - Builtins* builtins = map->heap()->isolate()->builtins(); + Builtins* builtins = map->GetHeap()->isolate()->builtins(); ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric), *RawField(this, kConstructStubOffset)); set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown)); @@ -6763,7 +7309,7 @@ void SharedFunctionInfo::CompleteInobjectSlackTracking() { ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress()); Map* map = Map::cast(initial_map()); - Heap* heap = map->heap(); + Heap* heap = map->GetHeap(); set_initial_map(heap->undefined_value()); Builtins* builtins = heap->isolate()->builtins(); ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown), @@ -6825,8 +7371,14 @@ void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) { } +void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) { + ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); + VisitPointer(rinfo->target_object_address()); +} + + void Code::InvalidateRelocation() { - set_relocation_info(heap()->empty_byte_array()); + set_relocation_info(GetHeap()->empty_byte_array()); } @@ -6860,7 +7412,7 @@ void Code::CopyFrom(const CodeDesc& desc) { Handle<Object> p = it.rinfo()->target_object_handle(origin); it.rinfo()->set_target_object(*p); } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { - Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle(); + Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle(); it.rinfo()->set_target_cell(*cell); } else if (RelocInfo::IsCodeTarget(mode)) { // rewrite code handles in inline cache targets to direct @@ -7263,8 +7815,10 @@ static void CopySlowElementsToFast(NumberDictionary* source, } -MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity, - int length) { +MaybeObject* JSObject::SetFastElementsCapacityAndLength( + int capacity, + int length, + SetFastElementsCapacityMode set_capacity_mode) { Heap* heap = GetHeap(); // We should never end in here with a pixel or external array. ASSERT(!HasExternalArrayElements()); @@ -7281,15 +7835,24 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity, Map* new_map = NULL; if (elements()->map() != heap->non_strict_arguments_elements_map()) { Object* object; - MaybeObject* maybe = map()->GetFastElementsMap(); + bool has_fast_smi_only_elements = + (set_capacity_mode == kAllowSmiOnlyElements) && + (elements()->map()->has_fast_smi_only_elements() || + elements() == heap->empty_fixed_array()); + ElementsKind elements_kind = has_fast_smi_only_elements + ? 
FAST_SMI_ONLY_ELEMENTS + : FAST_ELEMENTS; + MaybeObject* maybe = GetElementsTransitionMap(elements_kind); if (!maybe->ToObject(&object)) return maybe; new_map = Map::cast(object); } - switch (GetElementsKind()) { + ElementsKind elements_kind = GetElementsKind(); + switch (elements_kind) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: { AssertNoAllocation no_gc; - WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc); + WriteBarrierMode mode(new_elements->GetWriteBarrierMode(no_gc)); CopyFastElementsToFast(FixedArray::cast(elements()), new_elements, mode); set_map(new_map); set_elements(new_elements); @@ -7384,13 +7947,15 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength( } FixedDoubleArray* elems = FixedDoubleArray::cast(obj); - { MaybeObject* maybe_obj = map()->GetFastDoubleElementsMap(); + { MaybeObject* maybe_obj = + GetElementsTransitionMap(FAST_DOUBLE_ELEMENTS); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } Map* new_map = Map::cast(obj); AssertNoAllocation no_gc; switch (GetElementsKind()) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: { elems->Initialize(FixedArray::cast(elements())); break; @@ -7428,8 +7993,9 @@ MaybeObject* JSObject::SetSlowElements(Object* len) { uint32_t new_length = static_cast<uint32_t>(len->Number()); switch (GetElementsKind()) { - case FAST_ELEMENTS: { - case FAST_DOUBLE_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: + case FAST_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: { // Make sure we never try to shrink dense arrays into sparse arrays. ASSERT(static_cast<uint32_t>( FixedArrayBase::cast(elements())->length()) <= new_length); @@ -7495,7 +8061,7 @@ void JSArray::Expand(int required_size) { Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size); // Can't use this any more now because we may have had a GC! for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i)); - self->SetContent(*new_backing); + GetIsolate()->factory()->SetContent(self, new_backing); } @@ -7518,13 +8084,15 @@ MaybeObject* JSObject::SetElementsLength(Object* len) { if (value < 0) return ArrayLengthRangeError(GetHeap()); ElementsKind elements_kind = GetElementsKind(); switch (elements_kind) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: { int old_capacity = FixedArrayBase::cast(elements())->length(); if (value <= old_capacity) { if (IsJSArray()) { Object* obj; - if (elements_kind == FAST_ELEMENTS) { + if (elements_kind == FAST_ELEMENTS || + elements_kind == FAST_SMI_ONLY_ELEMENTS) { MaybeObject* maybe_obj = EnsureWritableFastElements(); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } @@ -7535,7 +8103,8 @@ MaybeObject* JSObject::SetElementsLength(Object* len) { } else { Address filler_start; int filler_size; - if (GetElementsKind() == FAST_ELEMENTS) { + if (elements_kind == FAST_ELEMENTS || + elements_kind == FAST_SMI_ONLY_ELEMENTS) { FixedArray* fast_elements = FixedArray::cast(elements()); fast_elements->set_length(value); filler_start = fast_elements->address() + @@ -7555,13 +8124,14 @@ MaybeObject* JSObject::SetElementsLength(Object* len) { } else { // Otherwise, fill the unused tail with holes. 
int old_length = FastD2I(JSArray::cast(this)->length()->Number()); - if (GetElementsKind() == FAST_ELEMENTS) { + if (elements_kind == FAST_ELEMENTS || + elements_kind == FAST_SMI_ONLY_ELEMENTS) { FixedArray* fast_elements = FixedArray::cast(elements()); for (int i = value; i < old_length; i++) { fast_elements->set_the_hole(i); } } else { - ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS); + ASSERT(elements_kind == FAST_DOUBLE_ELEMENTS); FixedDoubleArray* fast_double_elements = FixedDoubleArray::cast(elements()); for (int i = value; i < old_length; i++) { @@ -7577,10 +8147,17 @@ MaybeObject* JSObject::SetElementsLength(Object* len) { int new_capacity = value > min ? value : min; if (!ShouldConvertToSlowElements(new_capacity)) { MaybeObject* result; - if (GetElementsKind() == FAST_ELEMENTS) { - result = SetFastElementsCapacityAndLength(new_capacity, value); + if (elements_kind == FAST_ELEMENTS || + elements_kind == FAST_SMI_ONLY_ELEMENTS) { + SetFastElementsCapacityMode set_capacity_mode = + elements_kind == FAST_SMI_ONLY_ELEMENTS + ? kAllowSmiOnlyElements + : kDontAllowSmiOnlyElements; + result = SetFastElementsCapacityAndLength(new_capacity, + value, + set_capacity_mode); } else { - ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS); + ASSERT(elements_kind == FAST_DOUBLE_ELEMENTS); result = SetFastDoubleElementsCapacityAndLength(new_capacity, value); } @@ -7637,10 +8214,13 @@ MaybeObject* JSObject::SetElementsLength(Object* len) { // len is not a number so make the array size one and // set only element to len. Object* obj; - { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; FixedArray::cast(obj)->set(0, len); + + maybe_obj = EnsureCanContainElements(&len, 1); + if (maybe_obj->IsFailure()) return maybe_obj; + if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1)); set_elements(FixedArray::cast(obj)); return this; @@ -7686,7 +8266,7 @@ MaybeObject* Map::PutPrototypeTransition(Object* prototype, Map* map) { FixedArray* new_cache; // Grow array by factor 2 over and above what we need. { MaybeObject* maybe_cache = - heap()->AllocateFixedArray(transitions * 2 * step + header); + GetHeap()->AllocateFixedArray(transitions * 2 * step + header); if (!maybe_cache->To<FixedArray>(&new_cache)) return maybe_cache; } @@ -7739,7 +8319,7 @@ MaybeObject* JSReceiver::SetPrototype(Object* value, // It is sufficient to validate that the receiver is not in the new prototype // chain. for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) { - if (JSObject::cast(pt) == this) { + if (JSReceiver::cast(pt) == this) { // Cycle detected. HandleScope scope(heap->isolate()); return heap->isolate()->Throw( @@ -7754,8 +8334,8 @@ MaybeObject* JSReceiver::SetPrototype(Object* value, // hidden and set the new prototype on that object. 
Object* current_proto = real_receiver->GetPrototype(); while (current_proto->IsJSObject() && - JSObject::cast(current_proto)->map()->is_hidden_prototype()) { - real_receiver = JSObject::cast(current_proto); + JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) { + real_receiver = JSReceiver::cast(current_proto); current_proto = current_proto->GetPrototype(); } } @@ -7788,8 +8368,16 @@ MaybeObject* JSReceiver::SetPrototype(Object* value, } +MaybeObject* JSObject::EnsureCanContainElements(Arguments* args, + uint32_t first_arg, + uint32_t arg_count) { + return EnsureCanContainElements(args->arguments() - first_arg, arg_count); +} + + bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) { switch (GetElementsKind()) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: { uint32_t length = IsJSArray() ? static_cast<uint32_t> @@ -7850,6 +8438,11 @@ bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) { Object* pt = GetPrototype(); if (pt->IsNull()) return false; + if (pt->IsJSProxy()) { + // We need to follow the spec and simulate a call to [[GetOwnProperty]]. + return JSProxy::cast(pt)->GetElementAttributeWithHandler( + receiver, index) != ABSENT; + } return JSObject::cast(pt)->HasElementWithReceiver(receiver, index); } @@ -7926,6 +8519,7 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) { } switch (GetElementsKind()) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: { uint32_t length = IsJSArray() ? static_cast<uint32_t> @@ -8040,6 +8634,7 @@ bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) { ElementsKind kind = GetElementsKind(); switch (kind) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: { uint32_t length = IsJSArray() ? static_cast<uint32_t> @@ -8106,6 +8701,11 @@ bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) { Object* pt = GetPrototype(); if (pt->IsNull()) return false; + if (pt->IsJSProxy()) { + // We need to follow the spec and simulate a call to [[GetOwnProperty]]. + return JSProxy::cast(pt)->GetElementAttributeWithHandler( + receiver, index) != ABSENT; + } return JSObject::cast(pt)->HasElementWithReceiver(receiver, index); } @@ -8182,9 +8782,9 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver, // __defineGetter__ callback if (structure->IsFixedArray()) { Object* getter = FixedArray::cast(structure)->get(kGetterIndex); - if (getter->IsJSFunction()) { - return Object::GetPropertyWithDefinedGetter(receiver, - JSFunction::cast(getter)); + if (getter->IsSpecFunction()) { + // TODO(rossberg): nicer would be to cast to some JSCallable here... + return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter)); } // Getter is not a function. return isolate->heap()->undefined_value(); @@ -8239,8 +8839,9 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure, if (structure->IsFixedArray()) { Handle<Object> setter(FixedArray::cast(structure)->get(kSetterIndex)); - if (setter->IsJSFunction()) { - return SetPropertyWithDefinedSetter(JSFunction::cast(*setter), value); + if (setter->IsSpecFunction()) { + // TODO(rossberg): nicer would be to cast to some JSCallable here... 
+ return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value); } else { if (strict_mode == kNonStrictMode) { return value; @@ -8290,7 +8891,8 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, Object* value, StrictModeFlag strict_mode, bool check_prototype) { - ASSERT(HasFastElements() || HasFastArgumentsElements()); + ASSERT(HasFastTypeElements() || + HasFastArgumentsElements()); FixedArray* backing_store = FixedArray::cast(elements()); if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) { @@ -8301,10 +8903,10 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, if (!maybe->ToObject(&writable)) return maybe; backing_store = FixedArray::cast(writable); } - uint32_t length = static_cast<uint32_t>(backing_store->length()); + uint32_t capacity = static_cast<uint32_t>(backing_store->length()); if (check_prototype && - (index >= length || backing_store->get(index)->IsTheHole())) { + (index >= capacity || backing_store->get(index)->IsTheHole())) { bool found; MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index, value, @@ -8313,39 +8915,71 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, if (found) return result; } - // Check whether there is extra space in fixed array. - if (index < length) { - backing_store->set(index, value); - if (IsJSArray()) { - // Update the length of the array if needed. - uint32_t array_length = 0; - CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length)); - if (index >= array_length) { - JSArray::cast(this)->set_length(Smi::FromInt(index + 1)); + uint32_t new_capacity = capacity; + // Check if the length property of this object needs to be updated. + uint32_t array_length = 0; + bool must_update_array_length = false; + if (IsJSArray()) { + CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length)); + if (index >= array_length) { + must_update_array_length = true; + array_length = index + 1; + } + } + // Check if the capacity of the backing store needs to be increased, or if + // a transition to slow elements is necessary. + if (index >= capacity) { + bool convert_to_slow = true; + if ((index - capacity) < kMaxGap) { + new_capacity = NewElementsCapacity(index + 1); + ASSERT(new_capacity > index); + if (!ShouldConvertToSlowElements(new_capacity)) { + convert_to_slow = false; } } + if (convert_to_slow) { + MaybeObject* result = NormalizeElements(); + if (result->IsFailure()) return result; + return SetDictionaryElement(index, value, strict_mode, check_prototype); + } + } + // Convert to fast double elements if appropriate. + if (HasFastSmiOnlyElements() && !value->IsSmi() && value->IsNumber()) { + MaybeObject* maybe = + SetFastDoubleElementsCapacityAndLength(new_capacity, array_length); + if (maybe->IsFailure()) return maybe; + FixedDoubleArray::cast(elements())->set(index, value->Number()); return value; } - - // Allow gap in fast case. - if ((index - length) < kMaxGap) { - // Try allocating extra space. - int new_capacity = NewElementsCapacity(index + 1); - if (!ShouldConvertToSlowElements(new_capacity)) { - ASSERT(static_cast<uint32_t>(new_capacity) > index); - Object* new_elements; - MaybeObject* maybe = - SetFastElementsCapacityAndLength(new_capacity, index + 1); - if (!maybe->ToObject(&new_elements)) return maybe; - FixedArray::cast(new_elements)->set(index, value); - return value; - } + // Change elements kind from SMI_ONLY to generic FAST if necessary. 
+ if (HasFastSmiOnlyElements() && !value->IsSmi()) { + MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS); + Map* new_map; + if (!maybe_new_map->To<Map>(&new_map)) return maybe_new_map; + set_map(new_map); } - - // Otherwise default to slow case. - MaybeObject* result = NormalizeElements(); - if (result->IsFailure()) return result; - return SetDictionaryElement(index, value, strict_mode, check_prototype); + // Increase backing store capacity if that's been decided previously. + if (new_capacity != capacity) { + Object* new_elements; + SetFastElementsCapacityMode set_capacity_mode = + value->IsSmi() && HasFastSmiOnlyElements() + ? kAllowSmiOnlyElements + : kDontAllowSmiOnlyElements; + MaybeObject* maybe = + SetFastElementsCapacityAndLength(new_capacity, + array_length, + set_capacity_mode); + if (!maybe->ToObject(&new_elements)) return maybe; + FixedArray::cast(new_elements)->set(index, value); + return value; + } + // Finally, set the new element and length. + ASSERT(elements()->IsFixedArray()); + backing_store->set(index, value); + if (must_update_array_length) { + JSArray::cast(this)->set_length(Smi::FromInt(array_length)); + } + return value; } @@ -8441,7 +9075,9 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index, } MaybeObject* result = CanConvertToFastDoubleElements() ? SetFastDoubleElementsCapacityAndLength(new_length, new_length) - : SetFastElementsCapacityAndLength(new_length, new_length); + : SetFastElementsCapacityAndLength(new_length, + new_length, + kDontAllowSmiOnlyElements); if (result->IsFailure()) return result; #ifdef DEBUG if (FLAG_trace_normalization) { @@ -8485,10 +9121,15 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement( if (IsJSArray()) { CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length)); } - MaybeObject* maybe_obj = - SetFastElementsCapacityAndLength(elms_length, length); + MaybeObject* maybe_obj = SetFastElementsCapacityAndLength( + elms_length, + length, + kDontAllowSmiOnlyElements); if (!maybe_obj->ToObject(&obj)) return maybe_obj; - return SetFastElement(index, value, strict_mode, check_prototype); + return SetFastElement(index, + value, + strict_mode, + check_prototype); } double double_value = value_is_smi @@ -8539,6 +9180,17 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement( } +MaybeObject* JSReceiver::SetElement(uint32_t index, + Object* value, + StrictModeFlag strict_mode, + bool check_proto) { + return IsJSProxy() + ? JSProxy::cast(this)->SetElementWithHandler(index, value, strict_mode) + : JSObject::cast(this)->SetElement(index, value, strict_mode, check_proto) + ; +} + + MaybeObject* JSObject::SetElement(uint32_t index, Object* value, StrictModeFlag strict_mode, @@ -8585,6 +9237,7 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index, bool check_prototype) { Isolate* isolate = GetIsolate(); switch (GetElementsKind()) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: return SetFastElement(index, value, strict_mode, check_prototype); case FAST_DOUBLE_ELEMENTS: @@ -8747,6 +9400,7 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) { break; } // Fall through. + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: backing_store = FixedArray::cast(backing_store_base); *capacity = backing_store->length(); @@ -9022,6 +9676,7 @@ bool JSObject::HasRealElementProperty(uint32_t index) { if (this->IsStringObjectWithCharacterAt(index)) return true; switch (GetElementsKind()) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: { uint32_t length = IsJSArray() ? 
static_cast<uint32_t>( @@ -9261,6 +9916,7 @@ int JSObject::GetLocalElementKeys(FixedArray* storage, PropertyAttributes filter) { int counter = 0; switch (GetElementsKind()) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: { int length = IsJSArray() ? Smi::cast(JSArray::cast(this)->length())->value() : @@ -9942,7 +10598,7 @@ template class HashTable<CompilationCacheShape, HashTableKey*>; template class HashTable<MapCacheShape, HashTableKey*>; -template class HashTable<ObjectHashTableShape, JSObject*>; +template class HashTable<ObjectHashTableShape, JSReceiver*>; template class Dictionary<StringDictionaryShape, String*>; @@ -10126,8 +10782,6 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) { // If the object is in dictionary mode, it is converted to fast elements // mode. MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) { - ASSERT(!HasExternalArrayElements()); - Heap* heap = GetHeap(); if (HasDictionaryElements()) { @@ -10141,7 +10795,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) { // Convert to fast elements. Object* obj; - { MaybeObject* maybe_obj = map()->GetFastElementsMap(); + { MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } Map* new_map = Map::cast(obj); @@ -10157,13 +10811,16 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) { set_map(new_map); set_elements(fast_elements); + } else if (HasExternalArrayElements()) { + // External arrays cannot have holes or undefined elements. + return Smi::FromInt(ExternalArray::cast(elements())->length()); } else if (!HasFastDoubleElements()) { Object* obj; { MaybeObject* maybe_obj = EnsureWritableFastElements(); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } } - ASSERT(HasFastElements() || HasFastDoubleElements()); + ASSERT(HasFastTypeElements() || HasFastDoubleElements()); // Collect holes at the end, undefined before that and the rest at the // start, and return the number of non-hole, non-undefined values. @@ -11287,9 +11944,9 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor( } -Object* ObjectHashTable::Lookup(JSObject* key) { +Object* ObjectHashTable::Lookup(JSReceiver* key) { // If the object does not have an identity hash, it was never used as a key. - MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION); + MaybeObject* maybe_hash = key->GetIdentityHash(OMIT_CREATION); if (maybe_hash->IsFailure()) return GetHeap()->undefined_value(); int entry = FindEntry(key); if (entry == kNotFound) return GetHeap()->undefined_value(); @@ -11297,10 +11954,10 @@ Object* ObjectHashTable::Lookup(JSObject* key) { } -MaybeObject* ObjectHashTable::Put(JSObject* key, Object* value) { +MaybeObject* ObjectHashTable::Put(JSReceiver* key, Object* value) { // Make sure the key object has an identity hash code. int hash; - { MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::ALLOW_CREATION); + { MaybeObject* maybe_hash = key->GetIdentityHash(ALLOW_CREATION); if (maybe_hash->IsFailure()) return maybe_hash; hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value(); } @@ -11330,7 +11987,7 @@ MaybeObject* ObjectHashTable::Put(JSObject* key, Object* value) { } -void ObjectHashTable::AddEntry(int entry, JSObject* key, Object* value) { +void ObjectHashTable::AddEntry(int entry, JSReceiver* key, Object* value) { set(EntryToIndex(entry), key); set(EntryToIndex(entry) + 1, value); ElementAdded(); @@ -11594,7 +12251,7 @@ int BreakPointInfo::GetBreakPointCount() { // Multiple break points. 
return FixedArray::cast(break_point_objects())->length(); } -#endif +#endif // ENABLE_DEBUGGER_SUPPORT } } // namespace v8::internal diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index d9c7a8227..b95fa574a 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -38,6 +38,7 @@ #elif V8_TARGET_ARCH_MIPS #include "mips/constants-mips.h" #endif +#include "v8checks.h" // // Most object types in the V8 JavaScript are described in this file. @@ -136,8 +137,13 @@ namespace v8 { namespace internal { enum ElementsKind { - // The "fast" kind for tagged values. Must be first to make it possible - // to efficiently check maps if they have fast elements. + // The "fast" kind for elements that only contain SMI values. Must be first + // to make it possible to efficiently check maps for this kind. + FAST_SMI_ONLY_ELEMENTS, + + // The "fast" kind for tagged values. Must be second to make it possible to + // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind + // together at once. FAST_ELEMENTS, // The "fast" kind for unwrapped, non-tagged double values. @@ -160,7 +166,7 @@ enum ElementsKind { // Derived constants from ElementsKind FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS, - FIRST_ELEMENTS_KIND = FAST_ELEMENTS, + FIRST_ELEMENTS_KIND = FAST_SMI_ONLY_ELEMENTS, LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS }; @@ -174,7 +180,6 @@ class PropertyDetails BASE_EMBEDDED { PropertyDetails(PropertyAttributes attributes, PropertyType type, int index = 0) { - ASSERT(type != ELEMENTS_TRANSITION); ASSERT(TypeField::is_valid(type)); ASSERT(AttributesField::is_valid(attributes)); ASSERT(StorageField::is_valid(index)); @@ -188,23 +193,6 @@ class PropertyDetails BASE_EMBEDDED { ASSERT(index == this->index()); } - PropertyDetails(PropertyAttributes attributes, - PropertyType type, - ElementsKind elements_kind) { - ASSERT(type == ELEMENTS_TRANSITION); - ASSERT(TypeField::is_valid(type)); - ASSERT(AttributesField::is_valid(attributes)); - ASSERT(StorageField::is_valid(static_cast<int>(elements_kind))); - - value_ = TypeField::encode(type) - | AttributesField::encode(attributes) - | StorageField::encode(static_cast<int>(elements_kind)); - - ASSERT(type == this->type()); - ASSERT(attributes == this->attributes()); - ASSERT(elements_kind == this->elements_kind()); - } - // Conversion for storing details as Object*. explicit inline PropertyDetails(Smi* smi); inline Smi* AsSmi(); @@ -226,11 +214,6 @@ class PropertyDetails BASE_EMBEDDED { int index() { return StorageField::decode(value_); } - ElementsKind elements_kind() { - ASSERT(type() == ELEMENTS_TRANSITION); - return static_cast<ElementsKind>(StorageField::decode(value_)); - } - inline PropertyDetails AsDeleted(); static bool IsValidIndex(int index) { @@ -276,6 +259,13 @@ enum NormalizedMapSharingMode { }; +// Indicates whether a get method should implicitly create the object looked up. +enum CreationFlag { + ALLOW_CREATION, + OMIT_CREATION +}; + + // Instance size sentinel for objects of variable size. static const int kVariableSizeSentinel = 0; @@ -329,6 +319,7 @@ static const int kVariableSizeSentinel = 0; V(HEAP_NUMBER_TYPE) \ V(FOREIGN_TYPE) \ V(BYTE_ARRAY_TYPE) \ + V(FREE_SPACE_TYPE) \ /* Note: the order of these external array */ \ /* types is relied upon in */ \ /* Object::IsExternalArray(). 
*/ \ @@ -585,6 +576,7 @@ enum InstanceType { HEAP_NUMBER_TYPE, FOREIGN_TYPE, BYTE_ARRAY_TYPE, + FREE_SPACE_TYPE, EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE, EXTERNAL_SHORT_ARRAY_TYPE, @@ -621,24 +613,30 @@ enum InstanceType { JS_MESSAGE_OBJECT_TYPE, - JS_VALUE_TYPE, // FIRST_NON_CALLABLE_OBJECT_TYPE, FIRST_JS_RECEIVER_TYPE + // All the following types are subtypes of JSReceiver, which corresponds to + // objects in the JS sense. The first and the last type in this range are + // the two forms of function. This organization enables using the same + // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the + // NONCALLABLE_JS_OBJECT range. + JS_FUNCTION_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE + JS_PROXY_TYPE, // LAST_JS_PROXY_TYPE + + JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE JS_OBJECT_TYPE, JS_CONTEXT_EXTENSION_OBJECT_TYPE, JS_GLOBAL_OBJECT_TYPE, JS_BUILTINS_OBJECT_TYPE, JS_GLOBAL_PROXY_TYPE, JS_ARRAY_TYPE, - JS_PROXY_TYPE, JS_WEAK_MAP_TYPE, - JS_REGEXP_TYPE, // LAST_NONCALLABLE_SPEC_OBJECT_TYPE + JS_REGEXP_TYPE, - JS_FUNCTION_TYPE, // FIRST_CALLABLE_SPEC_OBJECT_TYPE - JS_FUNCTION_PROXY_TYPE, // LAST_CALLABLE_SPEC_OBJECT_TYPE + JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE // Pseudo-types FIRST_TYPE = 0x0, - LAST_TYPE = JS_FUNCTION_PROXY_TYPE, + LAST_TYPE = JS_FUNCTION_TYPE, INVALID_TYPE = FIRST_TYPE - 1, FIRST_NONSTRING_TYPE = MAP_TYPE, // Boundaries for testing for an external array. @@ -651,17 +649,23 @@ enum InstanceType { // are not continuous in this enum! The enum ranges instead reflect the // external class names, where proxies are treated as either ordinary objects, // or functions. - FIRST_JS_RECEIVER_TYPE = JS_VALUE_TYPE, + FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE, LAST_JS_RECEIVER_TYPE = LAST_TYPE, + // Boundaries for testing the types represented as JSObject + FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE, + LAST_JS_OBJECT_TYPE = LAST_TYPE, + // Boundaries for testing the types represented as JSProxy + FIRST_JS_PROXY_TYPE = JS_FUNCTION_PROXY_TYPE, + LAST_JS_PROXY_TYPE = JS_PROXY_TYPE, + // Boundaries for testing whether the type is a JavaScript object. + FIRST_SPEC_OBJECT_TYPE = FIRST_JS_RECEIVER_TYPE, + LAST_SPEC_OBJECT_TYPE = LAST_JS_RECEIVER_TYPE, // Boundaries for testing the types for which typeof is "object". - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_VALUE_TYPE, + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_PROXY_TYPE, LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE, - // Boundaries for testing the types for which typeof is "function". - FIRST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_TYPE, - LAST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_PROXY_TYPE, - // Boundaries for testing whether the type is a JavaScript object. - FIRST_SPEC_OBJECT_TYPE = FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, - LAST_SPEC_OBJECT_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE + // Note that the types for which typeof is "function" are not continuous. + // Define this so that we can put assertions on discrete checks. + NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2 }; static const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE - @@ -697,6 +701,7 @@ class ElementsAccessor; class FixedArrayBase; class ObjectVisitor; class StringStream; +class Failure; struct ValueInfo : public Malloced { ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { } @@ -710,7 +715,6 @@ struct ValueInfo : public Malloced { // A template-ized version of the IsXXX functions. 
template <class C> static inline bool Is(Object* obj); -class Failure; class MaybeObject BASE_EMBEDDED { public: @@ -748,7 +752,7 @@ class MaybeObject BASE_EMBEDDED { // Prints this object with details. inline void Print() { Print(stdout); - }; + } inline void PrintLn() { PrintLn(stdout); } @@ -791,6 +795,7 @@ class MaybeObject BASE_EMBEDDED { V(ExternalDoubleArray) \ V(ExternalPixelArray) \ V(ByteArray) \ + V(FreeSpace) \ V(JSReceiver) \ V(JSObject) \ V(JSContextExtensionObject) \ @@ -835,6 +840,9 @@ class MaybeObject BASE_EMBEDDED { V(AccessCheckNeeded) \ V(JSGlobalPropertyCell) \ + +class JSReceiver; + // Object is the abstract superclass for all classes in the // object hierarchy. // Object does not use any virtual functions to avoid the @@ -859,6 +867,7 @@ class Object : public MaybeObject { #undef DECLARE_STRUCT_PREDICATE INLINE(bool IsSpecObject()); + INLINE(bool IsSpecFunction()); // Oddball testing. INLINE(bool IsUndefined()); @@ -867,6 +876,10 @@ class Object : public MaybeObject { INLINE(bool IsTrue()); INLINE(bool IsFalse()); inline bool IsArgumentsMarker(); + inline bool NonFailureIsHeapObject(); + + // Filler objects (fillers and free space objects). + inline bool IsFiller(); // Extract the number. inline double Number(); @@ -903,15 +916,8 @@ class Object : public MaybeObject { LookupResult* result, String* key, PropertyAttributes* attributes); - MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver, - Object* structure, - String* name, - Object* holder); - MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(Object* receiver, - String* name, - Object* handler); MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver, - JSFunction* getter); + JSReceiver* getter); inline MaybeObject* GetElement(uint32_t index); // For use when we know that no exception can be thrown. @@ -1095,101 +1101,13 @@ class MapWord BASE_EMBEDDED { // View this map word as a forwarding address. inline HeapObject* ToForwardingAddress(); - // Marking phase of full collection: the map word of live objects is - // marked, and may be marked as overflowed (eg, the object is live, its - // children have not been visited, and it does not fit in the marking - // stack). - - // True if this map word's mark bit is set. - inline bool IsMarked(); - - // Return this map word but with its mark bit set. - inline void SetMark(); - - // Return this map word but with its mark bit cleared. - inline void ClearMark(); - - // True if this map word's overflow bit is set. - inline bool IsOverflowed(); - - // Return this map word but with its overflow bit set. - inline void SetOverflow(); - - // Return this map word but with its overflow bit cleared. - inline void ClearOverflow(); - - - // Compacting phase of a full compacting collection: the map word of live - // objects contains an encoding of the original map address along with the - // forwarding address (represented as an offset from the first live object - // in the same page as the (old) object address). - - // Create a map word from a map address and a forwarding address offset. - static inline MapWord EncodeAddress(Address map_address, int offset); - - // Return the map address encoded in this map word. - inline Address DecodeMapAddress(MapSpace* map_space); - - // Return the forwarding offset encoded in this map word. - inline int DecodeOffset(); - - - // During serialization: the map word is used to hold an encoded - // address, and possibly a mark bit (set and cleared with SetMark - // and ClearMark). 
- - // Create a map word from an encoded address. - static inline MapWord FromEncodedAddress(Address address); - - inline Address ToEncodedAddress(); - - // Bits used by the marking phase of the garbage collector. - // - // The first word of a heap object is normally a map pointer. The last two - // bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to - // mark an object as live and/or overflowed: - // last bit = 0, marked as alive - // second bit = 1, overflowed - // An object is only marked as overflowed when it is marked as live while - // the marking stack is overflowed. - static const int kMarkingBit = 0; // marking bit - static const int kMarkingMask = (1 << kMarkingBit); // marking mask - static const int kOverflowBit = 1; // overflow bit - static const int kOverflowMask = (1 << kOverflowBit); // overflow mask - - // Forwarding pointers and map pointer encoding. On 32 bit all the bits are - // used. - // +-----------------+------------------+-----------------+ - // |forwarding offset|page offset of map|page index of map| - // +-----------------+------------------+-----------------+ - // ^ ^ ^ - // | | | - // | | kMapPageIndexBits - // | kMapPageOffsetBits - // kForwardingOffsetBits - static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits; - static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits; -#ifdef V8_HOST_ARCH_64_BIT - static const int kMapPageIndexBits = 16; -#else - // Use all the 32-bits to encode on a 32-bit platform. - static const int kMapPageIndexBits = - 32 - (kMapPageOffsetBits + kForwardingOffsetBits); -#endif - - static const int kMapPageIndexShift = 0; - static const int kMapPageOffsetShift = - kMapPageIndexShift + kMapPageIndexBits; - static const int kForwardingOffsetShift = - kMapPageOffsetShift + kMapPageOffsetBits; + static inline MapWord FromRawValue(uintptr_t value) { + return MapWord(value); + } - // Bit masks covering the different parts the encoding. - static const uintptr_t kMapPageIndexMask = - (1 << kMapPageOffsetShift) - 1; - static const uintptr_t kMapPageOffsetMask = - ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask; - static const uintptr_t kForwardingOffsetMask = - ~(kMapPageIndexMask | kMapPageOffsetMask); + inline uintptr_t ToRawValue() { + return value_; + } private: // HeapObject calls the private constructor and directly reads the value. @@ -1209,6 +1127,7 @@ class HeapObject: public Object { // information. inline Map* map(); inline void set_map(Map* value); + inline void set_map_unsafe(Map* value); // During garbage collection, the map word of a heap object does not // necessarily contain a map pointer. @@ -1216,8 +1135,8 @@ class HeapObject: public Object { inline void set_map_word(MapWord map_word); // The Heap the object was allocated in. Used also to access Isolate. - // This method can not be used during GC, it ASSERTs this. inline Heap* GetHeap(); + // Convenience method to get current isolate. This method can be // accessed only when its result is the same as // Isolate::Current(), it ASSERTs this. See also comment for GetHeap. @@ -1246,31 +1165,6 @@ class HeapObject: public Object { // GC internal. inline int SizeFromMap(Map* map); - // Support for the marking heap objects during the marking phase of GC. - // True if the object is marked live. - inline bool IsMarked(); - - // Mutate this object's map pointer to indicate that the object is live. 
- inline void SetMark(); - - // Mutate this object's map pointer to remove the indication that the - // object is live (ie, partially restore the map pointer). - inline void ClearMark(); - - // True if this object is marked as overflowed. Overflowed objects have - // been reached and marked during marking of the heap, but their children - // have not necessarily been marked and they have not been pushed on the - // marking stack. - inline bool IsOverflowed(); - - // Mutate this object's map pointer to indicate that the object is - // overflowed. - inline void SetOverflow(); - - // Mutate this object's map pointer to remove the indication that the - // object is overflowed (ie, partially restore the map pointer). - inline void ClearOverflow(); - // Returns the field at offset in obj, as a read/write Object* reference. // Does no checking, and is safe to use during GC, while maps are invalid. // Does not invoke write barrier, so should only be assigned to @@ -1294,18 +1188,14 @@ class HeapObject: public Object { HeapObjectPrint(stdout); } void HeapObjectPrint(FILE* out); + void PrintHeader(FILE* out, const char* id); #endif + #ifdef DEBUG void HeapObjectVerify(); inline void VerifyObjectField(int offset); inline void VerifySmiField(int offset); -#endif - -#ifdef OBJECT_PRINT - void PrintHeader(FILE* out, const char* id); -#endif -#ifdef DEBUG // Verify a pointer is a valid HeapObject pointer that points to object // areas in the heap. static void VerifyHeapPointer(Object* p); @@ -1448,8 +1338,18 @@ class JSReceiver: public HeapObject { Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode); + MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter, + Object* value); MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode); + MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode); + + // Set the index'th array element. + // Can cause GC, or return failure if GC is required. + MUST_USE_RESULT MaybeObject* SetElement(uint32_t index, + Object* value, + StrictModeFlag strict_mode, + bool check_prototype); // Returns the class name ([[Class]] property in the specification). String* class_name(); @@ -1466,6 +1366,7 @@ class JSReceiver: public HeapObject { // Can cause a GC. inline bool HasProperty(String* name); inline bool HasLocalProperty(String* name); + inline bool HasElement(uint32_t index); // Return the object's prototype (might be Heap::null_value()). inline Object* GetPrototype(); @@ -1474,11 +1375,18 @@ class JSReceiver: public HeapObject { MUST_USE_RESULT MaybeObject* SetPrototype(Object* value, bool skip_hidden_prototypes); + // Retrieves a permanent object identity hash code. The undefined value might + // be returned in case no hash has been created yet and OMIT_CREATION was used. + inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag); + // Lookup a property. If found, the result is valid and has // detailed information. 
void LocalLookup(String* name, LookupResult* result); void Lookup(String* name, LookupResult* result); + protected: + Smi* GenerateIdentityHash(); + private: PropertyAttributes GetPropertyAttribute(JSReceiver* receiver, LookupResult* result, @@ -1525,8 +1433,14 @@ class JSObject: public JSReceiver { MUST_USE_RESULT inline MaybeObject* ResetElements(); inline ElementsKind GetElementsKind(); inline ElementsAccessor* GetElementsAccessor(); + inline bool HasFastSmiOnlyElements(); inline bool HasFastElements(); + // Returns if an object has either FAST_ELEMENT or FAST_SMI_ONLY_ELEMENT + // elements. TODO(danno): Rename HasFastTypeElements to HasFastElements() and + // HasFastElements to HasFastObjectElements. + inline bool HasFastTypeElements(); inline bool HasFastDoubleElements(); + inline bool HasNonStrictArgumentsElements(); inline bool HasDictionaryElements(); inline bool HasExternalPixelElements(); inline bool HasExternalArrayElements(); @@ -1554,6 +1468,11 @@ class JSObject: public JSReceiver { // a dictionary, and it will stay a dictionary. MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit); + MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver, + Object* structure, + String* name); + + // Can cause GC. MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result, String* key, Object* value, @@ -1571,8 +1490,6 @@ class JSObject: public JSReceiver { Object* value, JSObject* holder, StrictModeFlag strict_mode); - MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSFunction* setter, - Object* value); MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor( String* name, Object* value, @@ -1660,37 +1577,28 @@ class JSObject: public JSReceiver { // Accessors for hidden properties object. // // Hidden properties are not local properties of the object itself. - // Instead they are stored on an auxiliary JSObject stored as a local + // Instead they are stored in an auxiliary structure kept as a local // property with a special name Heap::hidden_symbol(). But if the // receiver is a JSGlobalProxy then the auxiliary object is a property - // of its prototype. - // - // Has/Get/SetHiddenPropertiesObject methods don't allow the holder to be - // a JSGlobalProxy. Use BypassGlobalProxy method above to get to the real - // holder. - // - // These accessors do not touch interceptors or accessors. - inline bool HasHiddenPropertiesObject(); - inline Object* GetHiddenPropertiesObject(); - MUST_USE_RESULT inline MaybeObject* SetHiddenPropertiesObject( - Object* hidden_obj); - - // Indicates whether the hidden properties object should be created. - enum HiddenPropertiesFlag { ALLOW_CREATION, OMIT_CREATION }; - - // Retrieves the hidden properties object. - // - // The undefined value might be returned in case no hidden properties object - // is present and creation was omitted. - inline bool HasHiddenProperties(); - MUST_USE_RESULT MaybeObject* GetHiddenProperties(HiddenPropertiesFlag flag); - - // Retrieves a permanent object identity hash code. - // - // The identity hash is stored as a hidden property. The undefined value might - // be returned in case no hidden properties object is present and creation was - // omitted. - MUST_USE_RESULT MaybeObject* GetIdentityHash(HiddenPropertiesFlag flag); + // of its prototype, and if it's a detached proxy, then you can't have + // hidden properties. + + // Sets a hidden property on this object. 
Returns this object if successful, + // undefined if called on a detached proxy, and a failure if a GC + // is required. + MaybeObject* SetHiddenProperty(String* key, Object* value); + // Gets the value of a hidden property with the given key. Returns undefined + // if the property doesn't exist (or if called on a detached proxy), + // otherwise returns the value set for the key. + Object* GetHiddenProperty(String* key); + // Deletes a hidden property. Deleting a non-existing property is + // considered successful. + void DeleteHiddenProperty(String* key); + // Returns true if the object has a property with the hidden symbol as name. + bool HasHiddenProperties(); + + MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag); + MUST_USE_RESULT MaybeObject* SetIdentityHash(Object* hash, CreationFlag flag); MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode); MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode); @@ -1698,6 +1606,19 @@ class JSObject: public JSReceiver { // Tests for the fast common case for property enumeration. bool IsSimpleEnum(); + inline void ValidateSmiOnlyElements(); + + // Makes sure that this object can contain non-smi Object as elements. + inline MaybeObject* EnsureCanContainNonSmiElements(); + + // Makes sure that this object can contain the specified elements. + inline MaybeObject* EnsureCanContainElements(Object** elements, + uint32_t count); + inline MaybeObject* EnsureCanContainElements(FixedArray* elements); + MaybeObject* EnsureCanContainElements(Arguments* arguments, + uint32_t first_arg, + uint32_t arg_count); + // Do we want to keep the elements in fast case when increasing the // capacity? bool ShouldConvertToSlowElements(int new_capacity); @@ -1711,7 +1632,6 @@ bool CanConvertToFastDoubleElements(); // Tells whether the index'th element is present. - inline bool HasElement(uint32_t index); bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index); // Computes the new capacity when expanding the elements of a JSObject. @@ -1747,6 +1667,7 @@ Object* value, StrictModeFlag strict_mode, bool check_prototype); + MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index, Object* value, StrictModeFlag strict_mode, @@ -1769,11 +1690,18 @@ // The undefined object if index is out of bounds. MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index); + enum SetFastElementsCapacityMode { + kAllowSmiOnlyElements, + kDontAllowSmiOnlyElements + }; + // Replace the elements' backing store with fast elements of the given // capacity. Update the length for JSArrays. Returns the new backing // store. - MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity, - int length); + MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength( + int capacity, + int length, + SetFastElementsCapacityMode set_capacity_mode); MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength( int capacity, int length); @@ -1801,10 +1729,6 @@ inline Object* GetInternalField(int index); inline void SetInternalField(int index, Object* value); - // Lookup a property. If found, the result is valid and has - // detailed information. - void LocalLookup(String* name, LookupResult* result); - // The following lookup functions skip interceptors. 
void LocalLookupRealNamedProperty(String* name, LookupResult* result); void LookupRealNamedProperty(String* name, LookupResult* result); @@ -1860,6 +1784,11 @@ class JSObject: public JSReceiver { Object* value, PropertyAttributes attributes); + // Returns a new map with all transitions dropped from the object's current + // map and the ElementsKind set. + MUST_USE_RESULT MaybeObject* GetElementsTransitionMap( + ElementsKind elements_kind); + // Converts a descriptor of any other type to a real field, // backed by the properties array. Descriptors of visible // types, such as CONSTANT_FUNCTION, keep their enumeration order. @@ -1925,11 +1854,14 @@ class JSObject: public JSReceiver { WriteBarrierMode mode = UPDATE_WRITE_BARRIER); - // initializes the body after properties slot, properties slot is - // initialized by set_properties - // Note: this call does not update write barrier, it is caller's - // reponsibility to ensure that *v* can be collected without WB here. - inline void InitializeBody(int object_size, Object* value); + // Initializes the body after properties slot, properties slot is + // initialized by set_properties. Fill the pre-allocated fields with + // pre_allocated_value and the rest with filler_value. + // Note: this call does not update write barrier, the caller is responsible + // to ensure that |filler_value| can be collected without WB here. + inline void InitializeBody(Map* map, + Object* pre_allocated_value, + Object* filler_value); // Check whether this object references another object bool ReferencesObject(Object* obj); @@ -2054,6 +1986,18 @@ class JSObject: public JSReceiver { StrictModeFlag strict_mode, bool check_prototype); + // Searches the prototype chain for a callback setter and sets the property + // with the setter if it finds one. The '*found' flag indicates whether + // a setter was found or not. + // This function can cause GC and can return a failure result with + // '*found==true'. + MUST_USE_RESULT MaybeObject* SetPropertyWithCallbackSetterInPrototypes( + String* name, + Object* value, + PropertyAttributes attributes, + bool* found, + StrictModeFlag strict_mode); + MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name, DeleteMode mode); MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(String* name); @@ -2092,6 +2036,15 @@ class JSObject: public JSReceiver { void LookupInDescriptor(String* name, LookupResult* result); + // Returns the hidden properties backing store object, currently + // a StringDictionary, stored on this object. + // If no hidden properties object has been put on this object, + // return undefined, unless create_if_absent is true, in which case + // a new dictionary is created, added to this object, and returned. + MaybeObject* GetHiddenPropertiesDictionary(bool create_if_absent); + // Updates the existing hidden properties dictionary. + MaybeObject* SetHiddenPropertiesDictionary(StringDictionary* dictionary); + DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject); }; @@ -2917,7 +2870,7 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> { JSObject* obj, int unused_property_fields); - // Find entry for key otherwise return kNotFound. Optimzed version of + // Find entry for key, otherwise return kNotFound. Optimized version of // HashTable::FindEntry. 
int FindEntry(String* key); }; @@ -2980,10 +2933,10 @@ class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> { class ObjectHashTableShape { public: - static inline bool IsMatch(JSObject* key, Object* other); - static inline uint32_t Hash(JSObject* key); - static inline uint32_t HashForObject(JSObject* key, Object* object); - MUST_USE_RESULT static inline MaybeObject* AsObject(JSObject* key); + static inline bool IsMatch(JSReceiver* key, Object* other); + static inline uint32_t Hash(JSReceiver* key); + static inline uint32_t HashForObject(JSReceiver* key, Object* object); + MUST_USE_RESULT static inline MaybeObject* AsObject(JSReceiver* key); static const int kPrefixSize = 0; static const int kEntrySize = 2; }; @@ -2991,7 +2944,7 @@ class ObjectHashTableShape { // ObjectHashTable maps keys that are JavaScript objects to object values by // using the identity hash of the key for hashing purposes. -class ObjectHashTable: public HashTable<ObjectHashTableShape, JSObject*> { +class ObjectHashTable: public HashTable<ObjectHashTableShape, JSReceiver*> { public: static inline ObjectHashTable* cast(Object* obj) { ASSERT(obj->IsHashTable()); @@ -3000,16 +2953,16 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape, JSObject*> { // Looks up the value associated with the given key. The undefined value is // returned in case the key is not present. - Object* Lookup(JSObject* key); + Object* Lookup(JSReceiver* key); // Adds (or overwrites) the value associated with the given key. Mapping a // key to the undefined value causes removal of the whole entry. - MUST_USE_RESULT MaybeObject* Put(JSObject* key, Object* value); + MUST_USE_RESULT MaybeObject* Put(JSReceiver* key, Object* value); private: friend class MarkCompactCollector; - void AddEntry(int entry, JSObject* key, Object* value); + void AddEntry(int entry, JSReceiver* key, Object* value); void RemoveEntry(int entry, Heap* heap); inline void RemoveEntry(int entry); @@ -3058,6 +3011,68 @@ class JSFunctionResultCache: public FixedArray { }; +// This object provides quick access to scope info details for runtime +// routines w/o the need to explicitly create a ScopeInfo object. +class SerializedScopeInfo : public FixedArray { + public : + static SerializedScopeInfo* cast(Object* object) { + ASSERT(object->IsSerializedScopeInfo()); + return reinterpret_cast<SerializedScopeInfo*>(object); + } + + // Does this scope call eval? + bool CallsEval(); + + // Is this scope a strict mode scope? + bool IsStrictMode(); + + // Return the number of stack slots for code. + int NumberOfStackSlots(); + + // Return the number of context slots for code. + int NumberOfContextSlots(); + + // Return if this has context slots besides MIN_CONTEXT_SLOTS; + bool HasHeapAllocatedLocals(); + + // Lookup support for serialized scope info. Returns the + // the stack slot index for a given slot name if the slot is + // present; otherwise returns a value < 0. The name must be a symbol + // (canonicalized). + int StackSlotIndex(String* name); + + // Lookup support for serialized scope info. Returns the + // context slot index for a given slot name if the slot is present; otherwise + // returns a value < 0. The name must be a symbol (canonicalized). + // If the slot is present and mode != NULL, sets *mode to the corresponding + // mode for that variable. + int ContextSlotIndex(String* name, VariableMode* mode); + + // Lookup support for serialized scope info. 
Returns the + // parameter index for a given parameter name if the parameter is present; + // otherwise returns a value < 0. The name must be a symbol (canonicalized). + int ParameterIndex(String* name); + + // Lookup support for serialized scope info. Returns the + // function context slot index if the function name is present (named + // function expressions, only), otherwise returns a value < 0. The name + // must be a symbol (canonicalized). + int FunctionContextSlotIndex(String* name); + + static Handle<SerializedScopeInfo> Create(Scope* scope); + + // Serializes empty scope info. + static SerializedScopeInfo* Empty(); + + private: + Object** ContextEntriesAddr(); + + Object** ParameterEntriesAddr(); + + Object** StackSlotEntriesAddr(); +}; + + // The cache for maps used by normalized (dictionary mode) objects. // Such maps do not have property descriptors, so a typical program // needs very limited number of distinct normalized maps. @@ -3079,11 +3094,12 @@ class NormalizedMapCache: public FixedArray { }; -// ByteArray represents fixed sized byte arrays. Used by the outside world, -// such as PCRE, and also by the memory allocator and garbage collector to -// fill in free blocks in the heap. +// ByteArray represents fixed sized byte arrays. Used for the relocation info +// that is attached to code objects. class ByteArray: public FixedArrayBase { public: + inline int Size() { return RoundUp(length() + kHeaderSize, kPointerSize); } + // Setter and getter. inline byte get(int index); inline void set(int index, byte value); @@ -3140,6 +3156,44 @@ class ByteArray: public FixedArrayBase { }; +// FreeSpace represents fixed sized areas of the heap that are not currently in +// use. Used by the heap and GC. +class FreeSpace: public HeapObject { + public: + // [size]: size of the free space including the header. + inline int size(); + inline void set_size(int value); + + inline int Size() { return size(); } + + // Casting. + static inline FreeSpace* cast(Object* obj); + +#ifdef OBJECT_PRINT + inline void FreeSpacePrint() { + FreeSpacePrint(stdout); + } + void FreeSpacePrint(FILE* out); +#endif +#ifdef DEBUG + void FreeSpaceVerify(); +#endif + + // Layout description. + // Size is smi tagged when it is stored. + static const int kSizeOffset = HeapObject::kHeaderSize; + static const int kHeaderSize = kSizeOffset + kPointerSize; + + static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize); + + // Maximal size of a single FreeSpace. + static const int kMaxSize = 512 * MB; + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace); +}; + + // An ExternalArray represents a fixed-size array of primitive values // which live outside the JavaScript heap. Its subclasses are used to // implement the CanvasArray types being defined in the WebGL @@ -3673,6 +3727,11 @@ class Code: public HeapObject { inline int major_key(); inline void set_major_key(int value); + // For stubs, tells whether they should always exist, so that they can be + // called from other stubs. + inline bool is_pregenerated(); + inline void set_is_pregenerated(bool value); + // [optimizable]: For FUNCTION kind, tells if it is optimizable. inline bool optimizable(); inline void set_optimizable(bool value); @@ -3732,6 +3791,11 @@ class Code: public HeapObject { inline byte to_boolean_state(); inline void set_to_boolean_state(byte value); + // For kind STUB, major_key == CallFunction, tells whether there is + // a function cache in the instruction stream. 
+ inline bool has_function_cache(); + inline void set_has_function_cache(bool flag); + // Get the safepoint entry for the given pc. SafepointEntry GetSafepointEntry(Address pc); @@ -3836,10 +3900,6 @@ class Code: public HeapObject { void CodeVerify(); #endif - // Returns the isolate/heap this code object belongs to. - inline Isolate* isolate(); - inline Heap* heap(); - // Max loop nesting marker used to postpose OSR. We don't take loop // nesting that is deeper than 5 levels into account. static const int kMaxLoopNestingMarker = 6; @@ -3875,6 +3935,7 @@ class Code: public HeapObject { static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1; static const int kCompareStateOffset = kStubMajorKeyOffset + 1; static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1; + static const int kHasFunctionCacheOffset = kStubMajorKeyOffset + 1; static const int kFullCodeFlags = kOptimizableOffset + 1; class FullCodeFlagsHasDeoptimizationSupportField: @@ -3894,9 +3955,10 @@ class Code: public HeapObject { class KindField: public BitField<Kind, 7, 4> {}; class CacheHolderField: public BitField<InlineCacheHolderFlag, 11, 1> {}; class ExtraICStateField: public BitField<ExtraICState, 12, 2> {}; + class IsPregeneratedField: public BitField<bool, 14, 1> {}; // Signed field cannot be encoded using the BitField class. - static const int kArgumentsCountShift = 14; + static const int kArgumentsCountShift = 15; static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1); static const int kFlagsNotUsedInLookup = @@ -4032,8 +4094,12 @@ class Map: public HeapObject { (bit_field2() & kElementsKindMask) >> kElementsKindShift); } + // Tells whether the instance has fast elements that are only Smis. + inline bool has_fast_smi_only_elements() { + return elements_kind() == FAST_SMI_ONLY_ELEMENTS; + } + // Tells whether the instance has fast elements. - // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS. inline bool has_fast_elements() { return elements_kind() == FAST_ELEMENTS; } @@ -4042,6 +4108,10 @@ class Map: public HeapObject { return elements_kind() == FAST_DOUBLE_ELEMENTS; } + inline bool has_non_strict_arguments_elements() { + return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS; + } + inline bool has_external_array_elements() { ElementsKind kind(elements_kind()); return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND && @@ -4100,6 +4170,7 @@ class Map: public HeapObject { // 1 + 2 * i: prototype // 2 + 2 * i: target map DECL_ACCESSORS(prototype_transitions, FixedArray) + inline FixedArray* unchecked_prototype_transitions(); static const int kProtoTransitionHeaderSize = 1; @@ -4109,14 +4180,14 @@ class Map: public HeapObject { static const int kProtoTransitionMapOffset = 1; inline int NumberOfProtoTransitions() { - FixedArray* cache = unchecked_prototype_transitions(); + FixedArray* cache = prototype_transitions(); if (cache->length() == 0) return 0; return Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value(); } inline void SetNumberOfProtoTransitions(int value) { - FixedArray* cache = unchecked_prototype_transitions(); + FixedArray* cache = prototype_transitions(); ASSERT(cache->length() != 0); cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset, Smi::FromInt(value)); @@ -4138,27 +4209,6 @@ class Map: public HeapObject { // instance descriptors. 
MUST_USE_RESULT MaybeObject* CopyDropTransitions(); - // Returns this map if it already has elements that are fast, otherwise - // returns a copy of the map, with all transitions dropped from the - // descriptors and the ElementsKind set to FAST_ELEMENTS. - MUST_USE_RESULT inline MaybeObject* GetFastElementsMap(); - - // Returns this map if it already has fast elements that are doubles, - // otherwise returns a copy of the map, with all transitions dropped from the - // descriptors and the ElementsKind set to FAST_DOUBLE_ELEMENTS. - MUST_USE_RESULT inline MaybeObject* GetFastDoubleElementsMap(); - - // Returns this map if already has dictionary elements, otherwise returns a - // copy of the map, with all transitions dropped from the descriptors and the - // ElementsKind set to DICTIONARY_ELEMENTS. - MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap(); - - // Returns a new map with all transitions dropped from the descriptors and the - // ElementsKind set. - MUST_USE_RESULT MaybeObject* GetElementsTransitionMap( - ElementsKind elements_kind, - bool safe_to_add_transition); - // Returns the property index for name (only valid for FAST MODE). int PropertyIndexFor(String* name); @@ -4197,6 +4247,8 @@ class Map: public HeapObject { // This is undone in MarkCompactCollector::ClearNonLiveTransitions(). void CreateBackPointers(); + void CreateOneBackPointer(Map* transition_target); + // Set all map transitions from this map to dead maps to null. // Also, restore the original prototype on the targets of these // transitions, so that we do not process this map again while @@ -4218,6 +4270,24 @@ class Map: public HeapObject { return EquivalentToForNormalization(other, KEEP_INOBJECT_PROPERTIES); } + // Returns the contents of this map's descriptor array for the given string. + // May return NULL. |safe_to_add_transition| is set to false and NULL + // is returned if adding transitions is not allowed. + Object* GetDescriptorContents(String* sentinel_name, + bool* safe_to_add_transitions); + + // Returns the map that this map transitions to if its elements_kind + // is changed to |elements_kind|, or NULL if no such map is cached yet. + // |safe_to_add_transitions| is set to false if adding transitions is not + // allowed. + Map* LookupElementsTransitionMap(ElementsKind elements_kind, + bool* safe_to_add_transition); + + // Adds an entry to this map's descriptor array for a transition to + // |transitioned_map| when its elements_kind is changed to |elements_kind|. + MaybeObject* AddElementsTransition(ElementsKind elements_kind, + Map* transitioned_map); + // Dispatched behavior. #ifdef OBJECT_PRINT inline void MapPrint() { @@ -4233,10 +4303,6 @@ class Map: public HeapObject { inline int visitor_id(); inline void set_visitor_id(int visitor_id); - // Returns the isolate/heap this map belongs to. - inline Isolate* isolate(); - inline Heap* heap(); - typedef void (*TraverseCallback)(Map* map, void* data); void TraverseTransitionTree(TraverseCallback callback, void* data); @@ -4273,7 +4339,7 @@ class Map: public HeapObject { static const int kSize = MAP_POINTER_ALIGN(kPadStart); // Layout of pointer fields. Heap iteration code relies on them - // being continiously allocated. + // being continuously allocated. 
static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset; static const int kPointerFieldsEndOffset = Map::kPrototypeTransitionsOffset + kPointerSize; @@ -4313,7 +4379,7 @@ class Map: public HeapObject { static const int kStringWrapperSafeForDefaultValueOf = 2; static const int kAttachedToSharedFunctionInfo = 3; // No bits can be used after kElementsKindFirstBit, they are all reserved for - // storing ElementKind. for anything other than storing the ElementKind. + // storing ElementKind. static const int kElementsKindShift = 4; static const int kElementsKindBitCount = 4; @@ -4322,6 +4388,9 @@ class Map: public HeapObject { ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1); static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>( (FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1; + static const int8_t kMaximumBitField2FastSmiOnlyElementValue = + static_cast<int8_t>((FAST_SMI_ONLY_ELEMENTS + 1) << + Map::kElementsKindShift) - 1; // Bit positions for bit field 3 static const int kIsShared = 0; @@ -4336,6 +4405,7 @@ class Map: public HeapObject { kSize> BodyDescriptor; private: + String* elements_transition_sentinel_name(); DISALLOW_IMPLICIT_CONSTRUCTORS(Map); }; @@ -5227,8 +5297,6 @@ class GlobalObject: public JSObject { static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize; private: - friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs; - DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject); }; @@ -6226,6 +6294,9 @@ class SeqString: public String { // Casting. static inline SeqString* cast(Object* obj); + // Layout description. + static const int kHeaderSize = String::kSize; + private: DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString); }; @@ -6259,12 +6330,8 @@ class SeqAsciiString: public SeqString { return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize); } - // Layout description. - static const int kHeaderSize = String::kSize; - static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize); - // Maximal memory usage for a single sequential ASCII string. - static const int kMaxSize = 512 * MB; + static const int kMaxSize = 512 * MB - 1; // Maximal length of a single sequential ASCII string. // Q.v. String::kMaxLength which is the maximal size of concatenated strings. static const int kMaxLength = (kMaxSize - kHeaderSize); @@ -6313,12 +6380,8 @@ class SeqTwoByteString: public SeqString { return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize); } - // Layout description. - static const int kHeaderSize = String::kSize; - static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize); - // Maximal memory usage for a single sequential two-byte string. - static const int kMaxSize = 512 * MB; + static const int kMaxSize = 512 * MB - 1; // Maximal length of a single sequential two-byte string. // Q.v. String::kMaxLength which is the maximal size of concatenated strings. static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t); @@ -6480,8 +6543,8 @@ class ExternalAsciiString: public ExternalString { typedef v8::String::ExternalAsciiStringResource Resource; // The underlying resource. - inline Resource* resource(); - inline void set_resource(Resource* buffer); + inline const Resource* resource(); + inline void set_resource(const Resource* buffer); // Dispatched behavior. uint16_t ExternalAsciiStringGet(int index); @@ -6517,8 +6580,8 @@ class ExternalTwoByteString: public ExternalString { typedef v8::String::ExternalStringResource Resource; // The underlying string resource. 
- inline Resource* resource(); - inline void set_resource(Resource* buffer); + inline const Resource* resource(); + inline void set_resource(const Resource* buffer); // Dispatched behavior. uint16_t ExternalTwoByteStringGet(int index); @@ -6669,6 +6732,9 @@ class Oddball: public HeapObject { static const byte kUndefined = 5; static const byte kOther = 6; + // The ToNumber value of a hidden oddball is a negative smi. + static const int kLeastHiddenOddballNumber = -5; + typedef FixedBodyDescriptor<kToStringOffset, kToNumberOffset + kPointerSize, kSize> BodyDescriptor; @@ -6704,10 +6770,6 @@ class JSGlobalPropertyCell: public HeapObject { kValueOffset + kPointerSize, kSize> BodyDescriptor; - // Returns the isolate/heap this cell object belongs to. - inline Isolate* isolate(); - inline Heap* heap(); - private: DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell); }; @@ -6719,25 +6781,56 @@ class JSProxy: public JSReceiver { // [handler]: The handler property. DECL_ACCESSORS(handler, Object) + // [hash]: The hash code property (undefined if not initialized yet). + DECL_ACCESSORS(hash, Object) + // Casting. static inline JSProxy* cast(Object* obj); bool HasPropertyWithHandler(String* name); + bool HasElementWithHandler(uint32_t index); + + MUST_USE_RESULT MaybeObject* GetPropertyWithHandler( + Object* receiver, + String* name); + MUST_USE_RESULT MaybeObject* GetElementWithHandler( + Object* receiver, + uint32_t index); MUST_USE_RESULT MaybeObject* SetPropertyWithHandler( String* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode); + MUST_USE_RESULT MaybeObject* SetElementWithHandler( + uint32_t index, + Object* value, + StrictModeFlag strict_mode); + + // If the handler defines an accessor property, invoke its setter + // (or throw if only a getter exists) and set *found to true. Otherwise false. + MUST_USE_RESULT MaybeObject* SetPropertyWithHandlerIfDefiningSetter( + String* name, + Object* value, + PropertyAttributes attributes, + StrictModeFlag strict_mode, + bool* found); MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler( String* name, DeleteMode mode); + MUST_USE_RESULT MaybeObject* DeleteElementWithHandler( + uint32_t index, + DeleteMode mode); MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler( JSReceiver* receiver, - String* name, - bool* has_exception); + String* name); + MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler( + JSReceiver* receiver, + uint32_t index); + + MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag); // Turn this into an (empty) JSObject. void Fix(); @@ -6745,6 +6838,13 @@ class JSProxy: public JSReceiver { // Initializes the body after the handler slot. inline void InitializeBody(int object_size, Object* value); + // Invoke a trap by name. If the trap does not exist on this's handler, + // but derived_trap is non-NULL, invoke that instead. May cause GC. + Handle<Object> CallTrap(const char* name, + Handle<Object> derived_trap, + int argc, + Handle<Object> args[]); + // Dispatched behavior. #ifdef OBJECT_PRINT inline void JSProxyPrint() { @@ -6760,7 +6860,8 @@ class JSProxy: public JSReceiver { // size as a virgin JSObject. This is essential for becoming a JSObject // upon freeze. 
static const int kHandlerOffset = HeapObject::kHeaderSize; - static const int kPaddingOffset = kHandlerOffset + kPointerSize; + static const int kHashOffset = kHandlerOffset + kPointerSize; + static const int kPaddingOffset = kHashOffset + kPointerSize; static const int kSize = JSObject::kHeaderSize; static const int kHeaderSize = kPaddingOffset; static const int kPaddingSize = kSize - kPaddingOffset; @@ -6768,7 +6869,7 @@ class JSProxy: public JSReceiver { STATIC_CHECK(kPaddingSize >= 0); typedef FixedBodyDescriptor<kHandlerOffset, - kHandlerOffset + kPointerSize, + kPaddingOffset, kSize> BodyDescriptor; private: @@ -6799,7 +6900,7 @@ class JSFunctionProxy: public JSProxy { #endif // Layout description. - static const int kCallTrapOffset = kHandlerOffset + kPointerSize; + static const int kCallTrapOffset = JSProxy::kPaddingOffset; static const int kConstructTrapOffset = kCallTrapOffset + kPointerSize; static const int kPaddingOffset = kConstructTrapOffset + kPointerSize; static const int kSize = JSFunction::kSize; @@ -6820,7 +6921,7 @@ class JSFunctionProxy: public JSProxy { class JSWeakMap: public JSObject { public: // [table]: the backing hash table mapping keys to values. - DECL_ACCESSORS(table, ObjectHashTable) + DECL_ACCESSORS(table, Object) // [next]: linked list of encountered weak maps during GC. DECL_ACCESSORS(next, Object) @@ -6913,7 +7014,7 @@ class JSArray: public JSObject { MUST_USE_RESULT MaybeObject* Initialize(int capacity); // Set the content of the array to the content of storage. - inline void SetContent(FixedArray* storage); + inline MaybeObject* SetContent(FixedArray* storage); // Casting. static inline JSArray* cast(Object* obj); @@ -7129,7 +7230,6 @@ class TemplateInfo: public Struct { static const int kPropertyListOffset = kTagOffset + kPointerSize; static const int kHeaderSize = kPropertyListOffset + kPointerSize; protected: - friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs; DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo); }; @@ -7433,6 +7533,9 @@ class ObjectVisitor BASE_EMBEDDED { // Handy shorthand for visiting a single pointer. virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); } + // Visit pointer embedded into a code object. + virtual void VisitEmbeddedPointer(RelocInfo* rinfo); + // Visits a contiguous arrays of external references (references to the C++ // heap) in the half-open range [start, end). Any or all of the values // may be modified on return. 
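The objects.h changes above widen ObjectHashTable's key type from JSObject* to JSReceiver* and give JSProxy a lazily created identity hash (the new [hash] field and GetIdentityHash), which is what lets proxies serve as keys in hash tables and weak maps. Below is a minimal standalone sketch of that keying scheme, using hypothetical names rather than V8's internal classes: keys hash by an identity hash assigned on first insertion, and mapping a key to "no value" removes the whole entry, mirroring the Put(key, undefined) behaviour documented in the diff.

#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <unordered_map>

// Stand-in for JSReceiver: anything that can carry an identity hash.
struct Receiver {
  // 0 means "no identity hash assigned yet"; it is filled in lazily the
  // first time the receiver is used as a key.
  uint32_t identity_hash = 0;
};

class IdentityHashTable {
 public:
  // Returns the value for |key|, or std::nullopt if the key is absent
  // (the real Lookup() returns the undefined value in that case).
  std::optional<std::string> Lookup(Receiver* key) const {
    if (key->identity_hash == 0) return std::nullopt;  // never used as a key
    auto it = table_.find(key);
    if (it == table_.end()) return std::nullopt;
    return it->second;
  }

  // Adds or overwrites the value for |key|. Mapping a key to "no value"
  // removes the whole entry, mirroring Put(key, undefined).
  void Put(Receiver* key, std::optional<std::string> value) {
    EnsureIdentityHash(key);  // analogous to creating the hash on demand
    if (!value.has_value()) {
      table_.erase(key);
      return;
    }
    table_[key] = *value;
  }

 private:
  static void EnsureIdentityHash(Receiver* key) {
    static uint32_t next_hash = 1;
    if (key->identity_hash == 0) key->identity_hash = next_hash++;
  }

  struct HashByIdentity {
    size_t operator()(const Receiver* r) const { return r->identity_hash; }
  };
  // Keys compare by pointer identity and hash by the stored identity hash.
  std::unordered_map<Receiver*, std::string, HashByIdentity> table_;
};

// Usage: a proxy (or any receiver) gets a hash the first time it is a key.
int main() {
  Receiver proxy;  // stands in for a JSProxy used as a weak-map key
  IdentityHashTable table;
  table.Put(&proxy, std::string("value"));
  bool present = table.Lookup(&proxy).has_value();   // true
  table.Put(&proxy, std::nullopt);                   // entry removed
  bool removed = !table.Lookup(&proxy).has_value();  // true
  return (present && removed) ? 0 : 1;
}

The real table keys on an identity hash stored on the receiver itself (a hidden property for ordinary objects, the new hash field for proxies) rather than on the object's address, since addresses are not stable across GC.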
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index e8d181061..fb94a1a60 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -587,7 +587,7 @@ Parser::Parser(Handle<Script> script, fni_(NULL), stack_overflow_(false), parenthesized_function_(false), - harmony_block_scoping_(false) { + harmony_scoping_(false) { AstNode::ResetIds(); } @@ -650,7 +650,7 @@ FunctionLiteral* Parser::DoParseProgram(Handle<String> source, CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok); } - if (ok && harmony_block_scoping_) { + if (ok && harmony_scoping_) { CheckConflictingVarDeclarations(scope, &ok); } @@ -817,9 +817,9 @@ void Parser::ReportMessageAt(Scanner::Location source_location, isolate()->Throw(*result, &location); } -void Parser::SetHarmonyBlockScoping(bool block_scoping) { - scanner().SetHarmonyBlockScoping(block_scoping); - harmony_block_scoping_ = block_scoping; +void Parser::SetHarmonyScoping(bool block_scoping) { + scanner().SetHarmonyScoping(block_scoping); + harmony_scoping_ = block_scoping; } // Base class containing common code for the different finder classes used by @@ -957,17 +957,18 @@ class InitializationBlockFinder : public ParserFinder { }; -// A ThisNamedPropertyAssigmentFinder finds and marks statements of the form +// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form // this.x = ...;, where x is a named property. It also determines whether a // function contains only assignments of this type. -class ThisNamedPropertyAssigmentFinder : public ParserFinder { +class ThisNamedPropertyAssignmentFinder : public ParserFinder { public: - explicit ThisNamedPropertyAssigmentFinder(Isolate* isolate) + explicit ThisNamedPropertyAssignmentFinder(Isolate* isolate) : isolate_(isolate), only_simple_this_property_assignments_(true), - names_(NULL), - assigned_arguments_(NULL), - assigned_constants_(NULL) {} + names_(0), + assigned_arguments_(0), + assigned_constants_(0) { + } void Update(Scope* scope, Statement* stat) { // Bail out if function already has property assignment that are @@ -994,19 +995,17 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder { // Returns a fixed array containing three elements for each assignment of the // form this.x = y; Handle<FixedArray> GetThisPropertyAssignments() { - if (names_ == NULL) { + if (names_.is_empty()) { return isolate_->factory()->empty_fixed_array(); } - ASSERT(names_ != NULL); - ASSERT(assigned_arguments_ != NULL); - ASSERT_EQ(names_->length(), assigned_arguments_->length()); - ASSERT_EQ(names_->length(), assigned_constants_->length()); + ASSERT_EQ(names_.length(), assigned_arguments_.length()); + ASSERT_EQ(names_.length(), assigned_constants_.length()); Handle<FixedArray> assignments = - isolate_->factory()->NewFixedArray(names_->length() * 3); - for (int i = 0; i < names_->length(); i++) { - assignments->set(i * 3, *names_->at(i)); - assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_->at(i))); - assignments->set(i * 3 + 2, *assigned_constants_->at(i)); + isolate_->factory()->NewFixedArray(names_.length() * 3); + for (int i = 0; i < names_.length(); ++i) { + assignments->set(i * 3, *names_[i]); + assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_[i])); + assignments->set(i * 3 + 2, *assigned_constants_[i]); } return assignments; } @@ -1063,18 +1062,37 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder { AssignmentFromSomethingElse(); } + + + + // We will potentially reorder the property assignments, so they must be + // simple enough that the 
ordering does not matter. void AssignmentFromParameter(Handle<String> name, int index) { - EnsureAllocation(); - names_->Add(name); - assigned_arguments_->Add(index); - assigned_constants_->Add(isolate_->factory()->undefined_value()); + EnsureInitialized(); + for (int i = 0; i < names_.length(); ++i) { + if (name->Equals(*names_[i])) { + assigned_arguments_[i] = index; + assigned_constants_[i] = isolate_->factory()->undefined_value(); + return; + } + } + names_.Add(name); + assigned_arguments_.Add(index); + assigned_constants_.Add(isolate_->factory()->undefined_value()); } void AssignmentFromConstant(Handle<String> name, Handle<Object> value) { - EnsureAllocation(); - names_->Add(name); - assigned_arguments_->Add(-1); - assigned_constants_->Add(value); + EnsureInitialized(); + for (int i = 0; i < names_.length(); ++i) { + if (name->Equals(*names_[i])) { + assigned_arguments_[i] = -1; + assigned_constants_[i] = value; + return; + } + } + names_.Add(name); + assigned_arguments_.Add(-1); + assigned_constants_.Add(value); } void AssignmentFromSomethingElse() { @@ -1082,35 +1100,36 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder { only_simple_this_property_assignments_ = false; } - void EnsureAllocation() { - if (names_ == NULL) { - ASSERT(assigned_arguments_ == NULL); - ASSERT(assigned_constants_ == NULL); - Zone* zone = isolate_->zone(); - names_ = new(zone) ZoneStringList(4); - assigned_arguments_ = new(zone) ZoneList<int>(4); - assigned_constants_ = new(zone) ZoneObjectList(4); + void EnsureInitialized() { + if (names_.capacity() == 0) { + ASSERT(assigned_arguments_.capacity() == 0); + ASSERT(assigned_constants_.capacity() == 0); + names_.Initialize(4); + assigned_arguments_.Initialize(4); + assigned_constants_.Initialize(4); } } Isolate* isolate_; bool only_simple_this_property_assignments_; - ZoneStringList* names_; - ZoneList<int>* assigned_arguments_; - ZoneObjectList* assigned_constants_; + ZoneStringList names_; + ZoneList<int> assigned_arguments_; + ZoneObjectList assigned_constants_; }; Statement* Parser::ParseSourceElement(ZoneStringList* labels, bool* ok) { + // (Ecma 262 5th Edition, clause 14): + // SourceElement: + // Statement + // FunctionDeclaration + // + // In harmony mode we allow additionally the following productions + // SourceElement: + // LetDeclaration + if (peek() == Token::FUNCTION) { - // FunctionDeclaration is only allowed in the context of SourceElements - // (Ecma 262 5th Edition, clause 14): - // SourceElement: - // Statement - // FunctionDeclaration - // Common language extension is to allow function declaration in place - // of any statement. This language extension is disabled in strict mode. return ParseFunctionDeclaration(ok); } else if (peek() == Token::LET) { return ParseVariableStatement(kSourceElement, ok); @@ -1124,7 +1143,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor, int end_token, bool* ok) { // SourceElements :: - // (Statement)* <end_token> + // (SourceElement)* <end_token> // Allocate a target stack to use for this set of source // elements. This way, all scripts and functions get their own @@ -1134,7 +1153,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor, ASSERT(processor != NULL); InitializationBlockFinder block_finder(top_scope_, target_stack_); - ThisNamedPropertyAssigmentFinder this_property_assignment_finder(isolate()); + ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate()); bool directive_prologue = true; // Parsing directive prologue. 
while (peek() != end_token) { @@ -1295,8 +1314,13 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) { } case Token::FUNCTION: { - // In strict mode, FunctionDeclaration is only allowed in the context - // of SourceElements. + // FunctionDeclaration is only allowed in the context of SourceElements + // (Ecma 262 5th Edition, clause 14): + // SourceElement: + // Statement + // FunctionDeclaration + // Common language extension is to allow function declaration in place + // of any statement. This language extension is disabled in strict mode. if (top_scope_->is_strict_mode()) { ReportMessageAt(scanner().peek_location(), "strict_function", Vector<const char*>::empty()); @@ -1321,7 +1345,7 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) { VariableProxy* Parser::Declare(Handle<String> name, - Variable::Mode mode, + VariableMode mode, FunctionLiteral* fun, bool resolve, bool* ok) { @@ -1339,7 +1363,7 @@ VariableProxy* Parser::Declare(Handle<String> name, // Similarly, strict mode eval scope does not leak variable declarations to // the caller's scope so we declare all locals, too. - Scope* declaration_scope = mode == Variable::LET ? top_scope_ + Scope* declaration_scope = mode == LET ? top_scope_ : top_scope_->DeclarationScope(); if (declaration_scope->is_function_scope() || declaration_scope->is_strict_mode_eval_scope() || @@ -1361,12 +1385,12 @@ VariableProxy* Parser::Declare(Handle<String> name, // // because the var declaration is hoisted to the function scope where 'x' // is already bound. - if ((mode != Variable::VAR) || (var->mode() != Variable::VAR)) { + if ((mode != VAR) || (var->mode() != VAR)) { // We only have vars, consts and lets in declarations. - ASSERT(var->mode() == Variable::VAR || - var->mode() == Variable::CONST || - var->mode() == Variable::LET); - if (harmony_block_scoping_) { + ASSERT(var->mode() == VAR || + var->mode() == CONST || + var->mode() == LET); + if (harmony_scoping_) { // In harmony mode we treat re-declarations as early errors. See // ES5 16 for a definition of early errors. SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS); @@ -1376,8 +1400,8 @@ VariableProxy* Parser::Declare(Handle<String> name, *ok = false; return NULL; } - const char* type = (var->mode() == Variable::VAR) ? "var" : - (var->mode() == Variable::CONST) ? "const" : "let"; + const char* type = (var->mode() == VAR) ? "var" : + (var->mode() == CONST) ? "const" : "let"; Handle<String> type_string = isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED); Expression* expression = @@ -1410,14 +1434,10 @@ VariableProxy* Parser::Declare(Handle<String> name, new(zone()) Declaration(proxy, mode, fun, top_scope_)); // For global const variables we bind the proxy to a variable. - if (mode == Variable::CONST && declaration_scope->is_global_scope()) { + if (mode == CONST && declaration_scope->is_global_scope()) { ASSERT(resolve); // should be set by all callers Variable::Kind kind = Variable::NORMAL; - var = new(zone()) Variable(declaration_scope, - name, - Variable::CONST, - true, - kind); + var = new(zone()) Variable(declaration_scope, name, CONST, true, kind); } // If requested and we have a local variable, bind the proxy to the variable @@ -1500,7 +1520,7 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) { // other functions are setup when entering the surrounding scope. 
SharedFunctionInfoLiteral* lit = new(zone()) SharedFunctionInfoLiteral(isolate(), shared); - VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK); + VariableProxy* var = Declare(name, VAR, NULL, true, CHECK_OK); return new(zone()) ExpressionStatement(new(zone()) Assignment( isolate(), Token::INIT_VAR, var, lit, RelocInfo::kNoPosition)); } @@ -1522,14 +1542,14 @@ Statement* Parser::ParseFunctionDeclaration(bool* ok) { // Even if we're not at the top-level of the global or a function // scope, we treat is as such and introduce the function with it's // initial value upon entering the corresponding scope. - Variable::Mode mode = harmony_block_scoping_ ? Variable::LET : Variable::VAR; + VariableMode mode = harmony_scoping_ ? LET : VAR; Declare(name, mode, fun, true, CHECK_OK); return EmptyStatement(); } Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) { - if (harmony_block_scoping_) return ParseScopedBlock(labels, ok); + if (harmony_scoping_) return ParseScopedBlock(labels, ok); // Block :: // '{' Statement* '}' @@ -1555,6 +1575,11 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) { Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) { + // The harmony mode uses source elements instead of statements. + // + // Block :: + // '{' SourceElement* '}' + // Construct block expecting 16 statements. Block* body = new(zone()) Block(isolate(), labels, 16, false); Scope* saved_scope = top_scope_; @@ -1622,7 +1647,7 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context, // VariableDeclarations :: // ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[','] - Variable::Mode mode = Variable::VAR; + VariableMode mode = VAR; // True if the binding needs initialization. 'let' and 'const' declared // bindings are created uninitialized by their declaration nodes and // need initialization. 'var' declared bindings are always initialized @@ -1639,7 +1664,7 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context, *ok = false; return NULL; } - mode = Variable::CONST; + mode = CONST; is_const = true; needs_init = true; init_op = Token::INIT_CONST; @@ -1652,14 +1677,14 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context, *ok = false; return NULL; } - mode = Variable::LET; + mode = LET; needs_init = true; init_op = Token::INIT_LET; } else { UNREACHABLE(); // by current callers } - Scope* declaration_scope = mode == Variable::LET + Scope* declaration_scope = (mode == LET) ? top_scope_ : top_scope_->DeclarationScope(); // The scope of a var/const declared variable anywhere inside a function // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can @@ -1753,6 +1778,8 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context, value->AsCall() == NULL && value->AsCallNew() == NULL) { fni_->Infer(); + } else { + fni_->RemoveLastFunction(); } } @@ -1848,7 +1875,7 @@ Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context, // as the declaration. Thus dynamic lookups are unnecessary even if the // block scope is inside a with. if (value != NULL) { - bool in_with = mode == Variable::VAR ? inside_with() : false; + bool in_with = (mode == VAR) ? 
inside_with() : false; VariableProxy* proxy = initialization_scope->NewUnresolved(name, in_with); Assignment* assignment = @@ -2222,8 +2249,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) { if (top_scope_->is_strict_mode()) { catch_scope->EnableStrictMode(); } - Variable::Mode mode = harmony_block_scoping_ - ? Variable::LET : Variable::VAR; + VariableMode mode = harmony_scoping_ ? LET : VAR; catch_variable = catch_scope->DeclareLocal(name, mode); Scope* saved_scope = top_scope_; @@ -2503,6 +2529,8 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) { || op == Token::ASSIGN) && (right->AsCall() == NULL && right->AsCallNew() == NULL)) { fni_->Infer(); + } else { + fni_->RemoveLastFunction(); } fni_->Leave(); } @@ -2614,7 +2642,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) { case Token::NE_STRICT: cmp = Token::EQ_STRICT; break; default: break; } - x = NewCompareNode(cmp, x, y, position); + x = new(zone()) CompareOperation(isolate(), cmp, x, y, position); if (cmp != op) { // The comparison was negated - add a NOT. x = new(zone()) UnaryOperation(isolate(), Token::NOT, x, position); @@ -2630,27 +2658,6 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) { } -Expression* Parser::NewCompareNode(Token::Value op, - Expression* x, - Expression* y, - int position) { - ASSERT(op != Token::NE && op != Token::NE_STRICT); - if (op == Token::EQ || op == Token::EQ_STRICT) { - bool is_strict = (op == Token::EQ_STRICT); - Literal* x_literal = x->AsLiteral(); - if (x_literal != NULL && x_literal->IsNull()) { - return new(zone()) CompareToNull(isolate(), is_strict, y); - } - - Literal* y_literal = y->AsLiteral(); - if (y_literal != NULL && y_literal->IsNull()) { - return new(zone()) CompareToNull(isolate(), is_strict, x); - } - } - return new(zone()) CompareOperation(isolate(), op, x, y, position); -} - - Expression* Parser::ParseUnaryExpression(bool* ok) { // UnaryExpression :: // PostfixExpression @@ -3707,8 +3714,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name, // Function declarations are function scoped in normal mode, so they are // hoisted. In harmony block scoping mode they are block scoped, so they // are not hoisted. - Scope* scope = (type == FunctionLiteral::DECLARATION && - !harmony_block_scoping_) + Scope* scope = (type == FunctionLiteral::DECLARATION && !harmony_scoping_) ? NewScope(top_scope_->DeclarationScope(), Scope::FUNCTION_SCOPE, false) : NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with()); ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8); @@ -3750,10 +3756,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name, reserved_loc = scanner().location(); } - top_scope_->DeclareParameter(param_name, - harmony_block_scoping_ - ? Variable::LET - : Variable::VAR); + top_scope_->DeclareParameter(param_name, harmony_scoping_ ? 
LET : VAR); num_parameters++; if (num_parameters > kMaxNumFunctionParameters) { ReportMessageAt(scanner().location(), "too_many_parameters", @@ -3880,7 +3883,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name, } } - if (harmony_block_scoping_) { + if (harmony_scoping_) { CheckConflictingVarDeclarations(scope, CHECK_OK); } @@ -5118,10 +5121,10 @@ int ScriptDataImpl::ReadNumber(byte** source) { static ScriptDataImpl* DoPreParse(UC16CharacterStream* source, bool allow_lazy, ParserRecorder* recorder, - bool harmony_block_scoping) { + bool harmony_scoping) { Isolate* isolate = Isolate::Current(); JavaScriptScanner scanner(isolate->unicode_cache()); - scanner.SetHarmonyBlockScoping(harmony_block_scoping); + scanner.SetHarmonyScoping(harmony_scoping); scanner.Initialize(source); intptr_t stack_limit = isolate->stack_guard()->real_climit(); if (!preparser::PreParser::PreParseProgram(&scanner, @@ -5143,7 +5146,7 @@ static ScriptDataImpl* DoPreParse(UC16CharacterStream* source, // even if the preparser data is only used once. ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source, v8::Extension* extension, - bool harmony_block_scoping) { + bool harmony_scoping) { bool allow_lazy = FLAG_lazy && (extension == NULL); if (!allow_lazy) { // Partial preparsing is only about lazily compiled functions. @@ -5151,17 +5154,17 @@ ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source, return NULL; } PartialParserRecorder recorder; - return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping); + return DoPreParse(source, allow_lazy, &recorder, harmony_scoping); } ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source, v8::Extension* extension, - bool harmony_block_scoping) { + bool harmony_scoping) { Handle<Script> no_script; bool allow_lazy = FLAG_lazy && (extension == NULL); CompleteParserRecorder recorder; - return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping); + return DoPreParse(source, allow_lazy, &recorder, harmony_scoping); } @@ -5191,11 +5194,10 @@ bool ParserApi::Parse(CompilationInfo* info) { ASSERT(info->function() == NULL); FunctionLiteral* result = NULL; Handle<Script> script = info->script(); - bool harmony_block_scoping = !info->is_native() && - FLAG_harmony_block_scoping; + bool harmony_scoping = !info->is_native() && FLAG_harmony_scoping; if (info->is_lazy()) { Parser parser(script, true, NULL, NULL); - parser.SetHarmonyBlockScoping(harmony_block_scoping); + parser.SetHarmonyScoping(harmony_scoping); result = parser.ParseLazy(info); } else { // Whether we allow %identifier(..) syntax. @@ -5206,7 +5208,7 @@ bool ParserApi::Parse(CompilationInfo* info) { allow_natives_syntax, info->extension(), pre_data); - parser.SetHarmonyBlockScoping(harmony_block_scoping); + parser.SetHarmonyScoping(harmony_scoping); if (pre_data != NULL && pre_data->has_error()) { Scanner::Location loc = pre_data->MessageLocation(); const char* message = pre_data->BuildMessage(); diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h index 3312f2f56..359bb3848 100644 --- a/deps/v8/src/parser.h +++ b/deps/v8/src/parser.h @@ -164,13 +164,13 @@ class ParserApi { // Generic preparser generating full preparse data. static ScriptDataImpl* PreParse(UC16CharacterStream* source, v8::Extension* extension, - bool harmony_block_scoping); + bool harmony_scoping); // Preparser that only does preprocessing that makes sense if only used // immediately after. 
static ScriptDataImpl* PartialPreParse(UC16CharacterStream* source, v8::Extension* extension, - bool harmony_block_scoping); + bool harmony_scoping); }; // ---------------------------------------------------------------------------- @@ -436,7 +436,7 @@ class Parser { void ReportMessageAt(Scanner::Location loc, const char* message, Vector<Handle<String> > args); - void SetHarmonyBlockScoping(bool block_scoping); + void SetHarmonyScoping(bool block_scoping); private: // Limit on number of function parameters is chosen arbitrarily. @@ -533,11 +533,6 @@ class Parser { ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok); Expression* ParseRegExpLiteral(bool seen_equal, bool* ok); - Expression* NewCompareNode(Token::Value op, - Expression* x, - Expression* y, - int position); - // Populate the constant properties fixed array for a materialized object // literal. void BuildObjectLiteralConstantProperties( @@ -656,7 +651,7 @@ class Parser { void CheckConflictingVarDeclarations(Scope* scope, bool* ok); // Parser support - VariableProxy* Declare(Handle<String> name, Variable::Mode mode, + VariableProxy* Declare(Handle<String> name, VariableMode mode, FunctionLiteral* fun, bool resolve, bool* ok); @@ -736,7 +731,7 @@ class Parser { // Heuristically that means that the function will be called immediately, // so never lazily compile it. bool parenthesized_function_; - bool harmony_block_scoping_; + bool harmony_scoping_; friend class LexicalScope; }; diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index b152dae9a..90f45dd16 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -78,30 +78,6 @@ double ceiling(double x) { static Mutex* limit_mutex = NULL; -static void* GetRandomMmapAddr() { - Isolate* isolate = Isolate::UncheckedCurrent(); - // Note that the current isolate isn't set up in a call path via - // CpuFeatures::Probe. We don't care about randomization in this case because - // the code page is immediately freed. - if (isolate != NULL) { -#ifdef V8_TARGET_ARCH_X64 - uint64_t rnd1 = V8::RandomPrivate(isolate); - uint64_t rnd2 = V8::RandomPrivate(isolate); - uint64_t raw_addr = (rnd1 << 32) ^ rnd2; - raw_addr &= V8_UINT64_C(0x3ffffffff000); -#else - uint32_t raw_addr = V8::RandomPrivate(isolate); - // The range 0x20000000 - 0x60000000 is relatively unpopulated across a - // variety of ASLR modes (PAE kernel, NX compat mode, etc). - raw_addr &= 0x3ffff000; - raw_addr += 0x20000000; -#endif - return reinterpret_cast<void*>(raw_addr); - } - return NULL; -} - - void OS::Setup() { // Seed the random number generator. We preserve microsecond resolution. uint64_t seed = Ticks() ^ (getpid() << 16); @@ -381,9 +357,9 @@ size_t OS::AllocateAlignment() { void* OS::Allocate(const size_t requested, size_t* allocated, bool is_executable) { - const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE)); + const size_t msize = RoundUp(requested, AllocateAlignment()); int prot = PROT_READ | PROT_WRITE | (is_executable ? 
PROT_EXEC : 0); - void* addr = GetRandomMmapAddr(); + void* addr = OS::GetRandomMmapAddr(); void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (mbase == MAP_FAILED) { LOG(i::Isolate::Current(), @@ -453,7 +429,12 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { int size = ftell(file); void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + mmap(OS::GetRandomMmapAddr(), + size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fileno(file), + 0); return new PosixMemoryMappedFile(file, memory, size); } @@ -468,13 +449,18 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, return NULL; } void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + mmap(OS::GetRandomMmapAddr(), + size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fileno(file), + 0); return new PosixMemoryMappedFile(file, memory, size); } PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) munmap(memory_, size_); + if (memory_) OS::Free(memory_, size_); fclose(file_); } @@ -553,10 +539,14 @@ void OS::SignalCodeMovingGC() { // kernel log. int size = sysconf(_SC_PAGESIZE); FILE* f = fopen(kGCFakeMmap, "w+"); - void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, - fileno(f), 0); + void* addr = mmap(OS::GetRandomMmapAddr(), + size, + PROT_READ | PROT_EXEC, + MAP_PRIVATE, + fileno(f), + 0); ASSERT(addr != MAP_FAILED); - munmap(addr, size); + OS::Free(addr, size); fclose(f); } @@ -598,44 +588,126 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { static const int kMmapFd = -1; static const int kMmapFdOffset = 0; +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } VirtualMemory::VirtualMemory(size_t size) { - address_ = mmap(GetRandomMmapAddr(), size, PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, - kMmapFd, kMmapFdOffset); + address_ = ReserveRegion(size); size_ = size; } +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast<intptr_t>(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + Address base = static_cast<Address>(reservation); + Address aligned_base = RoundUp(base, alignment); + ASSERT_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. 
+ if (aligned_base != base) { + size_t prefix_size = static_cast<size_t>(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + ASSERT_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + ASSERT(aligned_size == request_size); + + address_ = static_cast<void*>(aligned_base); + size_ = aligned_size; +} + + VirtualMemory::~VirtualMemory() { if (IsReserved()) { - if (0 == munmap(address(), size())) address_ = MAP_FAILED; + bool result = ReleaseRegion(address(), size()); + ASSERT(result); + USE(result); } } bool VirtualMemory::IsReserved() { - return address_ != MAP_FAILED; + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; } bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + return UncommitRegion(address, size); +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - if (MAP_FAILED == mmap(address, size, prot, + if (MAP_FAILED == mmap(base, + size, + prot, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, - kMmapFd, kMmapFdOffset)) { + kMmapFd, + kMmapFdOffset)) { return false; } - UpdateAllocatedSpaceLimits(address, size); + UpdateAllocatedSpaceLimits(base, size); return true; } -bool VirtualMemory::Uncommit(void* address, size_t size) { - return mmap(address, size, PROT_NONE, +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return mmap(base, + size, + PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, - kMmapFd, kMmapFdOffset) != MAP_FAILED; + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return munmap(base, size) == 0; } diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index 6be941a08..6e5d29da2 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -94,12 +94,8 @@ static Mutex* limit_mutex = NULL; void OS::Setup() { - // Seed the random number generator. - // Convert the current time to a 64-bit integer first, before converting it - // to an unsigned. Going directly will cause an overflow and the seed to be - // set to all ones. The seed will be identical for different instances that - // call this setup code within the same millisecond. - uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()); + // Seed the random number generator. We preserve microsecond resolution. 
+ uint64_t seed = Ticks() ^ (getpid() << 16); srandom(static_cast<unsigned int>(seed)); limit_mutex = CreateMutex(); } @@ -148,9 +144,12 @@ void* OS::Allocate(const size_t requested, bool is_executable) { const size_t msize = RoundUp(requested, getpagesize()); int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - void* mbase = mmap(NULL, msize, prot, + void* mbase = mmap(OS::GetRandomMmapAddr(), + msize, + prot, MAP_PRIVATE | MAP_ANON, - kMmapFd, kMmapFdOffset); + kMmapFd, + kMmapFdOffset); if (mbase == MAP_FAILED) { LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed")); return NULL; @@ -207,7 +206,12 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { int size = ftell(file); void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + mmap(OS::GetRandomMmapAddr(), + size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fileno(file), + 0); return new PosixMemoryMappedFile(file, memory, size); } @@ -222,13 +226,18 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, return NULL; } void* memory = - mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); + mmap(OS::GetRandomMmapAddr(), + size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fileno(file), + 0); return new PosixMemoryMappedFile(file, memory, size); } PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) munmap(memory_, size_); + if (memory_) OS::Free(memory_, size_); fclose(file_); } @@ -334,33 +343,102 @@ int OS::StackWalk(Vector<StackFrame> frames) { } +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } -VirtualMemory::VirtualMemory(size_t size) { - address_ = mmap(NULL, size, PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, - kMmapFd, kMmapFdOffset); - size_ = size; +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } + + +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast<intptr_t>(OS::AllocateAlignment())); + void* reservation = mmap(OS::GetRandomMmapAddr(), + request_size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + if (reservation == MAP_FAILED) return; + + Address base = static_cast<Address>(reservation); + Address aligned_base = RoundUp(base, alignment); + ASSERT_LE(base, aligned_base); + + // Unmap extra memory reserved before and after the desired block. 
+ if (aligned_base != base) { + size_t prefix_size = static_cast<size_t>(aligned_base - base); + OS::Free(base, prefix_size); + request_size -= prefix_size; + } + + size_t aligned_size = RoundUp(size, OS::AllocateAlignment()); + ASSERT_LE(aligned_size, request_size); + + if (aligned_size != request_size) { + size_t suffix_size = request_size - aligned_size; + OS::Free(aligned_base + aligned_size, suffix_size); + request_size -= suffix_size; + } + + ASSERT(aligned_size == request_size); + + address_ = static_cast<void*>(aligned_base); + size_ = aligned_size; } VirtualMemory::~VirtualMemory() { if (IsReserved()) { - if (0 == munmap(address(), size())) address_ = MAP_FAILED; + bool result = ReleaseRegion(address(), size()); + ASSERT(result); + USE(result); } } +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + void* result = mmap(OS::GetRandomMmapAddr(), + size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + kMmapFd, + kMmapFdOffset); + + if (result == MAP_FAILED) return NULL; + + return result; +} + + bool VirtualMemory::IsReserved() { - return address_ != MAP_FAILED; + return address_ != NULL; } bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + return CommitRegion(address, size, is_executable); +} + + +bool VirtualMemory::CommitRegion(void* address, + size_t size, + bool is_executable) { int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - if (MAP_FAILED == mmap(address, size, prot, + if (MAP_FAILED == mmap(address, + size, + prot, MAP_PRIVATE | MAP_ANON | MAP_FIXED, - kMmapFd, kMmapFdOffset)) { + kMmapFd, + kMmapFdOffset)) { return false; } @@ -370,9 +448,22 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { bool VirtualMemory::Uncommit(void* address, size_t size) { - return mmap(address, size, PROT_NONE, + return UncommitRegion(address, size); +} + + +bool VirtualMemory::UncommitRegion(void* address, size_t size) { + return mmap(address, + size, + PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED, - kMmapFd, kMmapFdOffset) != MAP_FAILED; + kMmapFd, + kMmapFdOffset) != MAP_FAILED; +} + + +bool VirtualMemory::ReleaseRegion(void* address, size_t size) { + return munmap(address, size) == 0; } diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc index 973329b9b..3151d1805 100644 --- a/deps/v8/src/platform-openbsd.cc +++ b/deps/v8/src/platform-openbsd.cc @@ -245,7 +245,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, PosixMemoryMappedFile::~PosixMemoryMappedFile() { - if (memory_) munmap(memory_, size_); + if (memory_) OS::Free(memory_, size_); fclose(file_); } @@ -342,7 +342,8 @@ VirtualMemory::VirtualMemory(size_t size) { VirtualMemory::~VirtualMemory() { if (IsReserved()) { - if (0 == munmap(address(), size())) address_ = MAP_FAILED; + OS::Free(address(), size()); + address_ = MAP_FAILED } } diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc index 52cf02963..78fece3f1 100644 --- a/deps/v8/src/platform-posix.cc +++ b/deps/v8/src/platform-posix.cc @@ -84,6 +84,34 @@ void OS::Guard(void* address, const size_t size) { #endif // __CYGWIN__ +void* OS::GetRandomMmapAddr() { + Isolate* isolate = Isolate::UncheckedCurrent(); + // Note that the current isolate isn't set up in a call path via + // CpuFeatures::Probe. We don't care about randomization in this case because + // the code page is immediately freed. 
+ if (isolate != NULL) { +#ifdef V8_TARGET_ARCH_X64 + uint64_t rnd1 = V8::RandomPrivate(isolate); + uint64_t rnd2 = V8::RandomPrivate(isolate); + uint64_t raw_addr = (rnd1 << 32) ^ rnd2; + // Currently available CPUs have 48 bits of virtual addressing. Truncate + // the hint address to 46 bits to give the kernel a fighting chance of + // fulfilling our placement request. + raw_addr &= V8_UINT64_C(0x3ffffffff000); +#else + uint32_t raw_addr = V8::RandomPrivate(isolate); + // The range 0x20000000 - 0x60000000 is relatively unpopulated across a + // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos + // 10.6 and 10.7. + raw_addr &= 0x3ffff000; + raw_addr += 0x20000000; +#endif + return reinterpret_cast<void*>(raw_addr); + } + return NULL; +} + + // ---------------------------------------------------------------------------- // Math functions diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index 97788e2f6..8771c4367 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -1397,41 +1397,101 @@ void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { } -bool VirtualMemory::IsReserved() { - return address_ != NULL; -} +VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } + + +VirtualMemory::VirtualMemory(size_t size) + : address_(ReserveRegion(size)), size_(size) { } -VirtualMemory::VirtualMemory(size_t size) { - address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS); - size_ = size; +VirtualMemory::VirtualMemory(size_t size, size_t alignment) + : address_(NULL), size_(0) { + ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment()))); + size_t request_size = RoundUp(size + alignment, + static_cast<intptr_t>(OS::AllocateAlignment())); + void* address = ReserveRegion(request_size); + if (address == NULL) return; + Address base = RoundUp(static_cast<Address>(address), alignment); + // Try reducing the size by freeing and then reallocating a specific area. + bool result = ReleaseRegion(address, request_size); + USE(result); + ASSERT(result); + address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS); + if (address != NULL) { + request_size = size; + ASSERT(base == static_cast<Address>(address)); + } else { + // Resizing failed, just go with a bigger area. + address = ReserveRegion(request_size); + if (address == NULL) return; + } + address_ = address; + size_ = request_size; } VirtualMemory::~VirtualMemory() { if (IsReserved()) { - if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL; + bool result = ReleaseRegion(address_, size_); + ASSERT(result); + USE(result); } } +bool VirtualMemory::IsReserved() { + return address_ != NULL; +} + + +void VirtualMemory::Reset() { + address_ = NULL; + size_ = 0; +} + + bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { + if (CommitRegion(address, size, is_executable)) { + UpdateAllocatedSpaceLimits(address, static_cast<int>(size)); + return true; + } + return false; +} + + +bool VirtualMemory::Uncommit(void* address, size_t size) { + ASSERT(IsReserved()); + return UncommitRegion(address, size); +} + + +void* VirtualMemory::ReserveRegion(size_t size) { + return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS); +} + + +bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { int prot = is_executable ? 
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; - if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) { + if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) { return false; } - UpdateAllocatedSpaceLimits(address, static_cast<int>(size)); + UpdateAllocatedSpaceLimits(base, static_cast<int>(size)); return true; } -bool VirtualMemory::Uncommit(void* address, size_t size) { - ASSERT(IsReserved()); - return VirtualFree(address, size, MEM_DECOMMIT) != false; +bool VirtualMemory::UncommitRegion(void* base, size_t size) { + return VirtualFree(base, size, MEM_DECOMMIT) != 0; } +bool VirtualMemory::ReleaseRegion(void* base, size_t size) { + return VirtualFree(base, 0, MEM_RELEASE) != 0; +} + + + + // ---------------------------------------------------------------------------- // Win32 thread support. @@ -1453,6 +1513,7 @@ class Thread::PlatformData : public Malloced { public: explicit PlatformData(HANDLE thread) : thread_(thread) {} HANDLE thread_; + unsigned thread_id_; }; @@ -1496,13 +1557,15 @@ void Thread::Start() { ThreadEntry, this, 0, - NULL)); + &data_->thread_id_)); } // Wait for thread to terminate. void Thread::Join() { - WaitForSingleObject(data_->thread_, INFINITE); + if (data_->thread_id_ != GetCurrentThreadId()) { + WaitForSingleObject(data_->thread_, INFINITE); + } } diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h index 034fe3404..f84b6b17a 100644 --- a/deps/v8/src/platform.h +++ b/deps/v8/src/platform.h @@ -178,6 +178,9 @@ class OS { // Assign memory as a guard page so that access will cause an exception. static void Guard(void* address, const size_t size); + // Generate a random address to be used for hinting mmap(). + static void* GetRandomMmapAddr(); + // Get the Alignment guaranteed by Allocate(). static size_t AllocateAlignment(); @@ -301,23 +304,46 @@ class OS { DISALLOW_IMPLICIT_CONSTRUCTORS(OS); }; - +// Represents and controls an area of reserved memory. +// Control of the reserved memory can be assigned to another VirtualMemory +// object by assignment or copy-constructing. This removes the reserved memory +// from the original object. class VirtualMemory { public: + // Empty VirtualMemory object, controlling no reserved memory. + VirtualMemory(); + // Reserves virtual memory with size. explicit VirtualMemory(size_t size); + + // Reserves virtual memory containing an area of the given size that + // is aligned per alignment. This may not be at the position returned + // by address(). + VirtualMemory(size_t size, size_t alignment); + + // Releases the reserved memory, if any, controlled by this VirtualMemory + // object. ~VirtualMemory(); // Returns whether the memory has been reserved. bool IsReserved(); + // Initializes or resets an embedded VirtualMemory object. + void Reset(); + // Returns the start address of the reserved memory. + // If the memory was reserved with an alignment, this address is not + // necessarily aligned. The user might need to round it up to a multiple of + // the alignment to get the start of the aligned block. void* address() { ASSERT(IsReserved()); return address_; } - // Returns the size of the reserved memory. + // Returns the size of the reserved memory. The returned value is only + // meaningful when IsReserved() returns true. + // If the memory was reserved with an alignment, this size may be larger + // than the requested size. size_t size() { return size_; } // Commits real memory. Returns whether the operation succeeded. @@ -326,11 +352,43 @@ class VirtualMemory { // Uncommit real memory.
Returns whether the operation succeeded. bool Uncommit(void* address, size_t size); + void Release() { + ASSERT(IsReserved()); + // Notice: Order is important here. The VirtualMemory object might live + // inside the allocated region. + void* address = address_; + size_t size = size_; + Reset(); + bool result = ReleaseRegion(address, size); + USE(result); + ASSERT(result); + } + + // Assign control of the reserved region to a different VirtualMemory object. + // The old object is no longer functional (IsReserved() returns false). + void TakeControl(VirtualMemory* from) { + ASSERT(!IsReserved()); + address_ = from->address_; + size_ = from->size_; + from->Reset(); + } + + static void* ReserveRegion(size_t size); + + static bool CommitRegion(void* base, size_t size, bool is_executable); + + static bool UncommitRegion(void* base, size_t size); + + // Must be called with a base pointer that has been returned by ReserveRegion + // and the same size it was reserved with. + static bool ReleaseRegion(void* base, size_t size); + private: void* address_; // Start address of the virtual memory. size_t size_; // Size of the virtual memory. }; + // ---------------------------------------------------------------------------- // Thread // diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc index 47d21bac1..9f8e1eecc 100644 --- a/deps/v8/src/preparser.cc +++ b/deps/v8/src/preparser.cc @@ -117,7 +117,18 @@ void PreParser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) { PreParser::Statement PreParser::ParseSourceElement(bool* ok) { + // (Ecma 262 5th Edition, clause 14): + // SourceElement: + // Statement + // FunctionDeclaration + // + // In harmony mode we allow additionally the following productions + // SourceElement: + // LetDeclaration + switch (peek()) { + case i::Token::FUNCTION: + return ParseFunctionDeclaration(ok); case i::Token::LET: return ParseVariableStatement(kSourceElement, ok); default: @@ -225,8 +236,19 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) { case i::Token::TRY: return ParseTryStatement(ok); - case i::Token::FUNCTION: - return ParseFunctionDeclaration(ok); + case i::Token::FUNCTION: { + i::Scanner::Location start_location = scanner_->peek_location(); + Statement statement = ParseFunctionDeclaration(CHECK_OK); + i::Scanner::Location end_location = scanner_->location(); + if (strict_mode()) { + ReportMessageAt(start_location.beg_pos, end_location.end_pos, + "strict_function", NULL); + *ok = false; + return Statement::Default(); + } else { + return statement; + } + } case i::Token::DEBUGGER: return ParseDebuggerStatement(ok); @@ -271,14 +293,10 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) { // Expect(i::Token::LBRACE, CHECK_OK); while (peek() != i::Token::RBRACE) { - i::Scanner::Location start_location = scanner_->peek_location(); - Statement statement = ParseSourceElement(CHECK_OK); - i::Scanner::Location end_location = scanner_->location(); - if (strict_mode() && statement.IsFunctionDeclaration()) { - ReportMessageAt(start_location.beg_pos, end_location.end_pos, - "strict_function", NULL); - *ok = false; - return Statement::Default(); + if (harmony_scoping_) { + ParseSourceElement(CHECK_OK); + } else { + ParseStatement(CHECK_OK); } } Expect(i::Token::RBRACE, ok); @@ -372,18 +390,11 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) { Expression expr = ParseExpression(true, CHECK_OK); if (expr.IsRawIdentifier()) { - if (peek() == i::Token::COLON && - (!strict_mode() || 
!expr.AsIdentifier().IsFutureReserved())) { + ASSERT(!expr.AsIdentifier().IsFutureReserved()); + ASSERT(!strict_mode() || !expr.AsIdentifier().IsFutureStrictReserved()); + if (peek() == i::Token::COLON) { Consume(i::Token::COLON); - i::Scanner::Location start_location = scanner_->peek_location(); - Statement statement = ParseStatement(CHECK_OK); - if (strict_mode() && statement.IsFunctionDeclaration()) { - i::Scanner::Location end_location = scanner_->location(); - ReportMessageAt(start_location.beg_pos, end_location.end_pos, - "strict_function", NULL); - *ok = false; - } - return Statement::Default(); + return ParseStatement(ok); } // Preparsing is disabled for extensions (because the extension details // aren't passed to lazily compiled functions), so we don't @@ -513,15 +524,7 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) { Expect(i::Token::DEFAULT, CHECK_OK); Expect(i::Token::COLON, CHECK_OK); } else { - i::Scanner::Location start_location = scanner_->peek_location(); - Statement statement = ParseStatement(CHECK_OK); - if (strict_mode() && statement.IsFunctionDeclaration()) { - i::Scanner::Location end_location = scanner_->location(); - ReportMessageAt(start_location.beg_pos, end_location.end_pos, - "strict_function", NULL); - *ok = false; - return Statement::Default(); - } + ParseStatement(CHECK_OK); } token = peek(); } @@ -1434,9 +1437,16 @@ PreParser::Identifier PreParser::ParseIdentifier(bool* ok) { ReportMessageAt(location.beg_pos, location.end_pos, "reserved_word", NULL); *ok = false; + return GetIdentifierSymbol(); } - // FALLTHROUGH case i::Token::FUTURE_STRICT_RESERVED_WORD: + if (strict_mode()) { + i::Scanner::Location location = scanner_->location(); + ReportMessageAt(location.beg_pos, location.end_pos, + "strict_reserved_word", NULL); + *ok = false; + } + // FALLTHROUGH case i::Token::IDENTIFIER: return GetIdentifierSymbol(); default: diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h index b97b7cff6..cb1d5fb4e 100644 --- a/deps/v8/src/preparser.h +++ b/deps/v8/src/preparser.h @@ -447,7 +447,7 @@ class PreParser { stack_overflow_(false), allow_lazy_(true), parenthesized_function_(false), - harmony_block_scoping_(scanner->HarmonyBlockScoping()) { } + harmony_scoping_(scanner->HarmonyScoping()) { } // Preparse the program. Only called in PreParseProgram after creating // the instance. @@ -608,7 +608,7 @@ class PreParser { bool stack_overflow_; bool allow_lazy_; bool parenthesized_function_; - bool harmony_block_scoping_; + bool harmony_scoping_; }; } } // v8::preparser diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc index 663af284b..37c76ceef 100644 --- a/deps/v8/src/prettyprinter.cc +++ b/deps/v8/src/prettyprinter.cc @@ -372,13 +372,6 @@ void PrettyPrinter::VisitCompareOperation(CompareOperation* node) { } -void PrettyPrinter::VisitCompareToNull(CompareToNull* node) { - Print("("); - Visit(node->expression()); - Print("%s null)", Token::String(node->op())); -} - - void PrettyPrinter::VisitThisFunction(ThisFunction* node) { Print("<this-function>"); } @@ -1020,15 +1013,6 @@ void AstPrinter::VisitCompareOperation(CompareOperation* node) { } -void AstPrinter::VisitCompareToNull(CompareToNull* node) { - const char* name = node->is_strict() - ? 
"COMPARE-TO-NULL-STRICT" - : "COMPARE-TO-NULL"; - IndentedScope indent(this, name, node); - Visit(node->expression()); -} - - void AstPrinter::VisitThisFunction(ThisFunction* node) { IndentedScope indent(this, "THIS-FUNCTION"); } @@ -1404,16 +1388,6 @@ void JsonAstBuilder::VisitCompareOperation(CompareOperation* expr) { } -void JsonAstBuilder::VisitCompareToNull(CompareToNull* expr) { - TagScope tag(this, "CompareToNull"); - { - AttributesScope attributes(this); - AddAttribute("is_strict", expr->is_strict()); - } - Visit(expr->expression()); -} - - void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) { TagScope tag(this, "ThisFunction"); } diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc index a7384a62a..bae35c89e 100644 --- a/deps/v8/src/profile-generator.cc +++ b/deps/v8/src/profile-generator.cc @@ -488,8 +488,6 @@ void CpuProfile::Print() { CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL; const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL; -const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue = - CodeMap::CodeEntryInfo(NULL, 0); void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) { @@ -1015,6 +1013,11 @@ int HeapEntry::RetainedSize(bool exact) { } +Handle<HeapObject> HeapEntry::GetHeapObject() { + return snapshot_->collection()->FindHeapObjectById(id()); +} + + template<class Visitor> void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) { List<HeapEntry*> list(10); @@ -1375,8 +1378,8 @@ HeapObjectsMap::~HeapObjectsMap() { void HeapObjectsMap::SnapshotGenerationFinished() { - initial_fill_mode_ = false; - RemoveDeadEntries(); + initial_fill_mode_ = false; + RemoveDeadEntries(); } @@ -1398,10 +1401,12 @@ void HeapObjectsMap::MoveObject(Address from, Address to) { if (entry != NULL) { void* value = entry->value; entries_map_.Remove(from, AddressHash(from)); - entry = entries_map_.Lookup(to, AddressHash(to), true); - // We can have an entry at the new location, it is OK, as GC can overwrite - // dead objects with alive objects being moved. - entry->value = value; + if (to != NULL) { + entry = entries_map_.Lookup(to, AddressHash(to), true); + // We can have an entry at the new location, it is OK, as GC can overwrite + // dead objects with alive objects being moved. + entry->value = value; + } } } @@ -1522,6 +1527,26 @@ void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) { } +Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) { + // First perform a full GC in order to avoid dead objects. + HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask); + AssertNoAllocation no_allocation; + HeapObject* object = NULL; + HeapIterator iterator(HeapIterator::kFilterUnreachable); + // Make sure that object with the given id is still reachable. + for (HeapObject* obj = iterator.next(); + obj != NULL; + obj = iterator.next()) { + if (ids_.FindObject(obj->address()) == id) { + ASSERT(object == NULL); + object = obj; + // Can't break -- kFilterUnreachable requires full heap traversal. + } + } + return object != NULL ? 
Handle<HeapObject>(object) : Handle<HeapObject>(); +} + + HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder = reinterpret_cast<HeapEntry*>(1); @@ -1812,12 +1837,13 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) { } -int V8HeapExplorer::EstimateObjectsCount() { - HeapIterator iterator(HeapIterator::kFilterUnreachable); +int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) { int objects_count = 0; - for (HeapObject* obj = iterator.next(); + for (HeapObject* obj = iterator->next(); obj != NULL; - obj = iterator.next(), ++objects_count) {} + obj = iterator->next()) { + objects_count++; + } return objects_count; } @@ -1945,6 +1971,14 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) { "descriptors", map->instance_descriptors(), Map::kInstanceDescriptorsOrBitField3Offset); } + if (map->prototype_transitions() != heap_->empty_fixed_array()) { + TagObject(map->prototype_transitions(), "(prototype transitions)"); + SetInternalReference(obj, + entry, + "prototype_transitions", + map->prototype_transitions(), + Map::kPrototypeTransitionsOffset); + } SetInternalReference(obj, entry, "code_cache", map->code_cache(), Map::kCodeCacheOffset); @@ -2175,9 +2209,11 @@ class RootsReferencesExtractor : public ObjectVisitor { bool V8HeapExplorer::IterateAndExtractReferences( SnapshotFillerInterface* filler) { - filler_ = filler; HeapIterator iterator(HeapIterator::kFilterUnreachable); + + filler_ = filler; bool interrupted = false; + // Heap iteration with filtering must be finished in any case. for (HeapObject* obj = iterator.next(); obj != NULL; @@ -2743,13 +2779,43 @@ class SnapshotFiller : public SnapshotFillerInterface { bool HeapSnapshotGenerator::GenerateSnapshot() { v8_heap_explorer_.TagGlobalObjects(); + // TODO(1562) Profiler assumes that any object that is in the heap after + // full GC is reachable from the root when computing dominators. + // This is not true for weakly reachable objects. + // As a temporary solution we call GC twice. + Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask); + Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask); + +#ifdef DEBUG + Heap* debug_heap = Isolate::Current()->heap(); + ASSERT(!debug_heap->old_data_space()->was_swept_conservatively()); + ASSERT(!debug_heap->old_pointer_space()->was_swept_conservatively()); + ASSERT(!debug_heap->code_space()->was_swept_conservatively()); + ASSERT(!debug_heap->cell_space()->was_swept_conservatively()); + ASSERT(!debug_heap->map_space()->was_swept_conservatively()); +#endif + + // The following code uses heap iterators, so we want the heap to be + // stable. It should follow TagGlobalObjects as that can allocate. AssertNoAllocation no_alloc; +#ifdef DEBUG + debug_heap->Verify(); +#endif + SetProgressTotal(4); // 2 passes + dominators + sizes. +#ifdef DEBUG + debug_heap->Verify(); +#endif + // Pass 1. Iterate heap contents to count entries and references. if (!CountEntriesAndReferences()) return false; +#ifdef DEBUG + debug_heap->Verify(); +#endif + // Allocate and fill entries in the snapshot, allocate references. 
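(Editorial aside, not part of the patch.) The profile-generator changes above introduce a way to map a snapshot node id back to a live object: HeapEntry::GetHeapObject() delegates to HeapSnapshotsCollection::FindHeapObjectById(), which forces a full GC and then scans the heap for an object with the requested id. A hedged usage sketch, where 'entry' is a hypothetical HeapEntry* taken from a completed snapshot:

    Handle<HeapObject> obj = entry->GetHeapObject();
    if (!obj.is_null()) {
      // The object with id entry->id() survived the full GC performed by
      // FindHeapObjectById() and is still reachable from the roots.
    }

The AllocateEntries() call announced by the comment above continues directly below.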
snapshot_->AllocateEntries(entries_.entries_count(), entries_.total_children_count(), @@ -2787,8 +2853,9 @@ bool HeapSnapshotGenerator::ProgressReport(bool force) { void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) { if (control_ == NULL) return; + HeapIterator iterator(HeapIterator::kFilterUnreachable); progress_total_ = ( - v8_heap_explorer_.EstimateObjectsCount() + + v8_heap_explorer_.EstimateObjectsCount(&iterator) + dom_explorer_.EstimateObjectsCount()) * iterations_count; progress_counter_ = 0; } @@ -2838,7 +2905,7 @@ void HeapSnapshotGenerator::FillReversePostorderIndexes( nodes_to_visit.RemoveLast(); } } - entries->Truncate(current_entry); + ASSERT_EQ(current_entry, entries->length()); } diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h index f3737eafb..0eb73bef9 100644 --- a/deps/v8/src/profile-generator.h +++ b/deps/v8/src/profile-generator.h @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -257,7 +257,7 @@ class CodeMap { typedef Address Key; typedef CodeEntryInfo Value; static const Key kNoKey; - static const Value kNoValue; + static const Value NoValue() { return CodeEntryInfo(NULL, 0); } static int Compare(const Key& a, const Key& b) { return a < b ? -1 : (a > b ? 1 : 0); } @@ -550,7 +550,10 @@ class HeapEntry BASE_EMBEDDED { Vector<HeapGraphEdge*> retainers() { return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); } HeapEntry* dominator() { return dominator_; } - void set_dominator(HeapEntry* entry) { dominator_ = entry; } + void set_dominator(HeapEntry* entry) { + ASSERT(entry != NULL); + dominator_ = entry; + } void clear_paint() { painted_ = kUnpainted; } bool painted_reachable() { return painted_ == kPainted; } @@ -585,6 +588,8 @@ class HeapEntry BASE_EMBEDDED { void Print(int max_depth, int indent); + Handle<HeapObject> GetHeapObject(); + static int EntriesSize(int entries_count, int children_count, int retainers_count); @@ -763,6 +768,7 @@ class HeapSnapshotsCollection { TokenEnumerator* token_enumerator() { return token_enumerator_; } uint64_t GetObjectId(Address addr) { return ids_.FindObject(addr); } + Handle<HeapObject> FindHeapObjectById(uint64_t id); void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); } private: @@ -917,7 +923,7 @@ class V8HeapExplorer : public HeapEntriesAllocator { virtual HeapEntry* AllocateEntry( HeapThing ptr, int children_count, int retainers_count); void AddRootEntries(SnapshotFillerInterface* filler); - int EstimateObjectsCount(); + int EstimateObjectsCount(HeapIterator* iterator); bool IterateAndExtractReferences(SnapshotFillerInterface* filler); void TagGlobalObjects(); diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h index e7d9fc534..ee2e8c844 100644 --- a/deps/v8/src/property.h +++ b/deps/v8/src/property.h @@ -115,11 +115,9 @@ class MapTransitionDescriptor: public Descriptor { class ElementsTransitionDescriptor: public Descriptor { public: ElementsTransitionDescriptor(String* key, - Map* map, - ElementsKind elements_kind) - : Descriptor(key, map, PropertyDetails(NONE, - ELEMENTS_TRANSITION, - elements_kind)) { } + Object* map_or_array) + : Descriptor(key, map_or_array, PropertyDetails(NONE, + ELEMENTS_TRANSITION)) { } }; // Marks a field name in a map so that adding the field is guaranteed @@ 
-202,9 +200,9 @@ class LookupResult BASE_EMBEDDED { number_ = entry; } - void HandlerResult() { + void HandlerResult(JSProxy* proxy) { lookup_type_ = HANDLER_TYPE; - holder_ = NULL; + holder_ = proxy; details_ = PropertyDetails(NONE, HANDLER); cacheable_ = false; } @@ -221,7 +219,12 @@ class LookupResult BASE_EMBEDDED { JSObject* holder() { ASSERT(IsFound()); - return holder_; + return JSObject::cast(holder_); + } + + JSProxy* proxy() { + ASSERT(IsFound()); + return JSProxy::cast(holder_); } PropertyType type() { @@ -354,7 +357,7 @@ class LookupResult BASE_EMBEDDED { CONSTANT_TYPE } lookup_type_; - JSObject* holder_; + JSReceiver* holder_; int number_; bool cacheable_; PropertyDetails details_; diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js index 4e44cd4ef..a51f09ae5 100644 --- a/deps/v8/src/proxy.js +++ b/deps/v8/src/proxy.js @@ -41,14 +41,20 @@ $Proxy.createFunction = function(handler, callTrap, constructTrap) { throw MakeTypeError("handler_non_object", ["create"]) if (!IS_SPEC_FUNCTION(callTrap)) throw MakeTypeError("trap_function_expected", ["createFunction", "call"]) + var construct if (IS_UNDEFINED(constructTrap)) { - constructTrap = callTrap - } else if (!IS_SPEC_FUNCTION(constructTrap)) { + construct = DerivedConstructTrap(callTrap) + } else if (IS_SPEC_FUNCTION(constructTrap)) { + construct = function() { + // Make sure the trap receives 'undefined' as this. + return %Apply(constructTrap, void 0, arguments, 0, %_ArgumentsLength()); + } + } else { throw MakeTypeError("trap_function_expected", ["createFunction", "construct"]) } return %CreateJSFunctionProxy( - handler, callTrap, constructTrap, $Function.prototype) + handler, callTrap, construct, $Function.prototype) } @@ -57,6 +63,17 @@ $Proxy.createFunction = function(handler, callTrap, constructTrap) { // Builtins //////////////////////////////////////////////////////////////////////////////// +function DerivedConstructTrap(callTrap) { + return function() { + var proto = this.prototype + if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype + var obj = new $Object() + obj.__proto__ = proto + var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength()); + return IS_SPEC_OBJECT(result) ? result : obj + } +} + function DelegateCallAndConstruct(callTrap, constructTrap) { return function() { return %Apply(%_IsConstructCall() ? constructTrap : callTrap, diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc index b32d71dba..f8432784f 100644 --- a/deps/v8/src/regexp-macro-assembler-tracer.cc +++ b/deps/v8/src/regexp-macro-assembler-tracer.cc @@ -37,8 +37,8 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer( RegExpMacroAssembler* assembler) : assembler_(assembler) { unsigned int type = assembler->Implementation(); - ASSERT(type < 4); - const char* impl_names[4] = {"IA32", "ARM", "X64", "Bytecode"}; + ASSERT(type < 5); + const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"}; PrintF("RegExpMacroAssembler%s();\n", impl_names[type]); } diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js index 38d449615..0ab86f333 100644 --- a/deps/v8/src/regexp.js +++ b/deps/v8/src/regexp.js @@ -95,12 +95,11 @@ function RegExpConstructor(pattern, flags) { } } - // Deprecated RegExp.prototype.compile method. We behave like the constructor // were called again. In SpiderMonkey, this method returns the regexp object. // In JSC, it returns undefined. For compatibility with JSC, we match their // behavior. 
-function CompileRegExp(pattern, flags) { +function RegExpCompile(pattern, flags) { // Both JSC and SpiderMonkey treat a missing pattern argument as the // empty subject string, and an actual undefined value passed as the // pattern as the string 'undefined'. Note that JSC is inconsistent @@ -108,6 +107,11 @@ function CompileRegExp(pattern, flags) { // RegExp.prototype.compile and in the constructor, where they are // the empty string. For compatibility with JSC, we match their // behavior. + if (this == $RegExp.prototype) { + // We don't allow recompiling RegExp.prototype. + throw MakeTypeError('incompatible_method_receiver', + ['RegExp.prototype.compile', this]); + } if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) { DoConstructRegExp(this, 'undefined', flags); } else { @@ -408,7 +412,6 @@ var lastMatchInfoOverride = null; function SetUpRegExp() { %CheckIsBootstrapping(); %FunctionSetInstanceClassName($RegExp, 'RegExp'); - %FunctionSetPrototype($RegExp, new $Object()); %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM); %SetCode($RegExp, RegExpConstructor); @@ -416,7 +419,7 @@ function SetUpRegExp() { "exec", RegExpExec, "test", RegExpTest, "toString", RegExpToString, - "compile", CompileRegExp + "compile", RegExpCompile )); // The length of compile is 1 in SpiderMonkey. diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc index 26d884610..520dd3989 100644 --- a/deps/v8/src/runtime-profiler.cc +++ b/deps/v8/src/runtime-profiler.cc @@ -35,6 +35,7 @@ #include "deoptimizer.h" #include "execution.h" #include "global-handles.h" +#include "isolate-inl.h" #include "mark-compact.h" #include "platform.h" #include "scopeinfo.h" @@ -338,7 +339,8 @@ void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) { void RuntimeProfiler::RemoveDeadSamples() { for (int i = 0; i < kSamplerWindowSize; i++) { Object* function = sampler_window_[i]; - if (function != NULL && !HeapObject::cast(function)->IsMarked()) { + if (function != NULL && + !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) { sampler_window_[i] = NULL; } } diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 3ea93049c..e0f507e17 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -42,6 +42,7 @@ #include "deoptimizer.h" #include "execution.h" #include "global-handles.h" +#include "isolate-inl.h" #include "jsregexp.h" #include "json-parser.h" #include "liveedit.h" @@ -177,6 +178,7 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate, // Pixel elements cannot be created using an object literal. 
ASSERT(!copy->HasExternalArrayElements()); switch (copy->GetElementsKind()) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: { FixedArray* elements = FixedArray::cast(copy->elements()); if (elements->map() == heap->fixed_cow_array_map()) { @@ -189,6 +191,9 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate, } else { for (int i = 0; i < elements->length(); i++) { Object* value = elements->get(i); + ASSERT(value->IsSmi() || + value->IsTheHole() || + (copy->GetElementsKind() == FAST_ELEMENTS)); if (value->IsJSObject()) { JSObject* js_object = JSObject::cast(value); { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, @@ -417,6 +422,9 @@ static Handle<Object> CreateObjectLiteralBoilerplate( } +static const int kSmiOnlyLiteralMinimumLength = 1024; + + static Handle<Object> CreateArrayLiteralBoilerplate( Isolate* isolate, Handle<FixedArray> literals, @@ -426,22 +434,38 @@ static Handle<Object> CreateArrayLiteralBoilerplate( JSFunction::GlobalContextFromLiterals(*literals)->array_function()); Handle<Object> object = isolate->factory()->NewJSObject(constructor); + if (elements->length() > kSmiOnlyLiteralMinimumLength) { + Handle<Map> smi_array_map = isolate->factory()->GetElementsTransitionMap( + Handle<JSObject>::cast(object), + FAST_SMI_ONLY_ELEMENTS); + HeapObject::cast(*object)->set_map(*smi_array_map); + } + const bool is_cow = (elements->map() == isolate->heap()->fixed_cow_array_map()); Handle<FixedArray> copied_elements = is_cow ? elements : isolate->factory()->CopyFixedArray(elements); Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements); + bool has_non_smi = false; if (is_cow) { -#ifdef DEBUG // Copy-on-write arrays must be shallow (and simple). for (int i = 0; i < content->length(); i++) { + Object* current = content->get(i); + ASSERT(!current->IsFixedArray()); + if (!current->IsSmi() && !current->IsTheHole()) { + has_non_smi = true; + } + } +#if DEBUG + for (int i = 0; i < content->length(); i++) { ASSERT(!content->get(i)->IsFixedArray()); } #endif } else { for (int i = 0; i < content->length(); i++) { - if (content->get(i)->IsFixedArray()) { + Object* current = content->get(i); + if (current->IsFixedArray()) { // The value contains the constant_properties of a // simple object or array literal. Handle<FixedArray> fa(FixedArray::cast(content->get(i))); @@ -449,12 +473,23 @@ static Handle<Object> CreateArrayLiteralBoilerplate( CreateLiteralBoilerplate(isolate, literals, fa); if (result.is_null()) return result; content->set(i, *result); + has_non_smi = true; + } else { + if (!current->IsSmi() && !current->IsTheHole()) { + has_non_smi = true; + } } } } // Set the elements. - Handle<JSArray>::cast(object)->SetContent(*content); + Handle<JSArray> js_object(Handle<JSArray>::cast(object)); + isolate->factory()->SetContent(js_object, content); + + if (has_non_smi && js_object->HasFastSmiOnlyElements()) { + isolate->factory()->EnsureCanContainNonSmiElements(js_object); + } + return object; } @@ -685,10 +720,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) { NoHandleAllocation ha; ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0); - // TODO(mstarzinger): Currently we cannot use JSProxy objects as keys - // because they cannot be cast to JSObject to get an identity hash code. 
- CONVERT_ARG_CHECKED(JSObject, key, 1); - return weakmap->table()->Lookup(*key); + CONVERT_ARG_CHECKED(JSReceiver, key, 1); + return ObjectHashTable::cast(weakmap->table())->Lookup(*key); } @@ -696,10 +729,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) { HandleScope scope(isolate); ASSERT(args.length() == 3); CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0); - // TODO(mstarzinger): See Runtime_WeakMapGet above. - CONVERT_ARG_CHECKED(JSObject, key, 1); + CONVERT_ARG_CHECKED(JSReceiver, key, 1); Handle<Object> value(args[2]); - Handle<ObjectHashTable> table(weakmap->table()); + Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table())); Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value); weakmap->set_table(*new_table); return *value; @@ -1211,46 +1243,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) { LookupResult lookup; global->Lookup(*name, &lookup); if (lookup.IsProperty()) { - // Determine if the property is local by comparing the holder - // against the global object. The information will be used to - // avoid throwing re-declaration errors when declaring - // variables or constants that exist in the prototype chain. - bool is_local = (*global == lookup.holder()); - // Get the property attributes and determine if the property is - // read-only. + // We found an existing property. Unless it was an interceptor + // that claims the property is absent, skip this declaration. + if (lookup.type() != INTERCEPTOR) { + continue; + } PropertyAttributes attributes = global->GetPropertyAttribute(*name); - bool is_read_only = (attributes & READ_ONLY) != 0; - if (lookup.type() == INTERCEPTOR) { - // If the interceptor says the property is there, we - // just return undefined without overwriting the property. - // Otherwise, we continue to setting the property. - if (attributes != ABSENT) { - // Check if the existing property conflicts with regards to const. - if (is_local && (is_read_only || is_const_property)) { - const char* type = (is_read_only) ? "const" : "var"; - return ThrowRedeclarationError(isolate, type, name); - }; - // The property already exists without conflicting: Go to - // the next declaration. - continue; - } - // Fall-through and introduce the absent property by using - // SetProperty. - } else { - // For const properties, we treat a callback with this name - // even in the prototype as a conflicting declaration. - if (is_const_property && (lookup.type() == CALLBACKS)) { - return ThrowRedeclarationError(isolate, "const", name); - } - // Otherwise, we check for locally conflicting declarations. - if (is_local && (is_read_only || is_const_property)) { - const char* type = (is_read_only) ? "const" : "var"; - return ThrowRedeclarationError(isolate, type, name); - } - // The property already exists without conflicting: Go to - // the next declaration. + if (attributes != ABSENT) { continue; } + // Fall-through and introduce the absent property by using + // SetProperty. } } else { is_function_declaration = true; @@ -1267,20 +1270,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) { LookupResult lookup; global->LocalLookup(*name, &lookup); - // There's a local property that we need to overwrite because - // we're either declaring a function or there's an interceptor - // that claims the property is absent. - // - // Check for conflicting re-declarations. We cannot have - // conflicting types in case of intercepted properties because - // they are absent. 
- if (lookup.IsProperty() && - (lookup.type() != INTERCEPTOR) && - (lookup.IsReadOnly() || is_const_property)) { - const char* type = (lookup.IsReadOnly()) ? "const" : "var"; - return ThrowRedeclarationError(isolate, type, name); - } - // Compute the property attributes. According to ECMA-262, section // 13, page 71, the property must be read-only and // non-deletable. However, neither SpiderMonkey nor KJS creates the @@ -1335,15 +1324,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) { HandleScope scope(isolate); ASSERT(args.length() == 4); - CONVERT_ARG_CHECKED(Context, context, 0); + // Declarations are always made in a function or global context. In the + // case of eval code, the context passed is the context of the caller, + // which may be some nested context and not the declaration context. + RUNTIME_ASSERT(args[0]->IsContext()); + Handle<Context> context(Context::cast(args[0])->declaration_context()); + Handle<String> name(String::cast(args[1])); PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2)); RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE); Handle<Object> initial_value(args[3], isolate); - // Declarations are always done in a function or global context. - context = Handle<Context>(context->declaration_context()); - int index; PropertyAttributes attributes; ContextLookupFlags flags = DONT_FOLLOW_CHAINS; @@ -1352,9 +1343,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) { context->Lookup(name, flags, &index, &attributes, &binding_flags); if (attributes != ABSENT) { - // The name was declared before; check for conflicting - // re-declarations: This is similar to the code in parser.cc in - // the AstBuildingParser::Declare function. + // The name was declared before; check for conflicting re-declarations. if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) { // Functions are not read-only. ASSERT(mode != READ_ONLY || initial_value->IsTheHole()); @@ -1365,53 +1354,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) { // Initialize it if necessary. if (*initial_value != NULL) { if (index >= 0) { - // The variable or constant context slot should always be in - // the function context or the arguments object. - if (holder->IsContext()) { - ASSERT(holder.is_identical_to(context)); - if (((attributes & READ_ONLY) == 0) || - context->get(index)->IsTheHole()) { - context->set(index, *initial_value); - } - } else { - // The holder is an arguments object. - Handle<JSObject> arguments(Handle<JSObject>::cast(holder)); - Handle<Object> result = SetElement(arguments, index, initial_value, - kNonStrictMode); - if (result.is_null()) return Failure::Exception(); + ASSERT(holder.is_identical_to(context)); + if (((attributes & READ_ONLY) == 0) || + context->get(index)->IsTheHole()) { + context->set(index, *initial_value); } } else { - // Slow case: The property is not in the FixedArray part of the context. - Handle<JSObject> context_ext = Handle<JSObject>::cast(holder); + // Slow case: The property is in the context extension object of a + // function context or the global object of a global context. + Handle<JSObject> object = Handle<JSObject>::cast(holder); RETURN_IF_EMPTY_HANDLE( isolate, - SetProperty(context_ext, name, initial_value, - mode, kNonStrictMode)); + SetProperty(object, name, initial_value, mode, kNonStrictMode)); } } } else { // The property is not in the function context. It needs to be - // "declared" in the function context's extension context, or in the - // global context. 
- Handle<JSObject> context_ext; + // "declared" in the function context's extension context or as a + // property of the the global object. + Handle<JSObject> object; if (context->has_extension()) { - // The function context's extension context exists - use it. - context_ext = Handle<JSObject>(JSObject::cast(context->extension())); + object = Handle<JSObject>(JSObject::cast(context->extension())); } else { - // The function context's extension context does not exists - allocate - // it. - context_ext = isolate->factory()->NewJSObject( + // Context extension objects are allocated lazily. + ASSERT(context->IsFunctionContext()); + object = isolate->factory()->NewJSObject( isolate->context_extension_function()); - // And store it in the extension slot. - context->set_extension(*context_ext); + context->set_extension(*object); } - ASSERT(*context_ext != NULL); + ASSERT(*object != NULL); // Declare the property by setting it to the initial value if provided, // or undefined, and use the correct mode (e.g. READ_ONLY attribute for // constant declarations). - ASSERT(!context_ext->HasLocalProperty(*name)); + ASSERT(!object->HasLocalProperty(*name)); Handle<Object> value(isolate->heap()->undefined_value(), isolate); if (*initial_value != NULL) value = initial_value; // Declaring a const context slot is a conflicting declaration if @@ -1421,15 +1398,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) { // SetProperty and no setters are invoked for those since they are // not real JSObjects. if (initial_value->IsTheHole() && - !context_ext->IsJSContextExtensionObject()) { + !object->IsJSContextExtensionObject()) { LookupResult lookup; - context_ext->Lookup(*name, &lookup); + object->Lookup(*name, &lookup); if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) { return ThrowRedeclarationError(isolate, "const", name); } } RETURN_IF_EMPTY_HANDLE(isolate, - SetProperty(context_ext, name, value, mode, + SetProperty(object, name, value, mode, kNonStrictMode)); } @@ -1465,64 +1442,32 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) { // to assign to the property. // Note that objects can have hidden prototypes, so we need to traverse // the whole chain of hidden prototypes to do a 'local' lookup. - JSObject* real_holder = global; + Object* object = global; LookupResult lookup; - while (true) { - real_holder->LocalLookup(*name, &lookup); - if (lookup.IsProperty()) { - // Determine if this is a redeclaration of something read-only. - if (lookup.IsReadOnly()) { - // If we found readonly property on one of hidden prototypes, - // just shadow it. - if (real_holder != isolate->context()->global()) break; - return ThrowRedeclarationError(isolate, "const", name); - } - - // Determine if this is a redeclaration of an intercepted read-only - // property and figure out if the property exists at all. - bool found = true; - PropertyType type = lookup.type(); - if (type == INTERCEPTOR) { - HandleScope handle_scope(isolate); - Handle<JSObject> holder(real_holder); - PropertyAttributes intercepted = holder->GetPropertyAttribute(*name); - real_holder = *holder; - if (intercepted == ABSENT) { - // The interceptor claims the property isn't there. We need to - // make sure to introduce it. - found = false; - } else if ((intercepted & READ_ONLY) != 0) { - // The property is present, but read-only. Since we're trying to - // overwrite it with a variable declaration we must throw a - // re-declaration error. However if we found readonly property - // on one of hidden prototypes, just shadow it. 
- if (real_holder != isolate->context()->global()) break; - return ThrowRedeclarationError(isolate, "const", name); + while (object->IsJSObject() && + JSObject::cast(object)->map()->is_hidden_prototype()) { + JSObject* raw_holder = JSObject::cast(object); + raw_holder->LocalLookup(*name, &lookup); + if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) { + HandleScope handle_scope(isolate); + Handle<JSObject> holder(raw_holder); + PropertyAttributes intercepted = holder->GetPropertyAttribute(*name); + // Update the raw pointer in case it's changed due to GC. + raw_holder = *holder; + if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) { + // Found an interceptor that's not read only. + if (assign) { + return raw_holder->SetProperty( + &lookup, *name, args[2], attributes, strict_mode); + } else { + return isolate->heap()->undefined_value(); } } - - if (found && !assign) { - // The global property is there and we're not assigning any value - // to it. Just return. - return isolate->heap()->undefined_value(); - } - - // Assign the value (or undefined) to the property. - Object* value = (assign) ? args[2] : isolate->heap()->undefined_value(); - return real_holder->SetProperty( - &lookup, *name, value, attributes, strict_mode); } - - Object* proto = real_holder->GetPrototype(); - if (!proto->IsJSObject()) - break; - - if (!JSObject::cast(proto)->map()->is_hidden_prototype()) - break; - - real_holder = JSObject::cast(proto); + object = raw_holder->GetPrototype(); } + // Reload global in case the loop above performed a GC. global = isolate->context()->global(); if (assign) { return global->SetProperty(*name, args[2], attributes, strict_mode); @@ -1560,25 +1505,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) { attributes); } - // Determine if this is a redeclaration of something not - // read-only. In case the result is hidden behind an interceptor we - // need to ask it for the property attributes. if (!lookup.IsReadOnly()) { - if (lookup.type() != INTERCEPTOR) { - return ThrowRedeclarationError(isolate, "var", name); - } - - PropertyAttributes intercepted = global->GetPropertyAttribute(*name); - - // Throw re-declaration error if the intercepted property is present - // but not read-only. - if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) { - return ThrowRedeclarationError(isolate, "var", name); - } - // Restore global object from context (in case of GC) and continue - // with setting the value because the property is either absent or - // read-only. We also have to do redo the lookup. + // with setting the value. HandleScope handle_scope(isolate); Handle<GlobalObject> global(isolate->context()->global()); @@ -1595,19 +1524,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) { return *value; } - // Set the value, but only we're assigning the initial value to a + // Set the value, but only if we're assigning the initial value to a // constant. For now, we determine this by checking if the // current value is the hole. - // Strict mode handling not needed (const disallowed in strict mode). + // Strict mode handling not needed (const is disallowed in strict mode). 
PropertyType type = lookup.type(); if (type == FIELD) { FixedArray* properties = global->properties(); int index = lookup.GetFieldIndex(); - if (properties->get(index)->IsTheHole()) { + if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) { properties->set(index, *value); } } else if (type == NORMAL) { - if (global->GetNormalizedProperty(&lookup)->IsTheHole()) { + if (global->GetNormalizedProperty(&lookup)->IsTheHole() || + !lookup.IsReadOnly()) { global->SetNormalizedProperty(&lookup, *value); } } else { @@ -1627,11 +1557,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) { Handle<Object> value(args[0], isolate); ASSERT(!value->IsTheHole()); - CONVERT_ARG_CHECKED(Context, context, 1); - Handle<String> name(String::cast(args[2])); // Initializations are always done in a function or global context. - context = Handle<Context>(context->declaration_context()); + RUNTIME_ASSERT(args[1]->IsContext()); + Handle<Context> context(Context::cast(args[1])->declaration_context()); + + Handle<String> name(String::cast(args[2])); int index; PropertyAttributes attributes; @@ -1640,39 +1571,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) { Handle<Object> holder = context->Lookup(name, flags, &index, &attributes, &binding_flags); - // In most situations, the property introduced by the const - // declaration should be present in the context extension object. - // However, because declaration and initialization are separate, the - // property might have been deleted (if it was introduced by eval) - // before we reach the initialization point. - // - // Example: - // - // function f() { eval("delete x; const x;"); } - // - // In that case, the initialization behaves like a normal assignment - // to property 'x'. if (index >= 0) { - if (holder->IsContext()) { - // Property was found in a context. Perform the assignment if we - // found some non-constant or an uninitialized constant. - Handle<Context> context = Handle<Context>::cast(holder); - if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) { - context->set(index, *value); - } - } else { - // The holder is an arguments object. - ASSERT((attributes & READ_ONLY) == 0); - Handle<JSObject> arguments(Handle<JSObject>::cast(holder)); - RETURN_IF_EMPTY_HANDLE( - isolate, - SetElement(arguments, index, value, kNonStrictMode)); + ASSERT(holder->IsContext()); + // Property was found in a context. Perform the assignment if we + // found some non-constant or an uninitialized constant. + Handle<Context> context = Handle<Context>::cast(holder); + if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) { + context->set(index, *value); } return *value; } - // The property could not be found, we introduce it in the global - // context. + // The property could not be found, we introduce it as a property of the + // global object. if (attributes == ABSENT) { Handle<JSObject> global = Handle<JSObject>( isolate->context()->global()); @@ -1683,29 +1594,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) { return *value; } - // The property was present in a context extension object. - Handle<JSObject> context_ext = Handle<JSObject>::cast(holder); + // The property was present in some function's context extension object, + // as a property on the subject of a with, or as a property of the global + // object. + // + // In most situations, eval-introduced consts should still be present in + // the context extension object. 
However, because declaration and + // initialization are separate, the property might have been deleted + // before we reach the initialization point. + // + // Example: + // + // function f() { eval("delete x; const x;"); } + // + // In that case, the initialization behaves like a normal assignment. + Handle<JSObject> object = Handle<JSObject>::cast(holder); - if (*context_ext == context->extension()) { - // This is the property that was introduced by the const - // declaration. Set it if it hasn't been set before. NOTE: We - // cannot use GetProperty() to get the current value as it - // 'unholes' the value. + if (*object == context->extension()) { + // This is the property that was introduced by the const declaration. + // Set it if it hasn't been set before. NOTE: We cannot use + // GetProperty() to get the current value as it 'unholes' the value. LookupResult lookup; - context_ext->LocalLookupRealNamedProperty(*name, &lookup); + object->LocalLookupRealNamedProperty(*name, &lookup); ASSERT(lookup.IsProperty()); // the property was declared ASSERT(lookup.IsReadOnly()); // and it was declared as read-only PropertyType type = lookup.type(); if (type == FIELD) { - FixedArray* properties = context_ext->properties(); + FixedArray* properties = object->properties(); int index = lookup.GetFieldIndex(); if (properties->get(index)->IsTheHole()) { properties->set(index, *value); } } else if (type == NORMAL) { - if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) { - context_ext->SetNormalizedProperty(&lookup, *value); + if (object->GetNormalizedProperty(&lookup)->IsTheHole()) { + object->SetNormalizedProperty(&lookup, *value); } } else { // We should not reach here. Any real, named property should be @@ -1713,13 +1636,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) { UNREACHABLE(); } } else { - // The property was found in a different context extension object. - // Set it if it is not a read-only property. + // The property was found on some other object. Set it if it is not a + // read-only property. if ((attributes & READ_ONLY) == 0) { // Strict mode not needed (const disallowed in strict mode). RETURN_IF_EMPTY_HANDLE( isolate, - SetProperty(context_ext, name, value, attributes, kNonStrictMode)); + SetProperty(object, name, value, attributes, kNonStrictMode)); } } @@ -1740,6 +1663,19 @@ RUNTIME_FUNCTION(MaybeObject*, } +RUNTIME_FUNCTION(MaybeObject*, Runtime_NonSmiElementStored) { + ASSERT(args.length() == 1); + CONVERT_ARG_CHECKED(JSObject, object, 0); + if (object->HasFastSmiOnlyElements()) { + MaybeObject* maybe_map = object->GetElementsTransitionMap(FAST_ELEMENTS); + Map* map; + if (!maybe_map->To<Map>(&map)) return maybe_map; + object->set_map(Map::cast(map)); + } + return *object; +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) { HandleScope scope(isolate); ASSERT(args.length() == 4); @@ -1825,7 +1761,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) { regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, multiline); regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex, Smi::FromInt(0), - SKIP_WRITE_BARRIER); + SKIP_WRITE_BARRIER); // It's a Smi. return regexp; } @@ -2239,9 +2175,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) { literals->set(JSFunction::kLiteralGlobalContextIndex, context->global_context()); } - // It's okay to skip the write barrier here because the literals - // are guaranteed to be in old space. 
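(Editorial aside, not part of the patch.) The EnsureCanContainNonSmiElements() call added just below belongs to the same FAST_SMI_ONLY_ELEMENTS scheme introduced earlier in this file (compare Runtime_NonSmiElementStored above). A hypothetical fragment showing the transition being guarded, assuming 'array' is a Handle<JSObject> inside a MaybeObject*-returning runtime function:

    if (array->HasFastSmiOnlyElements()) {
      // The array still has the new smi-only elements kind; switch its map to
      // FAST_ELEMENTS before a non-smi value is stored into it.
      MaybeObject* maybe_map = array->GetElementsTransitionMap(FAST_ELEMENTS);
      Map* map;
      if (!maybe_map->To<Map>(&map)) return maybe_map;  // propagate allocation failure
      array->set_map(Map::cast(map));
    }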
- target->set_literals(*literals, SKIP_WRITE_BARRIER); + target->set_literals(*literals); target->set_next_function_link(isolate->heap()->undefined_value()); if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) { @@ -2325,7 +2259,8 @@ class FixedArrayBuilder { public: explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity) : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)), - length_(0) { + length_(0), + has_non_smi_elements_(false) { // Require a non-zero initial size. Ensures that doubling the size to // extend the array will work. ASSERT(initial_capacity > 0); @@ -2333,7 +2268,8 @@ class FixedArrayBuilder { explicit FixedArrayBuilder(Handle<FixedArray> backing_store) : array_(backing_store), - length_(0) { + length_(0), + has_non_smi_elements_(false) { // Require a non-zero initial size. Ensures that doubling the size to // extend the array will work. ASSERT(backing_store->length() > 0); @@ -2361,12 +2297,15 @@ class FixedArrayBuilder { } void Add(Object* value) { + ASSERT(!value->IsSmi()); ASSERT(length_ < capacity()); array_->set(length_, value); length_++; + has_non_smi_elements_ = true; } void Add(Smi* value) { + ASSERT(value->IsSmi()); ASSERT(length_ < capacity()); array_->set(length_, value); length_++; @@ -2391,7 +2330,7 @@ class FixedArrayBuilder { } Handle<JSArray> ToJSArray(Handle<JSArray> target_array) { - target_array->set_elements(*array_); + FACTORY->SetContent(target_array, array_); target_array->set_length(Smi::FromInt(length_)); return target_array; } @@ -2399,6 +2338,7 @@ class FixedArrayBuilder { private: Handle<FixedArray> array_; int length_; + bool has_non_smi_elements_; }; @@ -2893,7 +2833,7 @@ void FindStringIndicesDispatch(Isolate* isolate, } } else { Vector<const uc16> subject_vector = subject_content.ToUC16Vector(); - if (pattern->IsAsciiRepresentation()) { + if (pattern_content.IsAscii()) { FindStringIndices(isolate, subject_vector, pattern_content.ToAsciiVector(), @@ -3019,7 +2959,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString( // Shortcut for simple non-regexp global replacements if (is_global && - regexp->TypeTag() == JSRegExp::ATOM && + regexp_handle->TypeTag() == JSRegExp::ATOM && compiled_replacement.simple_hint()) { if (subject_handle->HasOnlyAsciiChars() && replacement_handle->HasOnlyAsciiChars()) { @@ -3242,6 +3182,9 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString( Address end_of_string = answer->address() + string_size; isolate->heap()->CreateFillerObjectAt(end_of_string, delta); + if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) { + MemoryChunk::IncrementLiveBytes(answer->address(), -delta); + } return *answer; } @@ -4001,13 +3944,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) { // Slow case. 
CONVERT_DOUBLE_ARG_CHECKED(value, 0); if (isnan(value)) { - return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN")); + return *isolate->factory()->nan_symbol(); } if (isinf(value)) { if (value < 0) { - return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity")); + return *isolate->factory()->minus_infinity_symbol(); } - return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity")); + return *isolate->factory()->infinity_symbol(); } char* str = DoubleToRadixCString(value, radix); MaybeObject* result = @@ -4023,13 +3966,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) { CONVERT_DOUBLE_ARG_CHECKED(value, 0); if (isnan(value)) { - return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN")); + return *isolate->factory()->nan_symbol(); } if (isinf(value)) { if (value < 0) { - return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity")); + return *isolate->factory()->minus_infinity_symbol(); } - return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity")); + return *isolate->factory()->infinity_symbol(); } CONVERT_DOUBLE_ARG_CHECKED(f_number, 1); int f = FastD2I(f_number); @@ -4048,13 +3991,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) { CONVERT_DOUBLE_ARG_CHECKED(value, 0); if (isnan(value)) { - return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN")); + return *isolate->factory()->nan_symbol(); } if (isinf(value)) { if (value < 0) { - return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity")); + return *isolate->factory()->minus_infinity_symbol(); } - return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity")); + return *isolate->factory()->infinity_symbol(); } CONVERT_DOUBLE_ARG_CHECKED(f_number, 1); int f = FastD2I(f_number); @@ -4073,13 +4016,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) { CONVERT_DOUBLE_ARG_CHECKED(value, 0); if (isnan(value)) { - return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN")); + return *isolate->factory()->nan_symbol(); } if (isinf(value)) { if (value < 0) { - return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity")); + return *isolate->factory()->minus_infinity_symbol(); } - return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity")); + return *isolate->factory()->infinity_symbol(); } CONVERT_DOUBLE_ARG_CHECKED(f_number, 1); int f = FastD2I(f_number); @@ -4269,7 +4212,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) { CONVERT_CHECKED(String, name, args[1]); CONVERT_CHECKED(Smi, flag_setter, args[2]); Object* fun = args[3]; - RUNTIME_ASSERT(fun->IsJSFunction() || fun->IsUndefined()); + RUNTIME_ASSERT(fun->IsSpecFunction() || fun->IsUndefined()); CONVERT_CHECKED(Smi, flag_attr, args[4]); int unchecked = flag_attr->value(); RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0); @@ -4437,6 +4380,14 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate, return isolate->Throw(*error); } + if (object->IsJSProxy()) { + bool has_pending_exception = false; + Handle<Object> name = Execution::ToString(key, &has_pending_exception); + if (has_pending_exception) return Failure::Exception(); + return JSProxy::cast(*object)->SetProperty( + String::cast(*name), *value, attr, strict_mode); + } + // If the object isn't a JavaScript object, we ignore the store. if (!object->IsJSObject()) return *value; @@ -4556,7 +4507,7 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate, // Check if the given key is an array index. 
uint32_t index; - if (receiver->IsJSObject() && key->ToArrayIndex(&index)) { + if (key->ToArrayIndex(&index)) { // In Firefox/SpiderMonkey, Safari and Opera you can access the // characters of a string using [] notation. In the case of a // String object we just need to redirect the deletion to the @@ -4567,8 +4518,7 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate, return isolate->heap()->true_value(); } - return JSObject::cast(*receiver)->DeleteElement( - index, JSReceiver::FORCE_DELETION); + return receiver->DeleteElement(index, JSReceiver::FORCE_DELETION); } Handle<String> key_string; @@ -4730,29 +4680,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) { RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) { NoHandleAllocation na; ASSERT(args.length() == 2); + CONVERT_CHECKED(JSReceiver, receiver, args[0]); + CONVERT_CHECKED(String, key, args[1]); - // Only JS receivers can have properties. - if (args[0]->IsJSReceiver()) { - JSReceiver* receiver = JSReceiver::cast(args[0]); - CONVERT_CHECKED(String, key, args[1]); - if (receiver->HasProperty(key)) return isolate->heap()->true_value(); - } - return isolate->heap()->false_value(); + bool result = receiver->HasProperty(key); + if (isolate->has_pending_exception()) return Failure::Exception(); + return isolate->heap()->ToBoolean(result); } RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) { NoHandleAllocation na; ASSERT(args.length() == 2); + CONVERT_CHECKED(JSReceiver, receiver, args[0]); + CONVERT_CHECKED(Smi, index, args[1]); - // Only JS objects can have elements. - if (args[0]->IsJSObject()) { - JSObject* object = JSObject::cast(args[0]); - CONVERT_CHECKED(Smi, index_obj, args[1]); - uint32_t index = index_obj->value(); - if (object->HasElement(index)) return isolate->heap()->true_value(); - } - return isolate->heap()->false_value(); + bool result = receiver->HasElement(index->value()); + if (isolate->has_pending_exception()) return Failure::Exception(); + return isolate->heap()->ToBoolean(result); } @@ -4765,7 +4710,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) { uint32_t index; if (key->AsArrayIndex(&index)) { - return isolate->heap()->ToBoolean(object->HasElement(index)); + JSObject::LocalElementType type = object->HasLocalElement(index); + switch (type) { + case JSObject::UNDEFINED_ELEMENT: + case JSObject::STRING_CHARACTER_ELEMENT: + return isolate->heap()->false_value(); + case JSObject::INTERCEPTED_ELEMENT: + case JSObject::FAST_ELEMENT: + return isolate->heap()->true_value(); + case JSObject::DICTIONARY_ELEMENT: { + if (object->IsJSGlobalProxy()) { + Object* proto = object->GetPrototype(); + if (proto->IsNull()) { + return isolate->heap()->false_value(); + } + ASSERT(proto->IsJSGlobalObject()); + object = JSObject::cast(proto); + } + FixedArray* elements = FixedArray::cast(object->elements()); + NumberDictionary* dictionary = NULL; + if (elements->map() == + isolate->heap()->non_strict_arguments_elements_map()) { + dictionary = NumberDictionary::cast(elements->get(1)); + } else { + dictionary = NumberDictionary::cast(elements); + } + int entry = dictionary->FindEntry(index); + ASSERT(entry != NumberDictionary::kNotFound); + PropertyDetails details = dictionary->DetailsAt(entry); + return isolate->heap()->ToBoolean(!details.IsDontEnum()); + } + } } PropertyAttributes att = object->GetLocalPropertyAttribute(key); @@ -5579,7 +5554,7 @@ static MaybeObject* SlowQuoteJsonString(Isolate* isolate, StringType* new_string = StringType::cast(new_object); Char* write_cursor = 
reinterpret_cast<Char*>( - new_string->address() + SeqAsciiString::kHeaderSize); + new_string->address() + SeqString::kHeaderSize); if (comma) *(write_cursor++) = ','; *(write_cursor++) = '"'; @@ -5667,16 +5642,15 @@ static MaybeObject* QuoteJsonString(Isolate* isolate, StringType* new_string = StringType::cast(new_object); ASSERT(isolate->heap()->new_space()->Contains(new_string)); - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); Char* write_cursor = reinterpret_cast<Char*>( - new_string->address() + SeqAsciiString::kHeaderSize); + new_string->address() + SeqString::kHeaderSize); if (comma) *(write_cursor++) = ','; write_cursor = WriteQuoteJsonString<Char, Char>(isolate, write_cursor, characters); int final_length = static_cast<int>( write_cursor - reinterpret_cast<Char*>( - new_string->address() + SeqAsciiString::kHeaderSize)); + new_string->address() + SeqString::kHeaderSize)); isolate->heap()->new_space()-> template ShrinkStringAtAllocationBoundary<StringType>( new_string, final_length); @@ -5754,9 +5728,8 @@ static MaybeObject* QuoteJsonStringArray(Isolate* isolate, StringType* new_string = StringType::cast(new_object); ASSERT(isolate->heap()->new_space()->Contains(new_string)); - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); Char* write_cursor = reinterpret_cast<Char*>( - new_string->address() + SeqAsciiString::kHeaderSize); + new_string->address() + SeqString::kHeaderSize); *(write_cursor++) = '['; for (int i = 0; i < length; i++) { if (i != 0) *(write_cursor++) = ','; @@ -5777,7 +5750,7 @@ static MaybeObject* QuoteJsonStringArray(Isolate* isolate, int final_length = static_cast<int>( write_cursor - reinterpret_cast<Char*>( - new_string->address() + SeqAsciiString::kHeaderSize)); + new_string->address() + SeqString::kHeaderSize)); isolate->heap()->new_space()-> template ShrinkStringAtAllocationBoundary<StringType>( new_string, final_length); @@ -6146,7 +6119,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) { static inline bool IsTrimWhiteSpace(unibrow::uchar c) { - return unibrow::WhiteSpace::Is(c) || c == 0x200b; + return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff; } @@ -6229,6 +6202,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) { int part_count = indices.length(); Handle<JSArray> result = isolate->factory()->NewJSArray(part_count); + MaybeObject* maybe_result = result->EnsureCanContainNonSmiElements(); + if (maybe_result->IsFailure()) return maybe_result; result->set_length(Smi::FromInt(part_count)); ASSERT(result->HasFastElements()); @@ -6275,11 +6250,11 @@ static int CopyCachedAsciiCharsToArray(Heap* heap, FixedArray* ascii_cache = heap->single_character_string_cache(); Object* undefined = heap->undefined_value(); int i; + WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc); for (i = 0; i < length; ++i) { Object* value = ascii_cache->get(chars[i]); if (value == undefined) break; - ASSERT(!heap->InNewSpace(value)); - elements->set(i, value, SKIP_WRITE_BARRIER); + elements->set(i, value, mode); } if (i < length) { ASSERT(Smi::FromInt(0) == 0); @@ -6603,6 +6578,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) { // This assumption is used by the slice encoding in one or two smis. 
ASSERT(Smi::kMaxValue >= String::kMaxLength); + MaybeObject* maybe_result = array->EnsureCanContainNonSmiElements(); + if (maybe_result->IsFailure()) return maybe_result; + int special_length = special->length(); if (!array->HasFastElements()) { return isolate->Throw(isolate->heap()->illegal_argument_symbol()); @@ -6830,7 +6808,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) { NoHandleAllocation ha; ASSERT(args.length() == 3); CONVERT_CHECKED(JSArray, elements_array, args[0]); - RUNTIME_ASSERT(elements_array->HasFastElements()); + RUNTIME_ASSERT(elements_array->HasFastElements() || + elements_array->HasFastSmiOnlyElements()); CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]); CONVERT_CHECKED(String, separator, args[2]); // elements_array is fast-mode JSarray of alternating positions @@ -7947,8 +7926,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) { } -static SmartArrayPointer<Object**> GetNonBoundArguments(int bound_argc, - int* total_argc) { +static SmartArrayPointer<Handle<Object> > GetNonBoundArguments( + int bound_argc, + int* total_argc) { // Find frame containing arguments passed to the caller. JavaScriptFrameIterator it; JavaScriptFrame* frame = it.frame(); @@ -7964,10 +7944,11 @@ static SmartArrayPointer<Object**> GetNonBoundArguments(int bound_argc, &args_slots); *total_argc = bound_argc + args_count; - SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc)); + SmartArrayPointer<Handle<Object> > param_data( + NewArray<Handle<Object> >(*total_argc)); for (int i = 0; i < args_count; i++) { Handle<Object> val = args_slots[i].GetValue(); - param_data[bound_argc + i] = val.location(); + param_data[bound_argc + i] = val; } return param_data; } else { @@ -7976,10 +7957,11 @@ static SmartArrayPointer<Object**> GetNonBoundArguments(int bound_argc, int args_count = frame->ComputeParametersCount(); *total_argc = bound_argc + args_count; - SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc)); + SmartArrayPointer<Handle<Object> > param_data( + NewArray<Handle<Object> >(*total_argc)); for (int i = 0; i < args_count; i++) { Handle<Object> val = Handle<Object>(frame->GetParameter(i)); - param_data[bound_argc + i] = val.location(); + param_data[bound_argc + i] = val; } return param_data; } @@ -7997,17 +7979,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) { int bound_argc = 0; if (!args[1]->IsNull()) { CONVERT_ARG_CHECKED(JSArray, params, 1); - RUNTIME_ASSERT(params->HasFastElements()); + RUNTIME_ASSERT(params->HasFastTypeElements()); bound_args = Handle<FixedArray>(FixedArray::cast(params->elements())); bound_argc = Smi::cast(params->length())->value(); } int total_argc = 0; - SmartArrayPointer<Object**> param_data = + SmartArrayPointer<Handle<Object> > param_data = GetNonBoundArguments(bound_argc, &total_argc); for (int i = 0; i < bound_argc; i++) { Handle<Object> val = Handle<Object>(bound_args->get(i)); - param_data[i] = val.location(); + param_data[i] = val; } bool exception = false; @@ -8196,6 +8178,31 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) { } +class ActivationsFinder : public ThreadVisitor { + public: + explicit ActivationsFinder(JSFunction* function) + : function_(function), has_activations_(false) {} + + void VisitThread(Isolate* isolate, ThreadLocalTop* top) { + if (has_activations_) return; + + for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) { + JavaScriptFrame* frame = it.frame(); + if (frame->is_optimized() && frame->function() == function_) { + 
has_activations_ = true; + return; + } + } + } + + bool has_activations() { return has_activations_; } + + private: + JSFunction* function_; + bool has_activations_; +}; + + RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) { HandleScope scope(isolate); ASSERT(args.length() == 1); @@ -8242,17 +8249,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) { return isolate->heap()->undefined_value(); } - // Count the number of optimized activations of the function. - int activations = 0; + // Find other optimized activations of the function. + bool has_other_activations = false; while (!it.done()) { JavaScriptFrame* frame = it.frame(); if (frame->is_optimized() && frame->function() == *function) { - activations++; + has_other_activations = true; + break; } it.Advance(); } - if (activations == 0) { + if (!has_other_activations) { + ActivationsFinder activations_finder(*function); + isolate->thread_manager()->IterateArchivedThreads(&activations_finder); + has_other_activations = activations_finder.has_activations(); + } + + if (!has_other_activations) { if (FLAG_trace_deopt) { PrintF("[removing optimized code for: "); function->PrintName(); @@ -8307,6 +8321,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) { HandleScope scope(isolate); ASSERT(args.length() == 1); + // The least significant bit (after untagging) indicates whether the + // function is currently optimized, regardless of reason. if (!V8::UseCrankshaft()) { return Smi::FromInt(4); // 4 == "never". } @@ -8479,11 +8495,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) { argv[i] = Handle<Object>(object); } - bool threw = false; + bool threw; Handle<JSReceiver> hfun(fun); Handle<Object> hreceiver(receiver); - Handle<Object> result = Execution::Call( - hfun, hreceiver, argc, reinterpret_cast<Object***>(argv), &threw, true); + Handle<Object> result = + Execution::Call(hfun, hreceiver, argc, argv, &threw, true); if (threw) return Failure::Exception(); return *result; @@ -8646,18 +8662,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) { } // The slot was found in a JSObject, either a context extension object, - // the global object, or an arguments object. Try to delete it - // (respecting DONT_DELETE). For consistency with V8's usual behavior, - // which allows deleting all parameters in functions that mention - // 'arguments', we do this even for the case of slots found on an - // arguments object. The slot was found on an arguments object if the - // index is non-negative. + // the global object, or the subject of a with. Try to delete it + // (respecting DONT_DELETE). Handle<JSObject> object = Handle<JSObject>::cast(holder); - if (index >= 0) { - return object->DeleteElement(index, JSReceiver::NORMAL_DELETION); - } else { - return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION); - } + return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION); } @@ -8742,24 +8750,19 @@ static ObjectPair LoadContextSlotHelper(Arguments args, &attributes, &binding_flags); - // If the index is non-negative, the slot has been found in a local - // variable or a parameter. Read it from the context object or the - // arguments object. + // If the index is non-negative, the slot has been found in a context. if (index >= 0) { - // If the "property" we were looking for is a local variable or an - // argument in a context, the receiver is the global object; see - // ECMA-262, 3rd., 10.1.6 and 10.2.3. 
+ ASSERT(holder->IsContext()); + // If the "property" we were looking for is a local variable, the + // receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3. // - // Use the hole as the receiver to signal that the receiver is - // implicit and that the global receiver should be used. + // Use the hole as the receiver to signal that the receiver is implicit + // and that the global receiver should be used (as distinguished from an + // explicit receiver that happens to be a global object). Handle<Object> receiver = isolate->factory()->the_hole_value(); - MaybeObject* value = (holder->IsContext()) - ? Context::cast(*holder)->get(index) - : JSObject::cast(*holder)->GetElement(index); + Object* value = Context::cast(*holder)->get(index); // Check for uninitialized bindings. - if (holder->IsContext() && - binding_flags == MUTABLE_CHECK_INITIALIZED && - value->IsTheHole()) { + if (binding_flags == MUTABLE_CHECK_INITIALIZED && value->IsTheHole()) { Handle<Object> reference_error = isolate->factory()->NewReferenceError("not_defined", HandleVector(&name, 1)); @@ -8769,25 +8772,18 @@ static ObjectPair LoadContextSlotHelper(Arguments args, } } - // If the holder is found, we read the property from it. - if (!holder.is_null() && holder->IsJSObject()) { - ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name)); - JSObject* object = JSObject::cast(*holder); - Object* receiver; - if (object->IsGlobalObject()) { - receiver = GlobalObject::cast(object)->global_receiver(); - } else if (context->is_exception_holder(*holder)) { - // Use the hole as the receiver to signal that the receiver is - // implicit and that the global receiver should be used. - receiver = isolate->heap()->the_hole_value(); - } else { - receiver = ComputeReceiverForNonGlobal(isolate, object); - } - + // Otherwise, if the slot was found the holder is a context extension + // object, subject of a with, or a global object. We read the named + // property from it. + if (!holder.is_null()) { + Handle<JSObject> object = Handle<JSObject>::cast(holder); + ASSERT(object->HasProperty(*name)); // GetProperty below can cause GC. - Handle<Object> receiver_handle(receiver); + Handle<Object> receiver_handle(object->IsGlobalObject() + ? GlobalObject::cast(*object)->global_receiver() + : ComputeReceiverForNonGlobal(isolate, *object)); - // No need to unhole the value here. This is taken care of by the + // No need to unhole the value here. This is taken care of by the // GetProperty function. MaybeObject* value = object->GetProperty(*name); return MakePair(value, *receiver_handle); @@ -8840,45 +8836,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) { &binding_flags); if (index >= 0) { - if (holder->IsContext()) { - Handle<Context> context = Handle<Context>::cast(holder); - if (binding_flags == MUTABLE_CHECK_INITIALIZED && - context->get(index)->IsTheHole()) { - Handle<Object> error = - isolate->factory()->NewReferenceError("not_defined", - HandleVector(&name, 1)); - return isolate->Throw(*error); - } - // Ignore if read_only variable. - if ((attributes & READ_ONLY) == 0) { - // Context is a fixed array and set cannot fail. - context->set(index, *value); - } else if (strict_mode == kStrictMode) { - // Setting read only property in strict mode. 
- Handle<Object> error = - isolate->factory()->NewTypeError("strict_cannot_assign", - HandleVector(&name, 1)); - return isolate->Throw(*error); - } - } else { - ASSERT((attributes & READ_ONLY) == 0); - Handle<Object> result = - SetElement(Handle<JSObject>::cast(holder), index, value, strict_mode); - if (result.is_null()) { - ASSERT(isolate->has_pending_exception()); - return Failure::Exception(); - } + // The property was found in a context slot. + Handle<Context> context = Handle<Context>::cast(holder); + if (binding_flags == MUTABLE_CHECK_INITIALIZED && + context->get(index)->IsTheHole()) { + Handle<Object> error = + isolate->factory()->NewReferenceError("not_defined", + HandleVector(&name, 1)); + return isolate->Throw(*error); + } + // Ignore if read_only variable. + if ((attributes & READ_ONLY) == 0) { + // Context is a fixed array and set cannot fail. + context->set(index, *value); + } else if (strict_mode == kStrictMode) { + // Setting read only property in strict mode. + Handle<Object> error = + isolate->factory()->NewTypeError("strict_cannot_assign", + HandleVector(&name, 1)); + return isolate->Throw(*error); } return *value; } - // Slow case: The property is not in a FixedArray context. - // It is either in an JSObject extension context or it was not found. - Handle<JSObject> context_ext; + // Slow case: The property is not in a context slot. It is either in a + // context extension object, a property of the subject of a with, or a + // property of the global object. + Handle<JSObject> object; if (!holder.is_null()) { - // The property exists in the extension context. - context_ext = Handle<JSObject>::cast(holder); + // The property exists on the holder. + object = Handle<JSObject>::cast(holder); } else { // The property was not found. ASSERT(attributes == ABSENT); @@ -8886,22 +8874,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) { if (strict_mode == kStrictMode) { // Throw in strict mode (assignment to undefined variable). Handle<Object> error = - isolate->factory()->NewReferenceError( - "not_defined", HandleVector(&name, 1)); + isolate->factory()->NewReferenceError( + "not_defined", HandleVector(&name, 1)); return isolate->Throw(*error); } - // In non-strict mode, the property is stored in the global context. + // In non-strict mode, the property is added to the global object. attributes = NONE; - context_ext = Handle<JSObject>(isolate->context()->global()); + object = Handle<JSObject>(isolate->context()->global()); } - // Set the property, but ignore if read_only variable on the context - // extension object itself. + // Set the property if it's not read only or doesn't yet exist. if ((attributes & READ_ONLY) == 0 || - (context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) { + (object->GetLocalPropertyAttribute(*name) == ABSENT)) { RETURN_IF_EMPTY_HANDLE( isolate, - SetProperty(context_ext, name, value, NONE, strict_mode)); + SetProperty(object, name, value, NONE, strict_mode)); } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) { // Setting read only property in strict mode. 
Handle<Object> error = @@ -9121,6 +9108,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) { FlattenString(str); CONVERT_ARG_CHECKED(JSArray, output, 1); + + MaybeObject* maybe_result_array = + output->EnsureCanContainNonSmiElements(); + if (maybe_result_array->IsFailure()) return maybe_result_array; RUNTIME_ASSERT(output->HasFastElements()); AssertNoAllocation no_allocation; @@ -9306,6 +9297,9 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) { PropertyAttributes attributes = ABSENT; BindingFlags binding_flags; while (true) { + // Don't follow context chains in Context::Lookup and implement the loop + // up the context chain here, so that we can know the context where eval + // was found. receiver = context->Lookup(isolate->factory()->eval_symbol(), FOLLOW_PROTOTYPE_CHAIN, &index, @@ -9421,7 +9415,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) { ASSERT(args.length() == 2); CONVERT_CHECKED(JSArray, array, args[0]); CONVERT_CHECKED(JSObject, element, args[1]); - RUNTIME_ASSERT(array->HasFastElements()); + RUNTIME_ASSERT(array->HasFastElements() || array->HasFastSmiOnlyElements()); int length = Smi::cast(array->length())->value(); FixedArray* elements = FixedArray::cast(array->elements()); for (int i = 0; i < length; i++) { @@ -9504,9 +9498,11 @@ class ArrayConcatVisitor { isolate_->factory()->NewNumber(static_cast<double>(index_offset_)); Handle<Map> map; if (fast_elements_) { - map = isolate_->factory()->GetFastElementsMap(Handle<Map>(array->map())); + map = isolate_->factory()->GetElementsTransitionMap(array, + FAST_ELEMENTS); } else { - map = isolate_->factory()->GetSlowElementsMap(Handle<Map>(array->map())); + map = isolate_->factory()->GetElementsTransitionMap(array, + DICTIONARY_ELEMENTS); } array->set_map(*map); array->set_length(*length); @@ -9650,6 +9646,7 @@ static void CollectElementIndices(Handle<JSObject> object, List<uint32_t>* indices) { ElementsKind kind = object->GetElementsKind(); switch (kind) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: { Handle<FixedArray> elements(FixedArray::cast(object->elements())); uint32_t length = static_cast<uint32_t>(elements->length()); @@ -9769,6 +9766,7 @@ static bool IterateElements(Isolate* isolate, ArrayConcatVisitor* visitor) { uint32_t length = static_cast<uint32_t>(receiver->length()->Number()); switch (receiver->GetElementsKind()) { + case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: { // Run through the elements FixedArray and use HasElement and GetElement // to check the prototype for missing elements. 
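// Illustration (hypothetical, not part of the patch): several hunks above add a
// FAST_SMI_ONLY_ELEMENTS case to elements-kind switches and call
// EnsureCanContainNonSmiElements() before runtime functions may store non-smi
// values into an array's backing store. The standalone sketch below is a toy
// model of that transition; ToyArray, Value and the enum here are made-up
// types, not the real V8 heap objects.

#include <cstdint>
#include <vector>

enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, DICTIONARY_ELEMENTS };

struct Value {
  bool is_smi;      // true if the value is a small tagged integer.
  intptr_t payload;  // the integer itself, or an opaque pointer-sized word.
};

struct ToyArray {
  ElementsKind kind = FAST_SMI_ONLY_ELEMENTS;
  std::vector<Value> slots;

  // Models the intent of EnsureCanContainNonSmiElements(): widen the
  // elements kind once, before the first non-smi store.
  void EnsureCanContainNonSmiElements() {
    if (kind == FAST_SMI_ONLY_ELEMENTS) kind = FAST_ELEMENTS;
  }

  void Store(size_t index, Value v) {
    if (!v.is_smi) EnsureCanContainNonSmiElements();
    if (index >= slots.size()) slots.resize(index + 1, Value{true, 0});
    slots[index] = v;
  }
};

// Usage sketch: the first store keeps the smi-only kind, the second widens it.
//   ToyArray a;
//   a.Store(0, Value{true, 7});    // kind stays FAST_SMI_ONLY_ELEMENTS
//   a.Store(1, Value{false, 0});   // kind becomes FAST_ELEMENTS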
@@ -9997,15 +9995,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) { CONVERT_CHECKED(JSArray, to, args[1]); FixedArrayBase* new_elements = from->elements(); MaybeObject* maybe_new_map; + ElementsKind elements_kind; if (new_elements->map() == isolate->heap()->fixed_array_map() || new_elements->map() == isolate->heap()->fixed_cow_array_map()) { - maybe_new_map = to->map()->GetFastElementsMap(); + elements_kind = FAST_ELEMENTS; } else if (new_elements->map() == isolate->heap()->fixed_double_array_map()) { - maybe_new_map = to->map()->GetFastDoubleElementsMap(); + elements_kind = FAST_DOUBLE_ELEMENTS; } else { - maybe_new_map = to->map()->GetSlowElementsMap(); + elements_kind = DICTIONARY_ELEMENTS; } + maybe_new_map = to->GetElementsTransitionMap(elements_kind); Object* new_map; if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map; to->set_map(Map::cast(new_map)); @@ -10090,7 +10090,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) { } return *isolate->factory()->NewJSArrayWithElements(keys); } else { - ASSERT(array->HasFastElements() || array->HasFastDoubleElements()); + ASSERT(array->HasFastElements() || + array->HasFastSmiOnlyElements() || + array->HasFastDoubleElements()); Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2); // -1 means start of array. single_interval->set(0, Smi::FromInt(-1)); @@ -10209,8 +10211,8 @@ static MaybeObject* DebugLookupResultValue(Heap* heap, case CALLBACKS: { Object* structure = result->GetCallbackObject(); if (structure->IsForeign() || structure->IsAccessorInfo()) { - MaybeObject* maybe_value = receiver->GetPropertyWithCallback( - receiver, structure, name, result->holder()); + MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback( + receiver, structure, name); if (!maybe_value->ToObject(&value)) { if (maybe_value->IsRetryAfterGC()) return maybe_value; ASSERT(maybe_value->IsException()); @@ -11460,48 +11462,53 @@ Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate, int target_start_position = RelocInfo::kNoPosition; Handle<SharedFunctionInfo> target; while (!done) { - HeapIterator iterator; - for (HeapObject* obj = iterator.next(); - obj != NULL; obj = iterator.next()) { - if (obj->IsSharedFunctionInfo()) { - Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj)); - if (shared->script() == *script) { - // If the SharedFunctionInfo found has the requested script data and - // contains the source position it is a candidate. - int start_position = shared->function_token_position(); - if (start_position == RelocInfo::kNoPosition) { - start_position = shared->start_position(); - } - if (start_position <= position && - position <= shared->end_position()) { - // If there is no candidate or this function is within the current - // candidate this is the new candidate. - if (target.is_null()) { - target_start_position = start_position; - target = shared; - } else { - if (target_start_position == start_position && - shared->end_position() == target->end_position()) { - // If a top-level function contain only one function - // declartion the source for the top-level and the function is - // the same. In that case prefer the non top-level function. - if (!shared->is_toplevel()) { + { // Extra scope for iterator and no-allocation. 
+ isolate->heap()->EnsureHeapIsIterable(); + AssertNoAllocation no_alloc_during_heap_iteration; + HeapIterator iterator; + for (HeapObject* obj = iterator.next(); + obj != NULL; obj = iterator.next()) { + if (obj->IsSharedFunctionInfo()) { + Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj)); + if (shared->script() == *script) { + // If the SharedFunctionInfo found has the requested script data and + // contains the source position it is a candidate. + int start_position = shared->function_token_position(); + if (start_position == RelocInfo::kNoPosition) { + start_position = shared->start_position(); + } + if (start_position <= position && + position <= shared->end_position()) { + // If there is no candidate or this function is within the current + // candidate this is the new candidate. + if (target.is_null()) { + target_start_position = start_position; + target = shared; + } else { + if (target_start_position == start_position && + shared->end_position() == target->end_position()) { + // If a top-level function contain only one function + // declartion the source for the top-level and the + // function is the same. In that case prefer the non + // top-level function. + if (!shared->is_toplevel()) { + target_start_position = start_position; + target = shared; + } + } else if (target_start_position <= start_position && + shared->end_position() <= target->end_position()) { + // This containment check includes equality as a function + // inside a top-level function can share either start or end + // position with the top-level function. target_start_position = start_position; target = shared; } - } else if (target_start_position <= start_position && - shared->end_position() <= target->end_position()) { - // This containment check includes equality as a function inside - // a top-level function can share either start or end position - // with the top-level function. - target_start_position = start_position; - target = shared; } } } } - } - } + } // End for loop. + } // End No allocation scope. if (target.is_null()) { return isolate->heap()->undefined_value(); @@ -11516,7 +11523,7 @@ Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate, // functions which might contain the requested source position. CompileLazyShared(target, KEEP_EXCEPTION); } - } + } // End while loop. return *target; } @@ -11882,12 +11889,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) { &sinfo, function_context); // Invoke the evaluation function and return the result. - const int argc = 2; - Object** argv[argc] = { arguments.location(), - Handle<Object>::cast(source).location() }; + Handle<Object> argv[] = { arguments, source }; Handle<Object> result = - Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver, - argc, argv, &has_pending_exception); + Execution::Call(Handle<JSFunction>::cast(evaluation_function), + receiver, + ARRAY_SIZE(argv), + argv, + &has_pending_exception); if (has_pending_exception) return Failure::Exception(); // Skip the global proxy as it has no properties and always delegates to the @@ -11966,6 +11974,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) { Handle<Object> result = Execution::Call(compiled_function, receiver, 0, NULL, &has_pending_exception); + // Clear the oneshot breakpoints so that the debugger does not step further. 
+ isolate->debug()->ClearStepping(); if (has_pending_exception) return Failure::Exception(); return *result; } @@ -11993,13 +12003,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) { // Return result as a JS array. Handle<JSObject> result = isolate->factory()->NewJSObject(isolate->array_function()); - Handle<JSArray>::cast(result)->SetContent(*instances); + isolate->factory()->SetContent(Handle<JSArray>::cast(result), instances); return *result; } // Helper function used by Runtime_DebugReferencedBy below. -static int DebugReferencedBy(JSObject* target, +static int DebugReferencedBy(HeapIterator* iterator, + JSObject* target, Object* instance_filter, int max_references, FixedArray* instances, int instances_size, JSFunction* arguments_function) { @@ -12009,9 +12020,8 @@ static int DebugReferencedBy(JSObject* target, // Iterate the heap. int count = 0; JSObject* last = NULL; - HeapIterator iterator; HeapObject* heap_obj = NULL; - while (((heap_obj = iterator.next()) != NULL) && + while (((heap_obj = iterator->next()) != NULL) && (max_references == 0 || count < max_references)) { // Only look at all JSObjects. if (heap_obj->IsJSObject()) { @@ -12076,7 +12086,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) { ASSERT(args.length() == 3); // First perform a full GC in order to avoid references from dead objects. - isolate->heap()->CollectAllGarbage(false); + isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask); + // The heap iterator reserves the right to do a GC to make the heap iterable. + // Due to the GC above we know it won't need to do that, but it seems cleaner + // to get the heap iterator constructed before we start having unprotected + // Object* locals that are not protected by handles. // Check parameters. CONVERT_CHECKED(JSObject, target, args[0]); @@ -12086,6 +12100,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) { CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]); RUNTIME_ASSERT(max_references >= 0); + // Get the constructor function for context extension and arguments array. JSObject* arguments_boilerplate = isolate->context()->global_context()->arguments_boilerplate(); @@ -12094,7 +12109,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) { // Get the number of referencing objects. int count; - count = DebugReferencedBy(target, instance_filter, max_references, + HeapIterator heap_iterator; + count = DebugReferencedBy(&heap_iterator, + target, instance_filter, max_references, NULL, 0, arguments_function); // Allocate an array to hold the result. @@ -12105,30 +12122,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) { FixedArray* instances = FixedArray::cast(object); // Fill the referencing objects. - count = DebugReferencedBy(target, instance_filter, max_references, + // AllocateFixedArray above does not make the heap non-iterable. + ASSERT(HEAP->IsHeapIterable()); + HeapIterator heap_iterator2; + count = DebugReferencedBy(&heap_iterator2, + target, instance_filter, max_references, instances, count, arguments_function); // Return result as JS array. 
Object* result; - { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject( + MaybeObject* maybe_result = isolate->heap()->AllocateJSObject( isolate->context()->global_context()->array_function()); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - JSArray::cast(result)->SetContent(instances); - return result; + if (!maybe_result->ToObject(&result)) return maybe_result; + return JSArray::cast(result)->SetContent(instances); } // Helper function used by Runtime_DebugConstructedBy below. -static int DebugConstructedBy(JSFunction* constructor, int max_references, - FixedArray* instances, int instances_size) { +static int DebugConstructedBy(HeapIterator* iterator, + JSFunction* constructor, + int max_references, + FixedArray* instances, + int instances_size) { AssertNoAllocation no_alloc; // Iterate the heap. int count = 0; - HeapIterator iterator; HeapObject* heap_obj = NULL; - while (((heap_obj = iterator.next()) != NULL) && + while (((heap_obj = iterator->next()) != NULL) && (max_references == 0 || count < max_references)) { // Only look at all JSObjects. if (heap_obj->IsJSObject()) { @@ -12156,7 +12177,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) { ASSERT(args.length() == 2); // First perform a full GC in order to avoid dead objects. - isolate->heap()->CollectAllGarbage(false); + isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask); // Check parameters. CONVERT_CHECKED(JSFunction, constructor, args[0]); @@ -12165,7 +12186,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) { // Get the number of referencing objects. int count; - count = DebugConstructedBy(constructor, max_references, NULL, 0); + HeapIterator heap_iterator; + count = DebugConstructedBy(&heap_iterator, + constructor, + max_references, + NULL, + 0); // Allocate an array to hold the result. Object* object; @@ -12174,8 +12200,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) { } FixedArray* instances = FixedArray::cast(object); + ASSERT(HEAP->IsHeapIterable()); // Fill the referencing objects. - count = DebugConstructedBy(constructor, max_references, instances, count); + HeapIterator heap_iterator2; + count = DebugConstructedBy(&heap_iterator2, + constructor, + max_references, + instances, + count); // Return result as JS array. 
Object* result; @@ -12183,8 +12215,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) { isolate->context()->global_context()->array_function()); if (!maybe_result->ToObject(&result)) return maybe_result; } - JSArray::cast(result)->SetContent(instances); - return result; + return JSArray::cast(result)->SetContent(instances); } @@ -12248,14 +12279,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) { } -static int FindSharedFunctionInfosForScript(Script* script, +static int FindSharedFunctionInfosForScript(HeapIterator* iterator, + Script* script, FixedArray* buffer) { AssertNoAllocation no_allocations; - int counter = 0; int buffer_size = buffer->length(); - HeapIterator iterator; - for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { + for (HeapObject* obj = iterator->next(); + obj != NULL; + obj = iterator->next()) { ASSERT(obj != NULL); if (!obj->IsSharedFunctionInfo()) { continue; @@ -12281,16 +12313,30 @@ RUNTIME_FUNCTION(MaybeObject*, HandleScope scope(isolate); CONVERT_CHECKED(JSValue, script_value, args[0]); + Handle<Script> script = Handle<Script>(Script::cast(script_value->value())); const int kBufferSize = 32; Handle<FixedArray> array; array = isolate->factory()->NewFixedArray(kBufferSize); - int number = FindSharedFunctionInfosForScript(*script, *array); + int number; + { + isolate->heap()->EnsureHeapIsIterable(); + AssertNoAllocation no_allocations; + HeapIterator heap_iterator; + Script* scr = *script; + FixedArray* arr = *array; + number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr); + } if (number > kBufferSize) { array = isolate->factory()->NewFixedArray(number); - FindSharedFunctionInfosForScript(*script, *array); + isolate->heap()->EnsureHeapIsIterable(); + AssertNoAllocation no_allocations; + HeapIterator heap_iterator; + Script* scr = *script; + FixedArray* arr = *array; + FindSharedFunctionInfosForScript(&heap_iterator, scr, arr); } Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array); @@ -12771,6 +12817,8 @@ static Handle<Object> Runtime_GetScriptFromScriptName( // Scan the heap for Script objects to find the script with the requested // script data. Handle<Script> script; + script_name->GetHeap()->EnsureHeapIsIterable(); + AssertNoAllocation no_allocation_during_heap_iteration; HeapIterator iterator; HeapObject* obj = NULL; while (script.is_null() && ((obj = iterator.next()) != NULL)) { @@ -12982,11 +13030,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) { // TODO(antonm): consider passing a receiver when constructing a cache. Handle<Object> receiver(isolate->global_context()->global()); // This handle is nor shared, nor used later, so it's safe. 
- Object** argv[] = { key_handle.location() }; - bool pending_exception = false; + Handle<Object> argv[] = { key_handle }; + bool pending_exception; value = Execution::Call(factory, receiver, - 1, + ARRAY_SIZE(argv), argv, &pending_exception); if (pending_exception) return Failure::Exception(); @@ -13139,6 +13187,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) { return isolate->heap()->ToBoolean(obj->Has##Name()); \ } +ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOnlyElements) ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements) ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements) ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements) @@ -13155,6 +13204,14 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalDoubleElements) #undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION + +RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) { + ASSERT(args.length() == 2); + CONVERT_CHECKED(JSObject, obj1, args[0]); + CONVERT_CHECKED(JSObject, obj2, args[1]); + return isolate->heap()->ToBoolean(obj1->map() == obj2->map()); +} + // ---------------------------------------------------------------------------- // Implementation of Runtime @@ -13222,6 +13279,9 @@ void Runtime::PerformGC(Object* result) { Isolate* isolate = Isolate::Current(); Failure* failure = Failure::cast(result); if (failure->IsRetryAfterGC()) { + if (isolate->heap()->new_space()->AddFreshPage()) { + return; + } // Try to do a garbage collection; ignore it if it fails. The C // entry stub will throw an out-of-memory exception in that case. isolate->heap()->CollectGarbage(failure->allocation_space()); @@ -13229,7 +13289,7 @@ void Runtime::PerformGC(Object* result) { // Handle last resort GC and make sure to allow future allocations // to grow the heap without causing GCs (if possible). isolate->counters()->gc_last_resort_from_js()->Increment(); - isolate->heap()->CollectAllGarbage(false); + isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags); } } diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h index 1538b7d84..ed9c2b889 100644 --- a/deps/v8/src/runtime.h +++ b/deps/v8/src/runtime.h @@ -330,6 +330,8 @@ namespace internal { F(InitializeConstContextSlot, 3, 1) \ F(OptimizeObjectForAddingMultipleProperties, 2, 1) \ \ + /* Arrays */ \ + F(NonSmiElementStored, 1, 1) \ /* Debugging */ \ F(DebugPrint, 1, 1) \ F(DebugTrace, 0, 1) \ @@ -354,6 +356,7 @@ namespace internal { F(IS_VAR, 1, 1) \ \ /* expose boolean functions from objects-inl.h */ \ + F(HasFastSmiOnlyElements, 1, 1) \ F(HasFastElements, 1, 1) \ F(HasFastDoubleElements, 1, 1) \ F(HasDictionaryElements, 1, 1) \ @@ -367,6 +370,7 @@ namespace internal { F(HasExternalUnsignedIntElements, 1, 1) \ F(HasExternalFloatElements, 1, 1) \ F(HasExternalDoubleElements, 1, 1) \ + F(HaveSameMap, 2, 1) \ /* profiler */ \ F(ProfilerResume, 0, 1) \ F(ProfilerPause, 0, 1) diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js index 14ff1b69c..a12f6c7b0 100644 --- a/deps/v8/src/runtime.js +++ b/deps/v8/src/runtime.js @@ -355,7 +355,7 @@ function IN(x) { if (!IS_SPEC_OBJECT(x)) { throw %MakeTypeError('invalid_in_operator_use', [this, x]); } - return %_IsNonNegativeSmi(this) && !%IsJSProxy(x) ? + return %_IsNonNegativeSmi(this) ? 
%HasElement(x, this) : %HasProperty(x, %ToString(this)); } @@ -429,20 +429,10 @@ function CALL_FUNCTION_PROXY() { } -function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR(proxy) { - var arity = %_ArgumentsLength() - 1; +function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR() { + var proxy = this; var trap = %GetConstructTrap(proxy); - var receiver = void 0; - if (!IS_UNDEFINED(trap)) { - trap = %GetCallTrap(proxy); - var proto = proxy.prototype; - if (!IS_SPEC_OBJECT(proto) && proto !== null) { - throw MakeTypeError("proto_object_or_null", [proto]); - } - receiver = new global.Object(); - receiver.__proto__ = proto; - } - return %Apply(trap, this, arguments, 1, arity); + return %Apply(trap, this, arguments, 0, %_ArgumentsLength()); } diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc index 69ea8ae6e..95748f241 100644 --- a/deps/v8/src/scanner.cc +++ b/deps/v8/src/scanner.cc @@ -95,7 +95,7 @@ uc32 Scanner::ScanHexNumber(int expected_length) { JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_contants) : Scanner(scanner_contants), octal_pos_(Location::invalid()), - harmony_block_scoping_(false) { } + harmony_scoping_(false) { } void JavaScriptScanner::Initialize(UC16CharacterStream* source) { @@ -872,7 +872,7 @@ uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() { KEYWORD("instanceof", Token::INSTANCEOF) \ KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \ KEYWORD_GROUP('l') \ - KEYWORD("let", harmony_block_scoping \ + KEYWORD("let", harmony_scoping \ ? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \ KEYWORD_GROUP('n') \ KEYWORD("new", Token::NEW) \ @@ -906,7 +906,7 @@ uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() { static Token::Value KeywordOrIdentifierToken(const char* input, int input_length, - bool harmony_block_scoping) { + bool harmony_scoping) { ASSERT(input_length >= 1); const int kMinLength = 2; const int kMaxLength = 10; @@ -982,7 +982,7 @@ Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() { Vector<const char> chars = next_.literal_chars->ascii_literal(); return KeywordOrIdentifierToken(chars.start(), chars.length(), - harmony_block_scoping_); + harmony_scoping_); } return Token::IDENTIFIER; diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h index 16c3a427c..6651c3875 100644 --- a/deps/v8/src/scanner.h +++ b/deps/v8/src/scanner.h @@ -509,11 +509,11 @@ class JavaScriptScanner : public Scanner { // tokens, which is what it is used for. void SeekForward(int pos); - bool HarmonyBlockScoping() const { - return harmony_block_scoping_; + bool HarmonyScoping() const { + return harmony_scoping_; } - void SetHarmonyBlockScoping(bool block_scoping) { - harmony_block_scoping_ = block_scoping; + void SetHarmonyScoping(bool block_scoping) { + harmony_scoping_ = block_scoping; } @@ -556,7 +556,7 @@ class JavaScriptScanner : public Scanner { bool has_multiline_comment_before_next_; // Whether we scan 'let' as a keyword for harmony block scoped // let bindings. 
- bool harmony_block_scoping_; + bool harmony_scoping_; }; } } // namespace v8::internal diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc index ad31ca47c..1aa51603d 100644 --- a/deps/v8/src/scopeinfo.cc +++ b/deps/v8/src/scopeinfo.cc @@ -138,7 +138,7 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope) ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS == context_modes_.length()); context_slots_.Add(FACTORY->empty_symbol()); - context_modes_.Add(Variable::INTERNAL); + context_modes_.Add(INTERNAL); } } } @@ -216,7 +216,7 @@ static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) { template <class Allocator> static Object** ReadList(Object** p, List<Handle<String>, Allocator>* list, - List<Variable::Mode, Allocator>* modes) { + List<VariableMode, Allocator>* modes) { ASSERT(list->is_empty()); int n; p = ReadInt(p, &n); @@ -226,7 +226,7 @@ static Object** ReadList(Object** p, p = ReadSymbol(p, &s); p = ReadInt(p, &m); list->Add(s); - modes->Add(static_cast<Variable::Mode>(m)); + modes->Add(static_cast<VariableMode>(m)); } return p; } @@ -285,7 +285,7 @@ static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) { template <class Allocator> static Object** WriteList(Object** p, List<Handle<String>, Allocator>* list, - List<Variable::Mode, Allocator>* modes) { + List<VariableMode, Allocator>* modes) { const int n = list->length(); p = WriteInt(p, n); for (int i = 0; i < n; i++) { @@ -456,7 +456,7 @@ int SerializedScopeInfo::StackSlotIndex(String* name) { return -1; } -int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) { +int SerializedScopeInfo::ContextSlotIndex(String* name, VariableMode* mode) { ASSERT(name->IsSymbol()); Isolate* isolate = GetIsolate(); int result = isolate->context_slot_cache()->Lookup(this, name, mode); @@ -473,7 +473,7 @@ int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) { ASSERT(((p - p0) & 1) == 0); int v; ReadInt(p + 1, &v); - Variable::Mode mode_value = static_cast<Variable::Mode>(v); + VariableMode mode_value = static_cast<VariableMode>(v); if (mode != NULL) *mode = mode_value; result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS; isolate->context_slot_cache()->Update(this, name, mode_value, result); @@ -482,7 +482,7 @@ int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) { p += 2; } } - isolate->context_slot_cache()->Update(this, name, Variable::INTERNAL, -1); + isolate->context_slot_cache()->Update(this, name, INTERNAL, -1); return -1; } @@ -540,7 +540,7 @@ int ContextSlotCache::Hash(Object* data, String* name) { int ContextSlotCache::Lookup(Object* data, String* name, - Variable::Mode* mode) { + VariableMode* mode) { int index = Hash(data, name); Key& key = keys_[index]; if ((key.data == data) && key.name->Equals(name)) { @@ -554,7 +554,7 @@ int ContextSlotCache::Lookup(Object* data, void ContextSlotCache::Update(Object* data, String* name, - Variable::Mode mode, + VariableMode mode, int slot_index) { String* symbol; ASSERT(slot_index > kNotFound); @@ -581,7 +581,7 @@ void ContextSlotCache::Clear() { void ContextSlotCache::ValidateEntry(Object* data, String* name, - Variable::Mode mode, + VariableMode mode, int slot_index) { String* symbol; if (HEAP->LookupSymbolIfExists(name, &symbol)) { diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h index 40c5c8a68..03f321be7 100644 --- a/deps/v8/src/scopeinfo.h +++ b/deps/v8/src/scopeinfo.h @@ -97,70 +97,7 @@ class ScopeInfo BASE_EMBEDDED { 
List<Handle<String>, Allocator > parameters_; List<Handle<String>, Allocator > stack_slots_; List<Handle<String>, Allocator > context_slots_; - List<Variable::Mode, Allocator > context_modes_; -}; - - -// This object provides quick access to scope info details for runtime -// routines w/o the need to explicitly create a ScopeInfo object. -class SerializedScopeInfo : public FixedArray { - public : - - static SerializedScopeInfo* cast(Object* object) { - ASSERT(object->IsSerializedScopeInfo()); - return reinterpret_cast<SerializedScopeInfo*>(object); - } - - // Does this scope call eval? - bool CallsEval(); - - // Is this scope a strict mode scope? - bool IsStrictMode(); - - // Return the number of stack slots for code. - int NumberOfStackSlots(); - - // Return the number of context slots for code. - int NumberOfContextSlots(); - - // Return if this has context slots besides MIN_CONTEXT_SLOTS; - bool HasHeapAllocatedLocals(); - - // Lookup support for serialized scope info. Returns the - // the stack slot index for a given slot name if the slot is - // present; otherwise returns a value < 0. The name must be a symbol - // (canonicalized). - int StackSlotIndex(String* name); - - // Lookup support for serialized scope info. Returns the - // context slot index for a given slot name if the slot is present; otherwise - // returns a value < 0. The name must be a symbol (canonicalized). - // If the slot is present and mode != NULL, sets *mode to the corresponding - // mode for that variable. - int ContextSlotIndex(String* name, Variable::Mode* mode); - - // Lookup support for serialized scope info. Returns the - // parameter index for a given parameter name if the parameter is present; - // otherwise returns a value < 0. The name must be a symbol (canonicalized). - int ParameterIndex(String* name); - - // Lookup support for serialized scope info. Returns the - // function context slot index if the function name is present (named - // function expressions, only), otherwise returns a value < 0. The name - // must be a symbol (canonicalized). - int FunctionContextSlotIndex(String* name); - - static Handle<SerializedScopeInfo> Create(Scope* scope); - - // Serializes empty scope info. - static SerializedScopeInfo* Empty(); - - private: - inline Object** ContextEntriesAddr(); - - inline Object** ParameterEntriesAddr(); - - inline Object** StackSlotEntriesAddr(); + List<VariableMode, Allocator > context_modes_; }; @@ -174,12 +111,12 @@ class ContextSlotCache { // If absent, kNotFound is returned. int Lookup(Object* data, String* name, - Variable::Mode* mode); + VariableMode* mode); // Update an element in the cache. void Update(Object* data, String* name, - Variable::Mode mode, + VariableMode mode, int slot_index); // Clear the cache. @@ -201,7 +138,7 @@ class ContextSlotCache { #ifdef DEBUG void ValidateEntry(Object* data, String* name, - Variable::Mode mode, + VariableMode mode, int slot_index); #endif @@ -212,7 +149,7 @@ class ContextSlotCache { }; struct Value { - Value(Variable::Mode mode, int index) { + Value(VariableMode mode, int index) { ASSERT(ModeField::is_valid(mode)); ASSERT(IndexField::is_valid(index)); value_ = ModeField::encode(mode) | IndexField::encode(index); @@ -224,14 +161,14 @@ class ContextSlotCache { uint32_t raw() { return value_; } - Variable::Mode mode() { return ModeField::decode(value_); } + VariableMode mode() { return ModeField::decode(value_); } int index() { return IndexField::decode(value_); } // Bit fields in value_ (type, shift, size). 
Must be public so the // constants can be embedded in generated code. - class ModeField: public BitField<Variable::Mode, 0, 3> {}; - class IndexField: public BitField<int, 3, 32-3> {}; + class ModeField: public BitField<VariableMode, 0, 3> {}; + class IndexField: public BitField<int, 3, 32-3> {}; private: uint32_t value_; }; diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc index d5a7a9f9c..e67b7f826 100644 --- a/deps/v8/src/scopes.cc +++ b/deps/v8/src/scopes.cc @@ -85,7 +85,7 @@ VariableMap::~VariableMap() {} Variable* VariableMap::Declare(Scope* scope, Handle<String> name, - Variable::Mode mode, + VariableMode mode, bool is_valid_lhs, Variable::Kind kind) { HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true); @@ -179,7 +179,7 @@ Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name) ++num_var_or_const_; Variable* variable = variables_.Declare(this, catch_variable_name, - Variable::VAR, + VAR, true, // Valid left-hand side. Variable::NORMAL); AllocateHeapSlot(variable); @@ -310,7 +310,7 @@ void Scope::Initialize(bool inside_with) { Variable* var = variables_.Declare(this, isolate_->factory()->this_symbol(), - Variable::VAR, + VAR, false, Variable::THIS); var->AllocateTo(Variable::PARAMETER, -1); @@ -323,7 +323,7 @@ void Scope::Initialize(bool inside_with) { // allocated during variable allocation. variables_.Declare(this, isolate_->factory()->arguments_symbol(), - Variable::VAR, + VAR, true, Variable::ARGUMENTS); } @@ -373,11 +373,11 @@ Variable* Scope::LocalLookup(Handle<String> name) { ASSERT(scope_info_->StackSlotIndex(*name) < 0); // Check context slot lookup. - Variable::Mode mode; + VariableMode mode; int index = scope_info_->ContextSlotIndex(*name, &mode); if (index < 0) { // Check parameters. - mode = Variable::VAR; + mode = VAR; index = scope_info_->ParameterIndex(*name); if (index < 0) { // Check the function name. @@ -407,13 +407,13 @@ Variable* Scope::Lookup(Handle<String> name) { Variable* Scope::DeclareFunctionVar(Handle<String> name) { ASSERT(is_function_scope() && function_ == NULL); Variable* function_var = - new Variable(this, name, Variable::CONST, true, Variable::NORMAL); + new Variable(this, name, CONST, true, Variable::NORMAL); function_ = new(isolate_->zone()) VariableProxy(isolate_, function_var); return function_var; } -void Scope::DeclareParameter(Handle<String> name, Variable::Mode mode) { +void Scope::DeclareParameter(Handle<String> name, VariableMode mode) { ASSERT(!already_resolved()); ASSERT(is_function_scope()); Variable* var = @@ -422,14 +422,12 @@ void Scope::DeclareParameter(Handle<String> name, Variable::Mode mode) { } -Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) { +Variable* Scope::DeclareLocal(Handle<String> name, VariableMode mode) { ASSERT(!already_resolved()); // This function handles VAR and CONST modes. DYNAMIC variables are // introduces during variable allocation, INTERNAL variables are allocated // explicitly, and TEMPORARY variables are allocated via NewTemporary(). 
- ASSERT(mode == Variable::VAR || - mode == Variable::CONST || - mode == Variable::LET); + ASSERT(mode == VAR || mode == CONST || mode == LET); ++num_var_or_const_; return variables_.Declare(this, name, mode, true, Variable::NORMAL); } @@ -437,7 +435,7 @@ Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) { Variable* Scope::DeclareGlobal(Handle<String> name) { ASSERT(is_global_scope()); - return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL, + return variables_.Declare(this, name, DYNAMIC_GLOBAL, true, Variable::NORMAL); } @@ -473,7 +471,7 @@ Variable* Scope::NewTemporary(Handle<String> name) { ASSERT(!already_resolved()); Variable* var = new Variable(this, name, - Variable::TEMPORARY, + TEMPORARY, true, Variable::NORMAL); temps_.Add(var); @@ -505,13 +503,13 @@ Declaration* Scope::CheckConflictingVarDeclarations() { int length = decls_.length(); for (int i = 0; i < length; i++) { Declaration* decl = decls_[i]; - if (decl->mode() != Variable::VAR) continue; + if (decl->mode() != VAR) continue; Handle<String> name = decl->proxy()->name(); bool cond = true; for (Scope* scope = decl->scope(); cond ; scope = scope->outer_scope_) { // There is a conflict if there exists a non-VAR binding. Variable* other_var = scope->variables_.Lookup(name); - if (other_var != NULL && other_var->mode() != Variable::VAR) { + if (other_var != NULL && other_var->mode() != VAR) { return decl; } @@ -779,9 +777,9 @@ void Scope::Print(int n) { Indent(n1, "// dynamic vars\n"); if (dynamics_ != NULL) { - PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC)); - PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_LOCAL)); - PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_GLOBAL)); + PrintMap(n1, dynamics_->GetMap(DYNAMIC)); + PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL)); + PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL)); } // Print inner scopes (disable by providing negative n). @@ -797,7 +795,7 @@ void Scope::Print(int n) { #endif // DEBUG -Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) { +Variable* Scope::NonLocal(Handle<String> name, VariableMode mode) { if (dynamics_ == NULL) dynamics_ = new DynamicScopePart(); VariableMap* map = dynamics_->GetMap(mode); Variable* var = map->Lookup(name); @@ -903,7 +901,7 @@ void Scope::ResolveVariable(Scope* global_scope, // Note that we must do a lookup anyway, because if we find one, // we must mark that variable as potentially accessed from this // inner scope (the property may not be in the 'with' object). - var = NonLocal(proxy->name(), Variable::DYNAMIC); + var = NonLocal(proxy->name(), DYNAMIC); } else { // We are not inside a local 'with' statement. @@ -926,13 +924,13 @@ void Scope::ResolveVariable(Scope* global_scope, } else if (scope_inside_with_) { // If we are inside a with statement we give up and look up // the variable at runtime. - var = NonLocal(proxy->name(), Variable::DYNAMIC); + var = NonLocal(proxy->name(), DYNAMIC); } else if (invalidated_local != NULL) { // No with statements are involved and we found a local // variable that might be shadowed by eval introduced // variables. - var = NonLocal(proxy->name(), Variable::DYNAMIC_LOCAL); + var = NonLocal(proxy->name(), DYNAMIC_LOCAL); var->set_local_if_not_shadowed(invalidated_local); } else if (outer_scope_is_eval_scope_) { @@ -942,10 +940,10 @@ void Scope::ResolveVariable(Scope* global_scope, // variable is global if it is not shadowed by eval-introduced // variables. 
if (context->GlobalIfNotShadowedByEval(proxy->name())) { - var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL); + var = NonLocal(proxy->name(), DYNAMIC_GLOBAL); } else { - var = NonLocal(proxy->name(), Variable::DYNAMIC); + var = NonLocal(proxy->name(), DYNAMIC); } } else { @@ -953,7 +951,7 @@ void Scope::ResolveVariable(Scope* global_scope, // is not executed with a call to eval. We know that this // variable is global unless it is shadowed by eval-introduced // variables. - var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL); + var = NonLocal(proxy->name(), DYNAMIC_GLOBAL); } } } @@ -1040,7 +1038,7 @@ bool Scope::MustAllocateInContext(Variable* var) { // // Exceptions: temporary variables are never allocated in a context; // catch-bound variables are always allocated in a context. - if (var->mode() == Variable::TEMPORARY) return false; + if (var->mode() == TEMPORARY) return false; if (is_catch_scope() || is_block_scope()) return true; return var->is_accessed_from_inner_scope() || scope_calls_eval_ || diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h index 2917a63bb..7e789b8bd 100644 --- a/deps/v8/src/scopes.h +++ b/deps/v8/src/scopes.h @@ -50,7 +50,7 @@ class VariableMap: public HashMap { Variable* Declare(Scope* scope, Handle<String> name, - Variable::Mode mode, + VariableMode mode, bool is_valid_lhs, Variable::Kind kind); @@ -64,8 +64,8 @@ class VariableMap: public HashMap { // and setup time for scopes that don't need them. class DynamicScopePart : public ZoneObject { public: - VariableMap* GetMap(Variable::Mode mode) { - int index = mode - Variable::DYNAMIC; + VariableMap* GetMap(VariableMode mode) { + int index = mode - DYNAMIC; ASSERT(index >= 0 && index < 3); return &maps_[index]; } @@ -135,11 +135,11 @@ class Scope: public ZoneObject { // Declare a parameter in this scope. When there are duplicated // parameters the rightmost one 'wins'. However, the implementation // expects all parameters to be declared and from left to right. - void DeclareParameter(Handle<String> name, Variable::Mode mode); + void DeclareParameter(Handle<String> name, VariableMode mode); // Declare a local variable in this scope. If the variable has been // declared before, the previously declared variable is returned. - Variable* DeclareLocal(Handle<String> name, Variable::Mode mode); + Variable* DeclareLocal(Handle<String> name, VariableMode mode); // Declare an implicit global variable in this scope which must be a // global scope. The variable was introduced (possibly from an inner @@ -406,7 +406,7 @@ class Scope: public ZoneObject { // Create a non-local variable with a given name. // These variables are looked up dynamically at runtime. - Variable* NonLocal(Handle<String> name, Variable::Mode mode); + Variable* NonLocal(Handle<String> name, VariableMode mode); // Variable resolution. 
Variable* LookupRecursive(Handle<String> name, diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index ecb480a8f..84ab94a97 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -300,12 +300,24 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) { RUNTIME_ENTRY, 4, "HandleScope::DeleteExtensions"); + Add(ExternalReference:: + incremental_marking_record_write_function(isolate).address(), + RUNTIME_ENTRY, + 5, + "IncrementalMarking::RecordWrite"); + Add(ExternalReference::store_buffer_overflow_function(isolate).address(), + RUNTIME_ENTRY, + 6, + "StoreBuffer::StoreBufferOverflow"); + Add(ExternalReference:: + incremental_evacuation_record_write_function(isolate).address(), + RUNTIME_ENTRY, + 7, + "IncrementalMarking::RecordWrite"); + + // Miscellaneous - Add(ExternalReference::the_hole_value_location(isolate).address(), - UNCLASSIFIED, - 2, - "Factory::the_hole_value().location()"); Add(ExternalReference::roots_address(isolate).address(), UNCLASSIFIED, 3, @@ -351,129 +363,133 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) { "Heap::always_allocate_scope_depth()"); Add(ExternalReference::new_space_allocation_limit_address(isolate).address(), UNCLASSIFIED, - 13, + 14, "Heap::NewSpaceAllocationLimitAddress()"); Add(ExternalReference::new_space_allocation_top_address(isolate).address(), UNCLASSIFIED, - 14, + 15, "Heap::NewSpaceAllocationTopAddress()"); #ifdef ENABLE_DEBUGGER_SUPPORT Add(ExternalReference::debug_break(isolate).address(), UNCLASSIFIED, - 15, + 16, "Debug::Break()"); Add(ExternalReference::debug_step_in_fp_address(isolate).address(), UNCLASSIFIED, - 16, + 17, "Debug::step_in_fp_addr()"); #endif Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(), UNCLASSIFIED, - 17, + 18, "add_two_doubles"); Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(), UNCLASSIFIED, - 18, + 19, "sub_two_doubles"); Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(), UNCLASSIFIED, - 19, + 20, "mul_two_doubles"); Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(), UNCLASSIFIED, - 20, + 21, "div_two_doubles"); Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(), UNCLASSIFIED, - 21, + 22, "mod_two_doubles"); Add(ExternalReference::compare_doubles(isolate).address(), UNCLASSIFIED, - 22, + 23, "compare_doubles"); #ifndef V8_INTERPRETED_REGEXP Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(), UNCLASSIFIED, - 23, + 24, "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()"); Add(ExternalReference::re_check_stack_guard_state(isolate).address(), UNCLASSIFIED, - 24, + 25, "RegExpMacroAssembler*::CheckStackGuardState()"); Add(ExternalReference::re_grow_stack(isolate).address(), UNCLASSIFIED, - 25, + 26, "NativeRegExpMacroAssembler::GrowStack()"); Add(ExternalReference::re_word_character_map().address(), UNCLASSIFIED, - 26, + 27, "NativeRegExpMacroAssembler::word_character_map"); #endif // V8_INTERPRETED_REGEXP // Keyed lookup cache. 
Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(), UNCLASSIFIED, - 27, + 28, "KeyedLookupCache::keys()"); Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(), UNCLASSIFIED, - 28, + 29, "KeyedLookupCache::field_offsets()"); Add(ExternalReference::transcendental_cache_array_address(isolate).address(), UNCLASSIFIED, - 29, + 30, "TranscendentalCache::caches()"); Add(ExternalReference::handle_scope_next_address().address(), UNCLASSIFIED, - 30, + 31, "HandleScope::next"); Add(ExternalReference::handle_scope_limit_address().address(), UNCLASSIFIED, - 31, + 32, "HandleScope::limit"); Add(ExternalReference::handle_scope_level_address().address(), UNCLASSIFIED, - 32, + 33, "HandleScope::level"); Add(ExternalReference::new_deoptimizer_function(isolate).address(), UNCLASSIFIED, - 33, + 34, "Deoptimizer::New()"); Add(ExternalReference::compute_output_frames_function(isolate).address(), UNCLASSIFIED, - 34, + 35, "Deoptimizer::ComputeOutputFrames()"); Add(ExternalReference::address_of_min_int().address(), UNCLASSIFIED, - 35, + 36, "LDoubleConstant::min_int"); Add(ExternalReference::address_of_one_half().address(), UNCLASSIFIED, - 36, + 37, "LDoubleConstant::one_half"); Add(ExternalReference::isolate_address().address(), UNCLASSIFIED, - 37, + 38, "isolate"); Add(ExternalReference::address_of_minus_zero().address(), UNCLASSIFIED, - 38, + 39, "LDoubleConstant::minus_zero"); Add(ExternalReference::address_of_negative_infinity().address(), UNCLASSIFIED, - 39, + 40, "LDoubleConstant::negative_infinity"); Add(ExternalReference::power_double_double_function(isolate).address(), UNCLASSIFIED, - 40, + 41, "power_double_double_function"); Add(ExternalReference::power_double_int_function(isolate).address(), UNCLASSIFIED, - 41, + 42, "power_double_int_function"); - Add(ExternalReference::arguments_marker_location(isolate).address(), + Add(ExternalReference::store_buffer_top(isolate).address(), UNCLASSIFIED, - 42, - "Factory::arguments_marker().location()"); + 43, + "store_buffer_top"); + Add(ExternalReference::address_of_canonical_non_hole_nan().address(), + UNCLASSIFIED, + 44, + "canonical_nan"); } @@ -569,6 +585,7 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) { maybe_new_allocation = reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size); } + ASSERT(!maybe_new_allocation->IsFailure()); Object* new_allocation = maybe_new_allocation->ToObjectUnchecked(); HeapObject* new_object = HeapObject::cast(new_allocation); address = new_object->address(); @@ -577,14 +594,13 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) { ASSERT(SpaceIsLarge(space_index)); LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space); Object* new_allocation; - if (space_index == kLargeData) { - new_allocation = lo_space->AllocateRaw(size)->ToObjectUnchecked(); - } else if (space_index == kLargeFixedArray) { + if (space_index == kLargeData || space_index == kLargeFixedArray) { new_allocation = - lo_space->AllocateRawFixedArray(size)->ToObjectUnchecked(); + lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked(); } else { ASSERT_EQ(kLargeCode, space_index); - new_allocation = lo_space->AllocateRawCode(size)->ToObjectUnchecked(); + new_allocation = + lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked(); } HeapObject* new_object = HeapObject::cast(new_allocation); // Record all large objects in the same space. 
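The serialize.cc hunks above register new runtime entries (IncrementalMarking::RecordWrite, StoreBuffer::StoreBufferOverflow) and then bump every following UNCLASSIFIED id by one: the external reference table is a dense mapping from addresses to small codes that snapshots store instead of raw pointers, so inserting entries forces exactly this renumbering. A hedged sketch of such a table; the ToyExternalReferenceTable below and its members are illustrative, not V8's class.

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

// A toy external-reference table: every address the snapshot may refer to is
// registered under a small integer code; the serializer writes the code and
// the deserializer maps it back to the address inside the running binary.
class ToyExternalReferenceTable {
 public:
  void Add(const void* address, uint32_t code, const std::string& name) {
    // Codes must be unique, so inserting a new entry in the middle of the
    // sequence forces the "27 -> 28, 28 -> 29, ..." renumbering seen above.
    assert(code_to_address_.count(code) == 0);
    code_to_address_[code] = address;
    address_to_code_[address] = code;
    names_[code] = name;
  }
  uint32_t Encode(const void* address) const {
    return address_to_code_.at(address);
  }
  const void* Decode(uint32_t code) const { return code_to_address_.at(code); }

 private:
  std::map<uint32_t, const void*> code_to_address_;
  std::map<const void*, uint32_t> address_to_code_;
  std::map<uint32_t, std::string> names_;
};

static int fake_runtime_entry = 0;
static double fake_double_constant = 0.5;

int main() {
  ToyExternalReferenceTable table;
  table.Add(&fake_runtime_entry, 5, "IncrementalMarking::RecordWrite");
  table.Add(&fake_double_constant, 37, "LDoubleConstant::one_half");
  assert(table.Decode(table.Encode(&fake_double_constant)) ==
         &fake_double_constant);
  return 0;
}

Because the codes end up inside serialized snapshots, shifting them is a compatibility event: a snapshot written with the old numbering cannot be read with the new one, which is why the ids are renumbered in the same change that inserts the new entries.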
@@ -629,6 +645,7 @@ HeapObject* Deserializer::GetAddressFromStart(int space) { void Deserializer::Deserialize() { isolate_ = Isolate::Current(); + ASSERT(isolate_ != NULL); // Don't GC while deserializing - just expand the heap. AlwaysAllocateScope always_allocate; // Don't use the free lists while deserializing. @@ -685,9 +702,8 @@ void Deserializer::VisitPointers(Object** start, Object** end) { // This routine writes the new object into the pointer provided and then // returns true if the new object was in young space and false otherwise. // The reason for this strange interface is that otherwise the object is -// written very late, which means the ByteArray map is not set up by the -// time we need to use it to mark the space at the end of a page free (by -// making it into a byte array). +// written very late, which means the FreeSpace map is not set up by the +// time we need to use it to mark the space at the end of a page free. void Deserializer::ReadObject(int space_number, Space* space, Object** write_back) { @@ -758,8 +774,9 @@ void Deserializer::ReadChunk(Object** current, if (where == kNewObject && how == kPlain && within == kStartOfObject) {\ ASSIGN_DEST_SPACE(space_number) \ ReadObject(space_number, dest_space, current); \ - emit_write_barrier = \ - (space_number == NEW_SPACE && source_space != NEW_SPACE); \ + emit_write_barrier = (space_number == NEW_SPACE && \ + source_space != NEW_SPACE && \ + source_space != CELL_SPACE); \ } else { \ Object* new_object = NULL; /* May not be a real Object pointer. */ \ if (where == kNewObject) { \ @@ -778,14 +795,16 @@ void Deserializer::ReadChunk(Object** current, Decode(reference_id); \ new_object = reinterpret_cast<Object*>(address); \ } else if (where == kBackref) { \ - emit_write_barrier = \ - (space_number == NEW_SPACE && source_space != NEW_SPACE); \ + emit_write_barrier = (space_number == NEW_SPACE && \ + source_space != NEW_SPACE && \ + source_space != CELL_SPACE); \ new_object = GetAddressFromEnd(data & kSpaceMask); \ } else { \ ASSERT(where == kFromStart); \ if (offset_from_start == kUnknownOffsetFromStart) { \ - emit_write_barrier = \ - (space_number == NEW_SPACE && source_space != NEW_SPACE); \ + emit_write_barrier = (space_number == NEW_SPACE && \ + source_space != NEW_SPACE && \ + source_space != CELL_SPACE); \ new_object = GetAddressFromStart(data & kSpaceMask); \ } else { \ Address object_address = pages_[space_number][0] + \ @@ -973,6 +992,11 @@ void Deserializer::ReadChunk(Object** current, break; } + case kSkip: { + current++; + break; + } + case kNativesStringResource: { int index = source_->Get(); Vector<const char> source_vector = Natives::GetRawScriptSource(index); @@ -1097,8 +1121,13 @@ void PartialSerializer::Serialize(Object** object) { void Serializer::VisitPointers(Object** start, Object** end) { + Isolate* isolate = Isolate::Current(); + for (Object** current = start; current < end; current++) { - if ((*current)->IsSmi()) { + if (reinterpret_cast<Address>(current) == + isolate->heap()->store_buffer()->TopAddress()) { + sink_->Put(kSkip, "Skip"); + } else if ((*current)->IsSmi()) { sink_->Put(kRawData, "RawData"); sink_->PutInt(kPointerSize, "length"); for (int i = 0; i < kPointerSize; i++) { @@ -1420,7 +1449,7 @@ void Serializer::ObjectSerializer::VisitExternalAsciiString( if (!source->IsUndefined()) { ExternalAsciiString* string = ExternalAsciiString::cast(source); typedef v8::String::ExternalAsciiStringResource Resource; - Resource* resource = string->resource(); + const Resource* resource = 
string->resource(); if (resource == *resource_pointer) { sink_->Put(kNativesStringResource, "NativesStringResource"); sink_->PutSection(i, "NativesStringResourceEnd"); diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h index 66d6fb511..c07092332 100644 --- a/deps/v8/src/serialize.h +++ b/deps/v8/src/serialize.h @@ -238,7 +238,8 @@ class SerializerDeserializer: public ObjectVisitor { kRootArray = 0x9, // Object is found in root array. kPartialSnapshotCache = 0xa, // Object is in the cache. kExternalReference = 0xb, // Pointer to an external reference. - // 0xc-0xf Free. + kSkip = 0xc, // Skip a pointer sized cell. + // 0xd-0xf Free. kBackref = 0x10, // Object is described relative to end. // 0x11-0x18 One per space. // 0x19-0x1f Common backref offsets. diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h index 35d722409..d9e6053ad 100644 --- a/deps/v8/src/spaces-inl.h +++ b/deps/v8/src/spaces-inl.h @@ -1,4 +1,4 @@ -// Copyright 2006-2010 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -37,355 +37,213 @@ namespace internal { // ----------------------------------------------------------------------------- -// PageIterator +// Bitmap -bool PageIterator::has_next() { - return prev_page_ != stop_page_; -} - - -Page* PageIterator::next() { - ASSERT(has_next()); - prev_page_ = (prev_page_ == NULL) - ? space_->first_page_ - : prev_page_->next_page(); - return prev_page_; +void Bitmap::Clear(MemoryChunk* chunk) { + Bitmap* bitmap = chunk->markbits(); + for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0; + chunk->ResetLiveBytes(); } // ----------------------------------------------------------------------------- -// Page - -Page* Page::next_page() { - return heap_->isolate()->memory_allocator()->GetNextPage(this); -} - - -Address Page::AllocationTop() { - PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this); - return owner->PageAllocationTop(this); -} - - -Address Page::AllocationWatermark() { - PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this); - if (this == owner->AllocationTopPage()) { - return owner->top(); - } - return address() + AllocationWatermarkOffset(); -} - +// PageIterator -uint32_t Page::AllocationWatermarkOffset() { - return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >> - kAllocationWatermarkOffsetShift); -} +PageIterator::PageIterator(PagedSpace* space) + : space_(space), + prev_page_(&space->anchor_), + next_page_(prev_page_->next_page()) { } -void Page::SetAllocationWatermark(Address allocation_watermark) { - if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) { - // When iterating intergenerational references during scavenge - // we might decide to promote an encountered young object. - // We will allocate a space for such an object and put it - // into the promotion queue to process it later. - // If space for object was allocated somewhere beyond allocation - // watermark this might cause garbage pointers to appear under allocation - // watermark. To avoid visiting them during dirty regions iteration - // which might be still in progress we store a valid allocation watermark - // value and mark this page as having an invalid watermark. 
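The serializer changes above add a kSkip opcode: when Serializer::VisitPointers reaches the heap's store-buffer-top slot it emits kSkip instead of data, and the deserializer's kSkip case simply advances one pointer-sized cell, leaving the running isolate's value in place. A simplified sketch of that opcode handling; the stream layout and the ReadChunk signature below are made up for illustration and are not V8's wire format.

#include <cassert>
#include <cstdint>
#include <vector>

// Opcodes for a toy snapshot stream.
enum Opcode : uint8_t { kRawWord = 0x01, kSkip = 0x0c };

// Deserializer loop: kRawWord writes the next word from the stream, kSkip
// leaves the target cell untouched and just moves the cursor on, which is the
// behaviour added above for the store buffer top pointer.
void ReadChunk(const std::vector<uint64_t>& stream,
               std::vector<uint64_t>* heap_cells) {
  size_t heap_index = 0;
  for (size_t i = 0; i < stream.size();) {
    switch (static_cast<Opcode>(stream[i++])) {
      case kRawWord:
        (*heap_cells)[heap_index++] = stream[i++];
        break;
      case kSkip:
        heap_index++;  // Skip a pointer-sized cell.
        break;
    }
  }
}

int main() {
  std::vector<uint64_t> cells(3, 0xdeadbeef);  // Pre-existing isolate state.
  std::vector<uint64_t> stream = {kRawWord, 42, kSkip, kRawWord, 7};
  ReadChunk(stream, &cells);
  assert(cells[0] == 42);
  assert(cells[1] == 0xdeadbeef);  // Untouched thanks to kSkip.
  assert(cells[2] == 7);
  return 0;
}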
- SetCachedAllocationWatermark(AllocationWatermark()); - InvalidateWatermark(true); - } - flags_ = (flags_ & kFlagsMask) | - Offset(allocation_watermark) << kAllocationWatermarkOffsetShift; - ASSERT(AllocationWatermarkOffset() - == static_cast<uint32_t>(Offset(allocation_watermark))); +bool PageIterator::has_next() { + return next_page_ != &space_->anchor_; } -void Page::SetCachedAllocationWatermark(Address allocation_watermark) { - mc_first_forwarded = allocation_watermark; +Page* PageIterator::next() { + ASSERT(has_next()); + prev_page_ = next_page_; + next_page_ = next_page_->next_page(); + return prev_page_; } -Address Page::CachedAllocationWatermark() { - return mc_first_forwarded; -} +// ----------------------------------------------------------------------------- +// NewSpacePageIterator -uint32_t Page::GetRegionMarks() { - return dirty_regions_; -} +NewSpacePageIterator::NewSpacePageIterator(NewSpace* space) + : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()), + next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())), + last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { } +NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space) + : prev_page_(space->anchor()), + next_page_(prev_page_->next_page()), + last_page_(prev_page_->prev_page()) { } -void Page::SetRegionMarks(uint32_t marks) { - dirty_regions_ = marks; +NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit) + : prev_page_(NewSpacePage::FromAddress(start)->prev_page()), + next_page_(NewSpacePage::FromAddress(start)), + last_page_(NewSpacePage::FromLimit(limit)) { + SemiSpace::AssertValidRange(start, limit); } -int Page::GetRegionNumberForAddress(Address addr) { - // Each page is divided into 256 byte regions. Each region has a corresponding - // dirty mark bit in the page header. Region can contain intergenerational - // references iff its dirty mark is set. - // A normal 8K page contains exactly 32 regions so all region marks fit - // into 32-bit integer field. To calculate a region number we just divide - // offset inside page by region size. - // A large page can contain more then 32 regions. But we want to avoid - // additional write barrier code for distinguishing between large and normal - // pages so we just ignore the fact that addr points into a large page and - // calculate region number as if addr pointed into a normal 8K page. This way - // we get a region number modulo 32 so for large pages several regions might - // be mapped to a single dirty mark. - ASSERT_PAGE_ALIGNED(this->address()); - STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt); - - // We are using masking with kPageAlignmentMask instead of Page::Offset() - // to get an offset to the beginning of 8K page containing addr not to the - // beginning of actual page which can be bigger then 8K. 
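The new PageIterator and NewSpacePageIterator introduced above walk pages as a circular, doubly linked list hung off a sentinel anchor owned by the space: has_next() is just a comparison against the anchor, and an empty space is the anchor linked to itself. A standalone sketch of that sentinel-list pattern; PageNode and its id field are illustrative, not V8's Page.

#include <cassert>

// A page modelled as a node of a circular doubly linked list. The space owns
// one sentinel ("anchor") node that is never a real page; an empty space is
// simply the anchor linked to itself.
struct PageNode {
  PageNode* next;
  PageNode* prev;
  int id;  // -1 marks the anchor.

  PageNode() : next(this), prev(this), id(-1) {}

  // The same pointer shuffle as MemoryChunk::InsertAfter later in the diff.
  void InsertAfter(PageNode* other) {
    next = other->next;
    prev = other;
    other->next->prev = this;
    other->next = this;
  }
};

class PageIterator {
 public:
  explicit PageIterator(PageNode* anchor)
      : anchor_(anchor), next_(anchor->next) {}
  // Iteration stops when the walk comes back around to the sentinel.
  bool has_next() const { return next_ != anchor_; }
  PageNode* next() {
    PageNode* result = next_;
    next_ = next_->next;
    return result;
  }

 private:
  PageNode* anchor_;
  PageNode* next_;
};

int main() {
  PageNode anchor, a, b;
  a.id = 0;
  b.id = 1;
  a.InsertAfter(&anchor);  // anchor <-> a
  b.InsertAfter(&a);       // anchor <-> a <-> b
  int visited = 0;
  PageIterator it(&anchor);
  while (it.has_next()) {
    if (it.next()->id != visited) return 1;
    visited++;
  }
  assert(visited == 2);
  return 0;
}

Keeping a sentinel node means insertion, removal and iteration never special-case an empty list, which is what lets PagedSpace::TearDown later in the diff simply walk the iterator and free every page.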
- intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask; - return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2); +bool NewSpacePageIterator::has_next() { + return prev_page_ != last_page_; } -uint32_t Page::GetRegionMaskForAddress(Address addr) { - return 1 << GetRegionNumberForAddress(addr); +NewSpacePage* NewSpacePageIterator::next() { + ASSERT(has_next()); + prev_page_ = next_page_; + next_page_ = next_page_->next_page(); + return prev_page_; } -uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) { - uint32_t result = 0; - static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1; - if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) { - result = kAllRegionsDirtyMarks; - } else if (length_in_bytes > 0) { - int start_region = GetRegionNumberForAddress(start); - int end_region = - GetRegionNumberForAddress(start + length_in_bytes - kPointerSize); - uint32_t start_mask = (~0) << start_region; - uint32_t end_mask = ~((~1) << end_region); - result = start_mask & end_mask; - // if end_region < start_region, the mask is ored. - if (result == 0) result = start_mask | end_mask; - } -#ifdef DEBUG - if (FLAG_enable_slow_asserts) { - uint32_t expected = 0; - for (Address a = start; a < start + length_in_bytes; a += kPointerSize) { - expected |= GetRegionMaskForAddress(a); +// ----------------------------------------------------------------------------- +// HeapObjectIterator +HeapObject* HeapObjectIterator::FromCurrentPage() { + while (cur_addr_ != cur_end_) { + if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) { + cur_addr_ = space_->limit(); + continue; + } + HeapObject* obj = HeapObject::FromAddress(cur_addr_); + int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj); + cur_addr_ += obj_size; + ASSERT(cur_addr_ <= cur_end_); + if (!obj->IsFiller()) { + ASSERT_OBJECT_SIZE(obj_size); + return obj; } - ASSERT(expected == result); } -#endif - return result; + return NULL; } -void Page::MarkRegionDirty(Address address) { - SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address)); -} +// ----------------------------------------------------------------------------- +// MemoryAllocator +#ifdef ENABLE_HEAP_PROTECTION -bool Page::IsRegionDirty(Address address) { - return GetRegionMarks() & GetRegionMaskForAddress(address); +void MemoryAllocator::Protect(Address start, size_t size) { + OS::Protect(start, size); } -void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) { - int rstart = GetRegionNumberForAddress(start); - int rend = GetRegionNumberForAddress(end); - - if (reaches_limit) { - end += 1; - } - - if ((rend - rstart) == 0) { - return; - } - - uint32_t bitmask = 0; - - if ((OffsetFrom(start) & kRegionAlignmentMask) == 0 - || (start == ObjectAreaStart())) { - // First region is fully covered - bitmask = 1 << rstart; - } +void MemoryAllocator::Unprotect(Address start, + size_t size, + Executability executable) { + OS::Unprotect(start, size, executable); +} - while (++rstart < rend) { - bitmask |= 1 << rstart; - } - if (bitmask) { - SetRegionMarks(GetRegionMarks() & ~bitmask); - } +void MemoryAllocator::ProtectChunkFromPage(Page* page) { + int id = GetChunkId(page); + OS::Protect(chunks_[id].address(), chunks_[id].size()); } -void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) { - heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED; +void MemoryAllocator::UnprotectChunkFromPage(Page* page) { + int id = GetChunkId(page); + 
OS::Unprotect(chunks_[id].address(), chunks_[id].size(), + chunks_[id].owner()->executable() == EXECUTABLE); } +#endif -bool Page::IsWatermarkValid() { - return (flags_ & (1 << WATERMARK_INVALIDATED)) != - heap_->page_watermark_invalidated_mark_; -} +// -------------------------------------------------------------------------- +// PagedSpace +Page* Page::Initialize(Heap* heap, + MemoryChunk* chunk, + Executability executable, + PagedSpace* owner) { + Page* page = reinterpret_cast<Page*>(chunk); + ASSERT(chunk->size() == static_cast<size_t>(kPageSize)); + ASSERT(chunk->owner() == owner); + owner->IncreaseCapacity(Page::kObjectAreaSize); + owner->Free(page->ObjectAreaStart(), + static_cast<int>(page->ObjectAreaEnd() - + page->ObjectAreaStart())); -void Page::InvalidateWatermark(bool value) { - if (value) { - flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) | - heap_->page_watermark_invalidated_mark_; - } else { - flags_ = - (flags_ & ~(1 << WATERMARK_INVALIDATED)) | - (heap_->page_watermark_invalidated_mark_ ^ - (1 << WATERMARK_INVALIDATED)); - } + heap->incremental_marking()->SetOldSpacePageFlags(chunk); - ASSERT(IsWatermarkValid() == !value); + return page; } -bool Page::GetPageFlag(PageFlag flag) { - return (flags_ & static_cast<intptr_t>(1 << flag)) != 0; +bool PagedSpace::Contains(Address addr) { + Page* p = Page::FromAddress(addr); + if (!p->is_valid()) return false; + return p->owner() == this; } -void Page::SetPageFlag(PageFlag flag, bool value) { - if (value) { - flags_ |= static_cast<intptr_t>(1 << flag); +void MemoryChunk::set_scan_on_scavenge(bool scan) { + if (scan) { + if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages(); + SetFlag(SCAN_ON_SCAVENGE); } else { - flags_ &= ~static_cast<intptr_t>(1 << flag); + if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages(); + ClearFlag(SCAN_ON_SCAVENGE); } -} - - -void Page::ClearPageFlags() { - flags_ = 0; -} - - -void Page::ClearGCFields() { - InvalidateWatermark(true); - SetAllocationWatermark(ObjectAreaStart()); - if (heap_->gc_state() == Heap::SCAVENGE) { - SetCachedAllocationWatermark(ObjectAreaStart()); + heap_->incremental_marking()->SetOldSpacePageFlags(this); +} + + +MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) { + MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>( + OffsetFrom(addr) & ~Page::kPageAlignmentMask); + if (maybe->owner() != NULL) return maybe; + LargeObjectIterator iterator(HEAP->lo_space()); + for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) { + // Fixed arrays are the only pointer-containing objects in large object + // space. + if (o->IsFixedArray()) { + MemoryChunk* chunk = MemoryChunk::FromAddress(o->address()); + if (chunk->Contains(addr)) { + return chunk; + } + } } - SetRegionMarks(kAllRegionsCleanMarks); -} - - -bool Page::WasInUseBeforeMC() { - return GetPageFlag(WAS_IN_USE_BEFORE_MC); -} - - -void Page::SetWasInUseBeforeMC(bool was_in_use) { - SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use); -} - - -bool Page::IsLargeObjectPage() { - return !GetPageFlag(IS_NORMAL_PAGE); -} - - -void Page::SetIsLargeObjectPage(bool is_large_object_page) { - SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page); -} - -Executability Page::PageExecutability() { - return GetPageFlag(IS_EXECUTABLE) ? 
EXECUTABLE : NOT_EXECUTABLE; -} - - -void Page::SetPageExecutability(Executability executable) { - SetPageFlag(IS_EXECUTABLE, executable == EXECUTABLE); -} - - -// ----------------------------------------------------------------------------- -// MemoryAllocator - -void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) { - address_ = a; - size_ = s; - owner_ = o; - executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable(); - owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity(); -} - - -bool MemoryAllocator::IsValidChunk(int chunk_id) { - if (!IsValidChunkId(chunk_id)) return false; - - ChunkInfo& c = chunks_[chunk_id]; - return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL); -} - - -bool MemoryAllocator::IsValidChunkId(int chunk_id) { - return (0 <= chunk_id) && (chunk_id < max_nof_chunks_); -} - - -bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) { - ASSERT(p->is_valid()); - - int chunk_id = GetChunkId(p); - if (!IsValidChunkId(chunk_id)) return false; - - ChunkInfo& c = chunks_[chunk_id]; - return (c.address() <= p->address()) && - (p->address() < c.address() + c.size()) && - (space == c.owner()); + UNREACHABLE(); + return NULL; } -Page* MemoryAllocator::GetNextPage(Page* p) { - ASSERT(p->is_valid()); - intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask; - return Page::FromAddress(AddressFrom<Address>(raw_addr)); -} +PointerChunkIterator::PointerChunkIterator(Heap* heap) + : state_(kOldPointerState), + old_pointer_iterator_(heap->old_pointer_space()), + map_iterator_(heap->map_space()), + lo_iterator_(heap->lo_space()) { } -int MemoryAllocator::GetChunkId(Page* p) { - ASSERT(p->is_valid()); - return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask); +Page* Page::next_page() { + ASSERT(next_chunk()->owner() == owner()); + return static_cast<Page*>(next_chunk()); } -void MemoryAllocator::SetNextPage(Page* prev, Page* next) { - ASSERT(prev->is_valid()); - int chunk_id = GetChunkId(prev); - ASSERT_PAGE_ALIGNED(next->address()); - prev->opaque_header = OffsetFrom(next->address()) | chunk_id; +Page* Page::prev_page() { + ASSERT(prev_chunk()->owner() == owner()); + return static_cast<Page*>(prev_chunk()); } -PagedSpace* MemoryAllocator::PageOwner(Page* page) { - int chunk_id = GetChunkId(page); - ASSERT(IsValidChunk(chunk_id)); - return chunks_[chunk_id].owner(); +void Page::set_next_page(Page* page) { + ASSERT(page->owner() == owner()); + set_next_chunk(page); } -bool MemoryAllocator::InInitialChunk(Address address) { - if (initial_chunk_ == NULL) return false; - - Address start = static_cast<Address>(initial_chunk_->address()); - return (start <= address) && (address < start + initial_chunk_->size()); -} - - -// -------------------------------------------------------------------------- -// PagedSpace - -bool PagedSpace::Contains(Address addr) { - Page* p = Page::FromAddress(addr); - if (!p->is_valid()) return false; - return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this); +void Page::set_prev_page(Page* page) { + ASSERT(page->owner() == owner()); + set_prev_chunk(page); } @@ -393,15 +251,14 @@ bool PagedSpace::Contains(Address addr) { // not contain slow case logic (eg, move to the next page or try free list // allocation) so it can be used by all the allocation functions and for all // the paged spaces. 
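The hunks that follow rewrite PagedSpace::AllocateRaw to try the bump-pointer fast path described in the comment above, then the space's free list, and only then the slow path. A rough standalone sketch of that layering is given here; ToySpace and its first-fit list are illustrative and much simpler than V8's FreeList, which also splits oversized blocks and maintains the code-space skip list.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// A toy paged-space allocator: a bump-pointer fast path over the current
// linear allocation area, with a simple free list of released blocks as the
// first fallback, mirroring the shape of the rewritten AllocateRaw.
class ToySpace {
 public:
  ToySpace(uint8_t* area, size_t size) : top_(area), limit_(area + size) {}

  uint8_t* AllocateRaw(size_t size_in_bytes) {
    // Fast path: bump the top pointer while it stays at or below the limit.
    if (static_cast<size_t>(limit_ - top_) >= size_in_bytes) {
      uint8_t* result = top_;
      top_ += size_in_bytes;
      return result;
    }
    // Fallback: first-fit search of the free list. (The remainder of an
    // oversized block is dropped here for brevity; a real free list splits it.)
    for (size_t i = 0; i < free_list_.size(); i++) {
      if (free_list_[i].size >= size_in_bytes) {
        uint8_t* result = free_list_[i].start;
        free_list_.erase(free_list_.begin() + i);
        return result;
      }
    }
    return nullptr;  // The caller would now take the slow path / trigger a GC.
  }

  void Free(uint8_t* start, size_t size) {
    free_list_.push_back(FreeBlock{start, size});
  }

 private:
  struct FreeBlock { uint8_t* start; size_t size; };
  uint8_t* top_;
  uint8_t* limit_;
  std::vector<FreeBlock> free_list_;
};

int main() {
  uint8_t backing[64];
  ToySpace space(backing, sizeof(backing));
  uint8_t* first = space.AllocateRaw(48);   // Bump-pointer fast path.
  uint8_t* second = space.AllocateRaw(32);  // Linear area exhausted.
  assert(first == backing);
  assert(second == nullptr);
  space.Free(first, 48);
  uint8_t* third = space.AllocateRaw(32);   // Served from the free list.
  assert(third == backing);
  return 0;
}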
-HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info, - int size_in_bytes) { - Address current_top = alloc_info->top; +HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) { + Address current_top = allocation_info_.top; Address new_top = current_top + size_in_bytes; - if (new_top > alloc_info->limit) return NULL; + if (new_top > allocation_info_.limit) return NULL; - alloc_info->top = new_top; - ASSERT(alloc_info->VerifyPagedAllocation()); - accounting_stats_.AllocateBytes(size_in_bytes); + allocation_info_.top = new_top; + ASSERT(allocation_info_.VerifyPagedAllocation()); + ASSERT(current_top != NULL); return HeapObject::FromAddress(current_top); } @@ -410,54 +267,78 @@ HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info, MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) { ASSERT(HasBeenSetup()); ASSERT_OBJECT_SIZE(size_in_bytes); - HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes); - if (object != NULL) return object; + HeapObject* object = AllocateLinearly(size_in_bytes); + if (object != NULL) { + if (identity() == CODE_SPACE) { + SkipList::Update(object->address(), size_in_bytes); + } + return object; + } + + object = free_list_.Allocate(size_in_bytes); + if (object != NULL) { + if (identity() == CODE_SPACE) { + SkipList::Update(object->address(), size_in_bytes); + } + return object; + } object = SlowAllocateRaw(size_in_bytes); - if (object != NULL) return object; + if (object != NULL) { + if (identity() == CODE_SPACE) { + SkipList::Update(object->address(), size_in_bytes); + } + return object; + } return Failure::RetryAfterGC(identity()); } -// Reallocating (and promoting) objects during a compacting collection. -MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) { - ASSERT(HasBeenSetup()); - ASSERT_OBJECT_SIZE(size_in_bytes); - HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes); - if (object != NULL) return object; +// ----------------------------------------------------------------------------- +// NewSpace +MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes) { + Address old_top = allocation_info_.top; + if (allocation_info_.limit - old_top < size_in_bytes) { + Address new_top = old_top + size_in_bytes; + Address high = to_space_.page_high(); + if (allocation_info_.limit < high) { + // Incremental marking has lowered the limit to get a + // chance to do a step. + allocation_info_.limit = Min( + allocation_info_.limit + inline_allocation_limit_step_, + high); + int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); + heap()->incremental_marking()->Step(bytes_allocated); + top_on_previous_step_ = new_top; + return AllocateRawInternal(size_in_bytes); + } else if (AddFreshPage()) { + // Switched to new page. Try allocating again. 
+ int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); + heap()->incremental_marking()->Step(bytes_allocated); + top_on_previous_step_ = to_space_.page_low(); + return AllocateRawInternal(size_in_bytes); + } else { + return Failure::RetryAfterGC(); + } + } - object = SlowMCAllocateRaw(size_in_bytes); - if (object != NULL) return object; + Object* obj = HeapObject::FromAddress(allocation_info_.top); + allocation_info_.top += size_in_bytes; + ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); - return Failure::RetryAfterGC(identity()); + return obj; } -// ----------------------------------------------------------------------------- -// NewSpace - -MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes, - AllocationInfo* alloc_info) { - Address new_top = alloc_info->top + size_in_bytes; - if (new_top > alloc_info->limit) return Failure::RetryAfterGC(); - - Object* obj = HeapObject::FromAddress(alloc_info->top); - alloc_info->top = new_top; -#ifdef DEBUG - SemiSpace* space = - (alloc_info == &allocation_info_) ? &to_space_ : &from_space_; - ASSERT(space->low() <= alloc_info->top - && alloc_info->top <= space->high() - && alloc_info->limit == space->high()); -#endif - return obj; +LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) { + heap->incremental_marking()->SetOldSpacePageFlags(chunk); + return static_cast<LargePage*>(chunk); } intptr_t LargeObjectSpace::Available() { - return LargeObjectChunk::ObjectSizeFor( - heap()->isolate()->memory_allocator()->Available()); + return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available()); } @@ -467,16 +348,23 @@ void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) { ASSERT(string->IsSeqString()); ASSERT(string->address() + StringType::SizeFor(string->length()) == allocation_info_.top); + Address old_top = allocation_info_.top; allocation_info_.top = string->address() + StringType::SizeFor(length); string->set_length(length); + if (Marking::IsBlack(Marking::MarkBitFrom(string))) { + int delta = static_cast<int>(old_top - allocation_info_.top); + MemoryChunk::IncrementLiveBytes(string->address(), -delta); + } } bool FreeListNode::IsFreeListNode(HeapObject* object) { - return object->map() == HEAP->raw_unchecked_byte_array_map() - || object->map() == HEAP->raw_unchecked_one_pointer_filler_map() - || object->map() == HEAP->raw_unchecked_two_pointer_filler_map(); + Map* map = object->map(); + Heap* heap = object->GetHeap(); + return map == heap->raw_unchecked_free_space_map() + || map == heap->raw_unchecked_one_pointer_filler_map() + || map == heap->raw_unchecked_two_pointer_filler_map(); } } } // namespace v8::internal diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc index 97c6d2ac1..61b318118 100644 --- a/deps/v8/src/spaces.cc +++ b/deps/v8/src/spaces.cc @@ -35,52 +35,66 @@ namespace v8 { namespace internal { -// For contiguous spaces, top should be in the space (or at the end) and limit -// should be the end of the space. -#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ - ASSERT((space).low() <= (info).top \ - && (info).top <= (space).high() \ - && (info).limit == (space).high()) // ---------------------------------------------------------------------------- // HeapObjectIterator HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { - Initialize(space->bottom(), space->top(), NULL); + // You can't actually iterate over the anchor page. It is not a real page, + // just an anchor for the double linked page list. 
Initialize as if we have + // reached the end of the anchor page, then the first iteration will move on + // to the first page. + Initialize(space, + NULL, + NULL, + kAllPagesInSpace, + NULL); } HeapObjectIterator::HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func) { - Initialize(space->bottom(), space->top(), size_func); -} - - -HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) { - Initialize(start, space->top(), NULL); -} - - -HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start, - HeapObjectCallback size_func) { - Initialize(start, space->top(), size_func); + // You can't actually iterate over the anchor page. It is not a real page, + // just an anchor for the double linked page list. Initialize the current + // address and end as NULL, then the first iteration will move on + // to the first page. + Initialize(space, + NULL, + NULL, + kAllPagesInSpace, + size_func); } HeapObjectIterator::HeapObjectIterator(Page* page, HeapObjectCallback size_func) { - Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func); -} - - -void HeapObjectIterator::Initialize(Address cur, Address end, + Space* owner = page->owner(); + ASSERT(owner == HEAP->old_pointer_space() || + owner == HEAP->old_data_space() || + owner == HEAP->map_space() || + owner == HEAP->cell_space() || + owner == HEAP->code_space()); + Initialize(reinterpret_cast<PagedSpace*>(owner), + page->ObjectAreaStart(), + page->ObjectAreaEnd(), + kOnePageOnly, + size_func); + ASSERT(page->WasSweptPrecisely()); +} + + +void HeapObjectIterator::Initialize(PagedSpace* space, + Address cur, Address end, + HeapObjectIterator::PageMode mode, HeapObjectCallback size_f) { + // Check that we actually can iterate this space. + ASSERT(!space->was_swept_conservatively()); + + space_ = space; cur_addr_ = cur; - end_addr_ = end; - end_page_ = Page::FromAllocationTop(end); + cur_end_ = end; + page_mode_ = mode; size_func_ = size_f; - Page* p = Page::FromAllocationTop(cur_addr_); - cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop(); #ifdef DEBUG Verify(); @@ -88,63 +102,35 @@ void HeapObjectIterator::Initialize(Address cur, Address end, } -HeapObject* HeapObjectIterator::FromNextPage() { - if (cur_addr_ == end_addr_) return NULL; - - Page* cur_page = Page::FromAllocationTop(cur_addr_); +// We have hit the end of the page and should advance to the next block of +// objects. This happens at the end of the page. +bool HeapObjectIterator::AdvanceToNextPage() { + ASSERT(cur_addr_ == cur_end_); + if (page_mode_ == kOnePageOnly) return false; + Page* cur_page; + if (cur_addr_ == NULL) { + cur_page = space_->anchor(); + } else { + cur_page = Page::FromAddress(cur_addr_ - 1); + ASSERT(cur_addr_ == cur_page->ObjectAreaEnd()); + } cur_page = cur_page->next_page(); - ASSERT(cur_page->is_valid()); - + if (cur_page == space_->anchor()) return false; cur_addr_ = cur_page->ObjectAreaStart(); - cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop(); - - if (cur_addr_ == end_addr_) return NULL; - ASSERT(cur_addr_ < cur_limit_); -#ifdef DEBUG - Verify(); -#endif - return FromCurrentPage(); + cur_end_ = cur_page->ObjectAreaEnd(); + ASSERT(cur_page->WasSweptPrecisely()); + return true; } #ifdef DEBUG void HeapObjectIterator::Verify() { - Page* p = Page::FromAllocationTop(cur_addr_); - ASSERT(p == Page::FromAllocationTop(cur_limit_)); - ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_)); + // TODO(gc): We should do something here. 
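The NewSpace::AllocateRawInternal hunk earlier in the spaces-inl.h changes keeps allocation_info_.limit artificially below the end of the to-space page; when the bump pointer trips over it, the allocator credits the bytes allocated since the previous step to the incremental marker, raises the limit by inline_allocation_limit_step_, and retries. A hedged sketch of that limit-as-tripwire technique; ToyNewSpace and ToyIncrementalMarking are stand-ins, and the real Step does a bounded amount of marking work rather than just counting.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Toy incremental marker: it only records how much allocation happened
// between steps; the real marker does a bounded amount of marking work.
struct ToyIncrementalMarking {
  size_t stepped_bytes;
  ToyIncrementalMarking() : stepped_bytes(0) {}
  void Step(size_t bytes_allocated) { stepped_bytes += bytes_allocated; }
};

class ToyNewSpace {
 public:
  // limit_step plays the role of inline_allocation_limit_step_.
  ToyNewSpace(uint8_t* area, size_t size, size_t limit_step)
      : top_(area),
        real_end_(area + size),
        limit_(area + limit_step),
        limit_step_(limit_step),
        top_on_previous_step_(area) {}

  uint8_t* AllocateRaw(size_t size_in_bytes) {
    if (static_cast<size_t>(limit_ - top_) < size_in_bytes) {
      if (limit_ == real_end_) return nullptr;  // Genuinely out of space.
      // The artificially low limit tripped: credit the marker with the bytes
      // allocated since the previous step, raise the limit, and retry.
      marking_.Step(static_cast<size_t>(top_ - top_on_previous_step_));
      top_on_previous_step_ = top_;
      limit_ = (static_cast<size_t>(real_end_ - limit_) > limit_step_)
                   ? limit_ + limit_step_
                   : real_end_;
      return AllocateRaw(size_in_bytes);
    }
    uint8_t* result = top_;
    top_ += size_in_bytes;
    return result;
  }

  ToyIncrementalMarking marking_;

 private:
  uint8_t* top_;
  uint8_t* real_end_;
  uint8_t* limit_;
  size_t limit_step_;
  uint8_t* top_on_previous_step_;
};

int main() {
  uint8_t backing[256];
  ToyNewSpace space(backing, sizeof(backing), 64);
  for (int i = 0; i < 14; i++) {
    if (space.AllocateRaw(16) == nullptr) return 1;
  }
  // 224 bytes were allocated; the limit tripped at 64, 128 and 192 bytes,
  // so the marker received 3 * 64 bytes of allocation credit.
  assert(space.marking_.stepped_bytes == 192);
  return 0;
}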
} #endif // ----------------------------------------------------------------------------- -// PageIterator - -PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) { - prev_page_ = NULL; - switch (mode) { - case PAGES_IN_USE: - stop_page_ = space->AllocationTopPage(); - break; - case PAGES_USED_BY_MC: - stop_page_ = space->MCRelocationTopPage(); - break; - case ALL_PAGES: -#ifdef DEBUG - // Verify that the cached last page in the space is actually the - // last page. - for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) { - if (!p->next_page()->is_valid()) { - ASSERT(space->last_page_ == p); - } - } -#endif - stop_page_ = space->last_page_; - break; - } -} - - -// ----------------------------------------------------------------------------- // CodeRange @@ -171,7 +157,12 @@ bool CodeRange::Setup(const size_t requested) { // We are sure that we have mapped a block of requested addresses. ASSERT(code_range_->size() == requested); LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); - allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size())); + Address base = reinterpret_cast<Address>(code_range_->address()); + Address aligned_base = + RoundUp(reinterpret_cast<Address>(code_range_->address()), + MemoryChunk::kAlignment); + size_t size = code_range_->size() - (aligned_base - base); + allocation_list_.Add(FreeBlock(aligned_base, size)); current_allocation_block_index_ = 0; return true; } @@ -228,7 +219,8 @@ void CodeRange::GetNextAllocationBlock(size_t requested) { -void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) { +Address CodeRange::AllocateRawMemory(const size_t requested, + size_t* allocated) { ASSERT(current_allocation_block_index_ < allocation_list_.length()); if (requested > allocation_list_[current_allocation_block_index_].size) { // Find an allocation block large enough. This function call may @@ -236,13 +228,16 @@ void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) { GetNextAllocationBlock(requested); } // Commit the requested memory at the start of the current allocation block. - *allocated = RoundUp(requested, Page::kPageSize); + size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); FreeBlock current = allocation_list_[current_allocation_block_index_]; - if (*allocated >= current.size - Page::kPageSize) { + if (aligned_requested >= (current.size - Page::kPageSize)) { // Don't leave a small free block, useless for a large object or chunk. *allocated = current.size; + } else { + *allocated = aligned_requested; } ASSERT(*allocated <= current.size); + ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); if (!code_range_->Commit(current.start, *allocated, true)) { *allocated = 0; return NULL; @@ -256,7 +251,8 @@ void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) { } -void CodeRange::FreeRawMemory(void* address, size_t length) { +void CodeRange::FreeRawMemory(Address address, size_t length) { + ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); free_list_.Add(FreeBlock(address, length)); code_range_->Uncommit(address, length); } @@ -274,35 +270,12 @@ void CodeRange::TearDown() { // MemoryAllocator // -// 270 is an estimate based on the static default heap size of a pair of 256K -// semispaces and a 64M old generation. 
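CodeRange::Setup above now rounds the reserved base up to MemoryChunk::kAlignment and shrinks the usable size by the slack, since chunk headers are later located by masking addresses with that alignment. A small sketch of the round-up arithmetic; the alignment value and addresses below are arbitrary examples, not V8's constants.

#include <cassert>
#include <cstdint>

// Power-of-two alignment standing in for MemoryChunk::kAlignment.
const uintptr_t kAlignment = 1 << 20;  // 1 MB, illustrative only.

// Round `value` up to the next multiple of a power-of-two `alignment`.
uintptr_t RoundUp(uintptr_t value, uintptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  // A reservation rarely comes back aligned: compute the aligned base and how
  // much of the reservation remains usable, as the CodeRange::Setup hunk does.
  uintptr_t base = 0x12345678;  // Hypothetical reservation address.
  uintptr_t reserved_size = 8 * kAlignment;
  uintptr_t aligned_base = RoundUp(base, kAlignment);
  uintptr_t usable_size = reserved_size - (aligned_base - base);
  assert(aligned_base % kAlignment == 0);
  assert(aligned_base >= base);
  assert(aligned_base + usable_size == base + reserved_size);
  return 0;
}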
-const int kEstimatedNumberOfChunks = 270; - - MemoryAllocator::MemoryAllocator(Isolate* isolate) : isolate_(isolate), capacity_(0), capacity_executable_(0), size_(0), - size_executable_(0), - initial_chunk_(NULL), - chunks_(kEstimatedNumberOfChunks), - free_chunk_ids_(kEstimatedNumberOfChunks), - max_nof_chunks_(0), - top_(0) { -} - - -void MemoryAllocator::Push(int free_chunk_id) { - ASSERT(max_nof_chunks_ > 0); - ASSERT(top_ < max_nof_chunks_); - free_chunk_ids_[top_++] = free_chunk_id; -} - - -int MemoryAllocator::Pop() { - ASSERT(top_ > 0); - return free_chunk_ids_[--top_]; + size_executable_(0) { } @@ -311,269 +284,303 @@ bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) { capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); ASSERT_GE(capacity_, capacity_executable_); - // Over-estimate the size of chunks_ array. It assumes the expansion of old - // space is always in the unit of a chunk (kChunkSize) except the last - // expansion. - // - // Due to alignment, allocated space might be one page less than required - // number (kPagesPerChunk) of pages for old spaces. - // - // Reserve two chunk ids for semispaces, one for map space, one for old - // space, and one for code space. - max_nof_chunks_ = - static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5; - if (max_nof_chunks_ > kMaxNofChunks) return false; - size_ = 0; size_executable_ = 0; - ChunkInfo info; // uninitialized element. - for (int i = max_nof_chunks_ - 1; i >= 0; i--) { - chunks_.Add(info); - free_chunk_ids_.Add(i); - } - top_ = max_nof_chunks_; + return true; } void MemoryAllocator::TearDown() { - for (int i = 0; i < max_nof_chunks_; i++) { - if (chunks_[i].address() != NULL) DeleteChunk(i); - } - chunks_.Clear(); - free_chunk_ids_.Clear(); - - if (initial_chunk_ != NULL) { - LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address())); - delete initial_chunk_; - initial_chunk_ = NULL; - } - - ASSERT(top_ == max_nof_chunks_); // all chunks are free - top_ = 0; + // Check that spaces were torn down before MemoryAllocator. + ASSERT(size_ == 0); + // TODO(gc) this will be true again when we fix FreeMemory. + // ASSERT(size_executable_ == 0); capacity_ = 0; capacity_executable_ = 0; - size_ = 0; - max_nof_chunks_ = 0; } -void* MemoryAllocator::AllocateRawMemory(const size_t requested, - size_t* allocated, - Executability executable) { - if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) { - return NULL; - } +void MemoryAllocator::FreeMemory(VirtualMemory* reservation, + Executability executable) { + // TODO(gc) make code_range part of memory allocator? + ASSERT(reservation->IsReserved()); + size_t size = reservation->size(); + ASSERT(size_ >= size); + size_ -= size; + + isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); - void* mem; if (executable == EXECUTABLE) { - // Check executable memory limit. - if (size_executable_ + requested > - static_cast<size_t>(capacity_executable_)) { - LOG(isolate_, - StringEvent("MemoryAllocator::AllocateRawMemory", - "V8 Executable Allocation capacity exceeded")); - return NULL; - } - // Allocate executable memory either from code range or from the - // OS. - if (isolate_->code_range()->exists()) { - mem = isolate_->code_range()->AllocateRawMemory(requested, allocated); - } else { - mem = OS::Allocate(requested, allocated, true); - } - // Update executable memory size. 
- size_executable_ += static_cast<int>(*allocated); - } else { - mem = OS::Allocate(requested, allocated, false); + ASSERT(size_executable_ >= size); + size_executable_ -= size; } - int alloced = static_cast<int>(*allocated); - size_ += alloced; - -#ifdef DEBUG - ZapBlock(reinterpret_cast<Address>(mem), alloced); -#endif - isolate_->counters()->memory_allocated()->Increment(alloced); - return mem; + // Code which is part of the code-range does not have its own VirtualMemory. + ASSERT(!isolate_->code_range()->contains( + static_cast<Address>(reservation->address()))); + ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); + reservation->Release(); } -void MemoryAllocator::FreeRawMemory(void* mem, - size_t length, - Executability executable) { -#ifdef DEBUG - // Do not try to zap the guard page. - size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0; - ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size); -#endif - if (isolate_->code_range()->contains(static_cast<Address>(mem))) { - isolate_->code_range()->FreeRawMemory(mem, length); +void MemoryAllocator::FreeMemory(Address base, + size_t size, + Executability executable) { + // TODO(gc) make code_range part of memory allocator? + ASSERT(size_ >= size); + size_ -= size; + + isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); + + if (executable == EXECUTABLE) { + ASSERT(size_executable_ >= size); + size_executable_ -= size; + } + if (isolate_->code_range()->contains(static_cast<Address>(base))) { + ASSERT(executable == EXECUTABLE); + isolate_->code_range()->FreeRawMemory(base, size); } else { - OS::Free(mem, length); + ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); + bool result = VirtualMemory::ReleaseRegion(base, size); + USE(result); + ASSERT(result); } - isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length)); - size_ -= static_cast<int>(length); - if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length); +} - ASSERT(size_ >= 0); - ASSERT(size_executable_ >= 0); + +Address MemoryAllocator::ReserveAlignedMemory(size_t size, + size_t alignment, + VirtualMemory* controller) { + VirtualMemory reservation(size, alignment); + + if (!reservation.IsReserved()) return NULL; + size_ += reservation.size(); + Address base = RoundUp(static_cast<Address>(reservation.address()), + alignment); + controller->TakeControl(&reservation); + return base; } -void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, - AllocationAction action, - size_t size) { - for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { - MemoryAllocationCallbackRegistration registration = - memory_allocation_callbacks_[i]; - if ((registration.space & space) == space && - (registration.action & action) == action) - registration.callback(space, action, static_cast<int>(size)); +Address MemoryAllocator::AllocateAlignedMemory(size_t size, + size_t alignment, + Executability executable, + VirtualMemory* controller) { + VirtualMemory reservation; + Address base = ReserveAlignedMemory(size, alignment, &reservation); + if (base == NULL) return NULL; + if (!reservation.Commit(base, + size, + executable == EXECUTABLE)) { + return NULL; } + controller->TakeControl(&reservation); + return base; } -bool MemoryAllocator::MemoryAllocationCallbackRegistered( - MemoryAllocationCallback callback) { - for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { - if (memory_allocation_callbacks_[i].callback == callback) 
return true; - } - return false; +void Page::InitializeAsAnchor(PagedSpace* owner) { + set_owner(owner); + set_prev_page(this); + set_next_page(this); } -void MemoryAllocator::AddMemoryAllocationCallback( - MemoryAllocationCallback callback, - ObjectSpace space, - AllocationAction action) { - ASSERT(callback != NULL); - MemoryAllocationCallbackRegistration registration(callback, space, action); - ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); - return memory_allocation_callbacks_.Add(registration); +NewSpacePage* NewSpacePage::Initialize(Heap* heap, + Address start, + SemiSpace* semi_space) { + MemoryChunk* chunk = MemoryChunk::Initialize(heap, + start, + Page::kPageSize, + NOT_EXECUTABLE, + semi_space); + chunk->set_next_chunk(NULL); + chunk->set_prev_chunk(NULL); + chunk->initialize_scan_on_scavenge(true); + bool in_to_space = (semi_space->id() != kFromSpace); + chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE + : MemoryChunk::IN_FROM_SPACE); + ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE + : MemoryChunk::IN_TO_SPACE)); + NewSpacePage* page = static_cast<NewSpacePage*>(chunk); + heap->incremental_marking()->SetNewSpacePageFlags(page); + return page; } -void MemoryAllocator::RemoveMemoryAllocationCallback( - MemoryAllocationCallback callback) { - ASSERT(callback != NULL); - for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { - if (memory_allocation_callbacks_[i].callback == callback) { - memory_allocation_callbacks_.Remove(i); - return; - } - } - UNREACHABLE(); +void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { + set_owner(semi_space); + set_next_chunk(this); + set_prev_chunk(this); + // Flags marks this invalid page as not being in new-space. + // All real new-space pages will be in new-space. + SetFlags(0, ~0); } -void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { - ASSERT(initial_chunk_ == NULL); - initial_chunk_ = new VirtualMemory(requested); - CHECK(initial_chunk_ != NULL); - if (!initial_chunk_->IsReserved()) { - delete initial_chunk_; - initial_chunk_ = NULL; - return NULL; - } +MemoryChunk* MemoryChunk::Initialize(Heap* heap, + Address base, + size_t size, + Executability executable, + Space* owner) { + MemoryChunk* chunk = FromAddress(base); - // We are sure that we have mapped a block of requested addresses. - ASSERT(initial_chunk_->size() == requested); - LOG(isolate_, - NewEvent("InitialChunk", initial_chunk_->address(), requested)); - size_ += static_cast<int>(requested); - return initial_chunk_->address(); -} + ASSERT(base == chunk->address()); + + chunk->heap_ = heap; + chunk->size_ = size; + chunk->flags_ = 0; + chunk->set_owner(owner); + chunk->InitializeReservedMemory(); + chunk->slots_buffer_ = NULL; + chunk->skip_list_ = NULL; + chunk->ResetLiveBytes(); + Bitmap::Clear(chunk); + chunk->initialize_scan_on_scavenge(false); + chunk->SetFlag(WAS_SWEPT_PRECISELY); + ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); + ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); -static int PagesInChunk(Address start, size_t size) { - // The first page starts on the first page-aligned address from start onward - // and the last page ends on the last page-aligned address before - // start+size. Page::kPageSize is a power of two so we can divide by - // shifting. 
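MemoryChunk::Initialize above stamps a header at the start of each chunk: owner, size, live-byte count, mark bitmap and a flags word (WAS_SWEPT_PRECISELY, SCAN_ON_SCAVENGE, IS_EXECUTABLE, ...); set_scan_on_scavenge, earlier in the spaces-inl.h hunk, additionally keeps a heap-wide count of pages that must be scanned during scavenge. A simplified sketch of that flag-word-plus-counter pattern; ToyChunkHeader and the global counter are illustrative, not V8's layout.

#include <cassert>
#include <cstdint>

// Heap-wide count of pages whose old-to-new pointers must be found by
// scanning the page during scavenge instead of via the store buffer.
static int scan_on_scavenge_pages = 0;

struct ToyChunkHeader {
  enum Flag { IS_EXECUTABLE = 0, SCAN_ON_SCAVENGE = 1, WAS_SWEPT_PRECISELY = 2 };

  uintptr_t flags;

  ToyChunkHeader() : flags(0) {}

  bool IsFlagSet(Flag f) const { return (flags & (uintptr_t(1) << f)) != 0; }
  void SetFlag(Flag f) { flags |= uintptr_t(1) << f; }
  void ClearFlag(Flag f) { flags &= ~(uintptr_t(1) << f); }

  // Mirrors MemoryChunk::set_scan_on_scavenge: the global counter is adjusted
  // only when the flag actually changes state.
  void set_scan_on_scavenge(bool scan) {
    if (scan) {
      if (!IsFlagSet(SCAN_ON_SCAVENGE)) scan_on_scavenge_pages++;
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      if (IsFlagSet(SCAN_ON_SCAVENGE)) scan_on_scavenge_pages--;
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
};

int main() {
  ToyChunkHeader chunk;
  chunk.SetFlag(ToyChunkHeader::WAS_SWEPT_PRECISELY);
  chunk.set_scan_on_scavenge(true);
  chunk.set_scan_on_scavenge(true);  // Idempotent: the counter stays at 1.
  assert(scan_on_scavenge_pages == 1);
  chunk.set_scan_on_scavenge(false);
  assert(scan_on_scavenge_pages == 0);
  assert(chunk.IsFlagSet(ToyChunkHeader::WAS_SWEPT_PRECISELY));
  return 0;
}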
- return static_cast<int>((RoundDown(start + size, Page::kPageSize) - - RoundUp(start, Page::kPageSize)) >> kPageSizeBits); + if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); + + if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA); + + return chunk; } -Page* MemoryAllocator::AllocatePages(int requested_pages, - int* allocated_pages, - PagedSpace* owner) { - if (requested_pages <= 0) return Page::FromAddress(NULL); - size_t chunk_size = requested_pages * Page::kPageSize; +void MemoryChunk::InsertAfter(MemoryChunk* other) { + next_chunk_ = other->next_chunk_; + prev_chunk_ = other; + other->next_chunk_->prev_chunk_ = this; + other->next_chunk_ = this; +} - void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable()); - if (chunk == NULL) return Page::FromAddress(NULL); - LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size)); - *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); +void MemoryChunk::Unlink() { + if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { + heap_->decrement_scan_on_scavenge_pages(); + ClearFlag(SCAN_ON_SCAVENGE); + } + next_chunk_->prev_chunk_ = prev_chunk_; + prev_chunk_->next_chunk_ = next_chunk_; + prev_chunk_ = NULL; + next_chunk_ = NULL; +} - // We may 'lose' a page due to alignment. - ASSERT(*allocated_pages >= kPagesPerChunk - 1); - size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0; +MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, + Executability executable, + Space* owner) { + size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; + Heap* heap = isolate_->heap(); + Address base = NULL; + VirtualMemory reservation; + if (executable == EXECUTABLE) { + // Check executable memory limit. + if (size_executable_ + chunk_size > capacity_executable_) { + LOG(isolate_, + StringEvent("MemoryAllocator::AllocateRawMemory", + "V8 Executable Allocation capacity exceeded")); + return NULL; + } - // Check that we got at least one page that we can use. - if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) { - FreeRawMemory(chunk, - chunk_size, - owner->executable()); - LOG(isolate_, DeleteEvent("PagedChunk", chunk)); - return Page::FromAddress(NULL); + // Allocate executable memory either from code range or from the + // OS. + if (isolate_->code_range()->exists()) { + base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); + ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), + MemoryChunk::kAlignment)); + if (base == NULL) return NULL; + size_ += chunk_size; + // Update executable memory size. + size_executable_ += chunk_size; + } else { + base = AllocateAlignedMemory(chunk_size, + MemoryChunk::kAlignment, + executable, + &reservation); + if (base == NULL) return NULL; + // Update executable memory size. 
+ size_executable_ += reservation.size(); + } + } else { + base = AllocateAlignedMemory(chunk_size, + MemoryChunk::kAlignment, + executable, + &reservation); + + if (base == NULL) return NULL; } - if (guard_size != 0) { - OS::Guard(chunk, guard_size); - chunk_size -= guard_size; - chunk = static_cast<Address>(chunk) + guard_size; - --*allocated_pages; +#ifdef DEBUG + ZapBlock(base, chunk_size); +#endif + isolate_->counters()->memory_allocated()-> + Increment(static_cast<int>(chunk_size)); + + LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); + if (owner != NULL) { + ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); + PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); } - int chunk_id = Pop(); - chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner); + MemoryChunk* result = MemoryChunk::Initialize(heap, + base, + chunk_size, + executable, + owner); + result->set_reserved_memory(&reservation); + return result; +} + - ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); - PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); - Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner); +Page* MemoryAllocator::AllocatePage(PagedSpace* owner, + Executability executable) { + MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); + + if (chunk == NULL) return NULL; - return new_pages; + return Page::Initialize(isolate_->heap(), chunk, executable, owner); } -Page* MemoryAllocator::CommitPages(Address start, size_t size, - PagedSpace* owner, int* num_pages) { - ASSERT(start != NULL); - *num_pages = PagesInChunk(start, size); - ASSERT(*num_pages > 0); - ASSERT(initial_chunk_ != NULL); - ASSERT(InInitialChunk(start)); - ASSERT(InInitialChunk(start + size - 1)); - if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { - return Page::FromAddress(NULL); +LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, + Executability executable, + Space* owner) { + MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); + if (chunk == NULL) return NULL; + return LargePage::Initialize(isolate_->heap(), chunk); +} + + +void MemoryAllocator::Free(MemoryChunk* chunk) { + LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); + if (chunk->owner() != NULL) { + ObjectSpace space = + static_cast<ObjectSpace>(1 << chunk->owner()->identity()); + PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); } -#ifdef DEBUG - ZapBlock(start, size); -#endif - isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); - // So long as we correctly overestimated the number of chunks we should not - // run out of chunk ids. 
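Because AllocateChunk above hands out chunks aligned to MemoryChunk::kAlignment, the header for any interior address can be recovered with a single mask, which is what MemoryChunk::FromAddress and FromAnyPointerAddress in the spaces-inl.h hunk rely on. A hedged sketch of the masking trick; the 4 KB alignment and ToyChunkHeader below are illustrative and far smaller than V8's chunks.

#include <cassert>
#include <cstdint>
#include <new>

const uintptr_t kChunkAlignment = 4096;  // Illustrative; V8's chunks are larger.

// A stand-in for the chunk/page header that lives at the start of every
// aligned chunk.
struct ToyChunkHeader {
  uintptr_t size;
};

// MemoryChunk::FromAddress-style lookup: mask away the low bits to land on
// the start of the aligned chunk containing `addr`.
ToyChunkHeader* ChunkFromAddress(const void* addr) {
  uintptr_t raw = reinterpret_cast<uintptr_t>(addr);
  return reinterpret_cast<ToyChunkHeader*>(raw & ~(kChunkAlignment - 1));
}

// A chunk-aligned block standing in for memory returned by AllocateChunk.
alignas(kChunkAlignment) static char chunk_memory[kChunkAlignment];

int main() {
  ToyChunkHeader* header = new (chunk_memory) ToyChunkHeader();
  header->size = sizeof(chunk_memory);

  // Any interior address maps back to the same header with a single mask.
  const char* interior = chunk_memory + 1234;
  assert(ChunkFromAddress(interior) == header);
  assert(ChunkFromAddress(chunk_memory) == header);
  assert(ChunkFromAddress(interior)->size == sizeof(chunk_memory));
  return 0;
}

In the real code this is also what FromAnyPointerAddress builds on, with an extra large-object-space walk for addresses that do not belong to a normal page.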
- CHECK(!OutOfChunkIds()); - int chunk_id = Pop(); - chunks_[chunk_id].init(start, size, owner); - return InitializePagesInChunk(chunk_id, *num_pages, owner); + delete chunk->slots_buffer(); + delete chunk->skip_list(); + + VirtualMemory* reservation = chunk->reserved_memory(); + if (reservation->IsReserved()) { + FreeMemory(reservation, chunk->executable()); + } else { + FreeMemory(chunk->address(), + chunk->size(), + chunk->executable()); + } } bool MemoryAllocator::CommitBlock(Address start, size_t size, Executability executable) { - ASSERT(start != NULL); - ASSERT(size > 0); - ASSERT(initial_chunk_ != NULL); - ASSERT(InInitialChunk(start)); - ASSERT(InInitialChunk(start + size - 1)); - - if (!initial_chunk_->Commit(start, size, executable)) return false; + if (!VirtualMemory::CommitRegion(start, size, executable)) return false; #ifdef DEBUG ZapBlock(start, size); #endif @@ -583,13 +590,7 @@ bool MemoryAllocator::CommitBlock(Address start, bool MemoryAllocator::UncommitBlock(Address start, size_t size) { - ASSERT(start != NULL); - ASSERT(size > 0); - ASSERT(initial_chunk_ != NULL); - ASSERT(InInitialChunk(start)); - ASSERT(InInitialChunk(start + size - 1)); - - if (!initial_chunk_->Uncommit(start, size)) return false; + if (!VirtualMemory::UncommitRegion(start, size)) return false; isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); return true; } @@ -602,130 +603,49 @@ void MemoryAllocator::ZapBlock(Address start, size_t size) { } -Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk, - PagedSpace* owner) { - ASSERT(IsValidChunk(chunk_id)); - ASSERT(pages_in_chunk > 0); - - Address chunk_start = chunks_[chunk_id].address(); - - Address low = RoundUp(chunk_start, Page::kPageSize); - -#ifdef DEBUG - size_t chunk_size = chunks_[chunk_id].size(); - Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); - ASSERT(pages_in_chunk <= - ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize)); -#endif - - Address page_addr = low; - for (int i = 0; i < pages_in_chunk; i++) { - Page* p = Page::FromAddress(page_addr); - p->heap_ = owner->heap(); - p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; - p->InvalidateWatermark(true); - p->SetIsLargeObjectPage(false); - p->SetAllocationWatermark(p->ObjectAreaStart()); - p->SetCachedAllocationWatermark(p->ObjectAreaStart()); - page_addr += Page::kPageSize; +void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, + AllocationAction action, + size_t size) { + for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { + MemoryAllocationCallbackRegistration registration = + memory_allocation_callbacks_[i]; + if ((registration.space & space) == space && + (registration.action & action) == action) + registration.callback(space, action, static_cast<int>(size)); } - - // Set the next page of the last page to 0. - Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); - last_page->opaque_header = OffsetFrom(0) | chunk_id; - - return Page::FromAddress(low); } -Page* MemoryAllocator::FreePages(Page* p) { - if (!p->is_valid()) return p; - - // Find the first page in the same chunk as 'p' - Page* first_page = FindFirstPageInSameChunk(p); - Page* page_to_return = Page::FromAddress(NULL); - - if (p != first_page) { - // Find the last page in the same chunk as 'prev'. 
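PerformAllocationCallback above invokes only those registered callbacks whose ObjectSpace and AllocationAction bitmasks cover the event being reported. A small sketch of that bitmask-filtered registry; the enum values, Registration struct and CountingCallback here are local stand-ins rather than the real v8 ObjectSpace/AllocationAction declarations.

#include <cassert>
#include <cstddef>
#include <vector>

// Bit-flag enums so one registration can cover several spaces and actions.
enum ObjectSpace { kSpaceNew = 1 << 0, kSpaceOld = 1 << 1, kSpaceAll = 0x3 };
enum AllocationAction { kActionAllocate = 1 << 0, kActionFree = 1 << 1,
                        kActionAll = 0x3 };

typedef void (*MemoryAllocationCallback)(ObjectSpace, AllocationAction, size_t);

struct Registration {
  MemoryAllocationCallback callback;
  int space_mask;   // OR of ObjectSpace bits this callback cares about.
  int action_mask;  // OR of AllocationAction bits this callback cares about.
};

static std::vector<Registration> registrations;
static size_t observed_bytes = 0;

void CountingCallback(ObjectSpace, AllocationAction, size_t size) {
  observed_bytes += size;
}

// Mirrors MemoryAllocator::PerformAllocationCallback: only callbacks whose
// masks include both the space and the action are invoked.
void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
                               size_t size) {
  for (size_t i = 0; i < registrations.size(); i++) {
    const Registration& r = registrations[i];
    if ((r.space_mask & space) == space && (r.action_mask & action) == action) {
      r.callback(space, action, size);
    }
  }
}

int main() {
  registrations.push_back({CountingCallback, kSpaceOld, kActionAllocate});
  PerformAllocationCallback(kSpaceOld, kActionAllocate, 4096);  // Counted.
  PerformAllocationCallback(kSpaceNew, kActionAllocate, 4096);  // Filtered out.
  PerformAllocationCallback(kSpaceOld, kActionFree, 4096);      // Filtered out.
  assert(observed_bytes == 4096);
  return 0;
}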
- Page* last_page = FindLastPageInSameChunk(p); - first_page = GetNextPage(last_page); // first page in next chunk - - // set the next_page of last_page to NULL - SetNextPage(last_page, Page::FromAddress(NULL)); - page_to_return = p; // return 'p' when exiting - } - - while (first_page->is_valid()) { - int chunk_id = GetChunkId(first_page); - ASSERT(IsValidChunk(chunk_id)); - - // Find the first page of the next chunk before deleting this chunk. - first_page = GetNextPage(FindLastPageInSameChunk(first_page)); - - // Free the current chunk. - DeleteChunk(chunk_id); +bool MemoryAllocator::MemoryAllocationCallbackRegistered( + MemoryAllocationCallback callback) { + for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { + if (memory_allocation_callbacks_[i].callback == callback) return true; } - - return page_to_return; + return false; } -void MemoryAllocator::FreeAllPages(PagedSpace* space) { - for (int i = 0, length = chunks_.length(); i < length; i++) { - if (chunks_[i].owner() == space) { - DeleteChunk(i); - } - } +void MemoryAllocator::AddMemoryAllocationCallback( + MemoryAllocationCallback callback, + ObjectSpace space, + AllocationAction action) { + ASSERT(callback != NULL); + MemoryAllocationCallbackRegistration registration(callback, space, action); + ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); + return memory_allocation_callbacks_.Add(registration); } -void MemoryAllocator::DeleteChunk(int chunk_id) { - ASSERT(IsValidChunk(chunk_id)); - - ChunkInfo& c = chunks_[chunk_id]; - - // We cannot free a chunk contained in the initial chunk because it was not - // allocated with AllocateRawMemory. Instead we uncommit the virtual - // memory. - if (InInitialChunk(c.address())) { - // TODO(1240712): VirtualMemory::Uncommit has a return value which - // is ignored here. - initial_chunk_->Uncommit(c.address(), c.size()); - Counters* counters = isolate_->counters(); - counters->memory_allocated()->Decrement(static_cast<int>(c.size())); - } else { - LOG(isolate_, DeleteEvent("PagedChunk", c.address())); - ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity()); - size_t size = c.size(); - size_t guard_size = (c.executable() == EXECUTABLE) ? 
Page::kPageSize : 0; - FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable()); - PerformAllocationCallback(space, kAllocationActionFree, size); +void MemoryAllocator::RemoveMemoryAllocationCallback( + MemoryAllocationCallback callback) { + ASSERT(callback != NULL); + for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { + if (memory_allocation_callbacks_[i].callback == callback) { + memory_allocation_callbacks_.Remove(i); + return; + } } - c.init(NULL, 0, NULL); - Push(chunk_id); -} - - -Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { - int chunk_id = GetChunkId(p); - ASSERT(IsValidChunk(chunk_id)); - - Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize); - return Page::FromAddress(low); -} - - -Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) { - int chunk_id = GetChunkId(p); - ASSERT(IsValidChunk(chunk_id)); - - Address chunk_start = chunks_[chunk_id].address(); - size_t chunk_size = chunks_[chunk_id].size(); - - Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); - ASSERT(chunk_start <= p->address() && p->address() < high); - - return Page::FromAddress(high - Page::kPageSize); + UNREACHABLE(); } @@ -739,75 +659,6 @@ void MemoryAllocator::ReportStatistics() { } #endif - -void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space, - Page** first_page, - Page** last_page, - Page** last_page_in_use) { - Page* first = NULL; - Page* last = NULL; - - for (int i = 0, length = chunks_.length(); i < length; i++) { - ChunkInfo& chunk = chunks_[i]; - - if (chunk.owner() == space) { - if (first == NULL) { - Address low = RoundUp(chunk.address(), Page::kPageSize); - first = Page::FromAddress(low); - } - last = RelinkPagesInChunk(i, - chunk.address(), - chunk.size(), - last, - last_page_in_use); - } - } - - if (first_page != NULL) { - *first_page = first; - } - - if (last_page != NULL) { - *last_page = last; - } -} - - -Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id, - Address chunk_start, - size_t chunk_size, - Page* prev, - Page** last_page_in_use) { - Address page_addr = RoundUp(chunk_start, Page::kPageSize); - int pages_in_chunk = PagesInChunk(chunk_start, chunk_size); - - if (prev->is_valid()) { - SetNextPage(prev, Page::FromAddress(page_addr)); - } - - for (int i = 0; i < pages_in_chunk; i++) { - Page* p = Page::FromAddress(page_addr); - p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id; - page_addr += Page::kPageSize; - - p->InvalidateWatermark(true); - if (p->WasInUseBeforeMC()) { - *last_page_in_use = p; - } - } - - // Set the next page of the last page to 0. 
- Page* last_page = Page::FromAddress(page_addr - Page::kPageSize); - last_page->opaque_header = OffsetFrom(0) | chunk_id; - - if (last_page->WasInUseBeforeMC()) { - *last_page_in_use = last_page; - } - - return last_page; -} - - // ----------------------------------------------------------------------------- // PagedSpace implementation @@ -815,7 +666,11 @@ PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, Executability executable) - : Space(heap, id, executable) { + : Space(heap, id, executable), + free_list_(this), + was_swept_conservatively_(false), + first_unswept_page_(Page::FromAddress(NULL)), + last_unswept_page_(Page::FromAddress(NULL)) { max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * Page::kObjectAreaSize; accounting_stats_.Clear(); @@ -823,215 +678,73 @@ PagedSpace::PagedSpace(Heap* heap, allocation_info_.top = NULL; allocation_info_.limit = NULL; - mc_forwarding_info_.top = NULL; - mc_forwarding_info_.limit = NULL; + anchor_.InitializeAsAnchor(this); } -bool PagedSpace::Setup(Address start, size_t size) { - if (HasBeenSetup()) return false; - - int num_pages = 0; - // Try to use the virtual memory range passed to us. If it is too small to - // contain at least one page, ignore it and allocate instead. - int pages_in_chunk = PagesInChunk(start, size); - if (pages_in_chunk > 0) { - first_page_ = Isolate::Current()->memory_allocator()->CommitPages( - RoundUp(start, Page::kPageSize), - Page::kPageSize * pages_in_chunk, - this, &num_pages); - } else { - int requested_pages = - Min(MemoryAllocator::kPagesPerChunk, - static_cast<int>(max_capacity_ / Page::kObjectAreaSize)); - first_page_ = - Isolate::Current()->memory_allocator()->AllocatePages( - requested_pages, &num_pages, this); - if (!first_page_->is_valid()) return false; - } - - // We are sure that the first page is valid and that we have at least one - // page. - ASSERT(first_page_->is_valid()); - ASSERT(num_pages > 0); - accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize); - ASSERT(Capacity() <= max_capacity_); - - // Sequentially clear region marks in the newly allocated - // pages and cache the current last page in the space. - for (Page* p = first_page_; p->is_valid(); p = p->next_page()) { - p->SetRegionMarks(Page::kAllRegionsCleanMarks); - last_page_ = p; - } - - // Use first_page_ for allocation. - SetAllocationInfo(&allocation_info_, first_page_); - - page_list_is_chunk_ordered_ = true; - +bool PagedSpace::Setup() { return true; } bool PagedSpace::HasBeenSetup() { - return (Capacity() > 0); + return true; } void PagedSpace::TearDown() { - Isolate::Current()->memory_allocator()->FreeAllPages(this); - first_page_ = NULL; - accounting_stats_.Clear(); -} - - -void PagedSpace::MarkAllPagesClean() { - PageIterator it(this, PageIterator::ALL_PAGES); - while (it.has_next()) { - it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks); + PageIterator iterator(this); + while (iterator.has_next()) { + heap()->isolate()->memory_allocator()->Free(iterator.next()); } + anchor_.set_next_page(&anchor_); + anchor_.set_prev_page(&anchor_); + accounting_stats_.Clear(); } MaybeObject* PagedSpace::FindObject(Address addr) { - // Note: this function can only be called before or after mark-compact GC - // because it accesses map pointers. + // Note: this function can only be called on precisely swept spaces. 
ASSERT(!heap()->mark_compact_collector()->in_use()); if (!Contains(addr)) return Failure::Exception(); Page* p = Page::FromAddress(addr); - ASSERT(IsUsed(p)); - Address cur = p->ObjectAreaStart(); - Address end = p->AllocationTop(); - while (cur < end) { - HeapObject* obj = HeapObject::FromAddress(cur); + HeapObjectIterator it(p, NULL); + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { + Address cur = obj->address(); Address next = cur + obj->Size(); if ((cur <= addr) && (addr < next)) return obj; - cur = next; } UNREACHABLE(); return Failure::Exception(); } +bool PagedSpace::CanExpand() { + ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); + ASSERT(Capacity() % Page::kObjectAreaSize == 0); -bool PagedSpace::IsUsed(Page* page) { - PageIterator it(this, PageIterator::PAGES_IN_USE); - while (it.has_next()) { - if (page == it.next()) return true; - } - return false; -} - - -void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) { - alloc_info->top = p->ObjectAreaStart(); - alloc_info->limit = p->ObjectAreaEnd(); - ASSERT(alloc_info->VerifyPagedAllocation()); -} - - -void PagedSpace::MCResetRelocationInfo() { - // Set page indexes. - int i = 0; - PageIterator it(this, PageIterator::ALL_PAGES); - while (it.has_next()) { - Page* p = it.next(); - p->mc_page_index = i++; - } - - // Set mc_forwarding_info_ to the first page in the space. - SetAllocationInfo(&mc_forwarding_info_, first_page_); - // All the bytes in the space are 'available'. We will rediscover - // allocated and wasted bytes during GC. - accounting_stats_.Reset(); -} - - -int PagedSpace::MCSpaceOffsetForAddress(Address addr) { -#ifdef DEBUG - // The Contains function considers the address at the beginning of a - // page in the page, MCSpaceOffsetForAddress considers it is in the - // previous page. - if (Page::IsAlignedToPageSize(addr)) { - ASSERT(Contains(addr - kPointerSize)); - } else { - ASSERT(Contains(addr)); - } -#endif - - // If addr is at the end of a page, it belongs to previous page - Page* p = Page::IsAlignedToPageSize(addr) - ? Page::FromAllocationTop(addr) - : Page::FromAddress(addr); - int index = p->mc_page_index; - return (index * Page::kPageSize) + p->Offset(addr); -} + if (Capacity() == max_capacity_) return false; + ASSERT(Capacity() < max_capacity_); -// Slow case for reallocating and promoting objects during a compacting -// collection. This function is not space-specific. -HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) { - Page* current_page = TopPageOf(mc_forwarding_info_); - if (!current_page->next_page()->is_valid()) { - if (!Expand(current_page)) { - return NULL; - } - } + // Are we going to exceed capacity for this space? + if ((Capacity() + Page::kPageSize) > max_capacity_) return false; - // There are surely more pages in the space now. - ASSERT(current_page->next_page()->is_valid()); - // We do not add the top of page block for current page to the space's - // free list---the block may contain live objects so we cannot write - // bookkeeping information to it. Instead, we will recover top of page - // blocks when we move objects to their new locations. - // - // We do however write the allocation pointer to the page. The encoding - // of forwarding addresses is as an offset in terms of live bytes, so we - // need quick access to the allocation top of each page to decode - // forwarding addresses. 
- current_page->SetAllocationWatermark(mc_forwarding_info_.top); - current_page->next_page()->InvalidateWatermark(true); - SetAllocationInfo(&mc_forwarding_info_, current_page->next_page()); - return AllocateLinearly(&mc_forwarding_info_, size_in_bytes); + return true; } +bool PagedSpace::Expand() { + if (!CanExpand()) return false; -bool PagedSpace::Expand(Page* last_page) { - ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); - ASSERT(Capacity() % Page::kObjectAreaSize == 0); - - if (Capacity() == max_capacity_) return false; + Page* p = heap()->isolate()->memory_allocator()-> + AllocatePage(this, executable()); + if (p == NULL) return false; - ASSERT(Capacity() < max_capacity_); - // Last page must be valid and its next page is invalid. - ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid()); - - int available_pages = - static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize); - // We don't want to have to handle small chunks near the end so if there are - // not kPagesPerChunk pages available without exceeding the max capacity then - // act as if memory has run out. - if (available_pages < MemoryAllocator::kPagesPerChunk) return false; - - int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk); - Page* p = heap()->isolate()->memory_allocator()->AllocatePages( - desired_pages, &desired_pages, this); - if (!p->is_valid()) return false; - - accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize); ASSERT(Capacity() <= max_capacity_); - heap()->isolate()->memory_allocator()->SetNextPage(last_page, p); - - // Sequentially clear region marks of new pages and and cache the - // new last page in the space. - while (p->is_valid()) { - p->SetRegionMarks(Page::kAllRegionsCleanMarks); - last_page_ = p; - p = p->next_page(); - } + p->InsertAfter(anchor_.prev_page()); return true; } @@ -1039,8 +752,10 @@ bool PagedSpace::Expand(Page* last_page) { #ifdef DEBUG int PagedSpace::CountTotalPages() { + PageIterator it(this); int count = 0; - for (Page* p = first_page_; p->is_valid(); p = p->next_page()) { + while (it.has_next()) { + it.next(); count++; } return count; @@ -1048,63 +763,30 @@ int PagedSpace::CountTotalPages() { #endif -void PagedSpace::Shrink() { - if (!page_list_is_chunk_ordered_) { - // We can't shrink space if pages is not chunk-ordered - // (see comment for class MemoryAllocator for definition). - return; - } - - // Release half of free pages. - Page* top_page = AllocationTopPage(); - ASSERT(top_page->is_valid()); - - // Count the number of pages we would like to free. - int pages_to_free = 0; - for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) { - pages_to_free++; - } - - // Free pages after top_page. - Page* p = heap()->isolate()->memory_allocator()-> - FreePages(top_page->next_page()); - heap()->isolate()->memory_allocator()->SetNextPage(top_page, p); - - // Find out how many pages we failed to free and update last_page_. - // Please note pages can only be freed in whole chunks. 
- last_page_ = top_page; - for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) { - pages_to_free--; - last_page_ = p; +void PagedSpace::ReleasePage(Page* page) { + ASSERT(page->LiveBytes() == 0); + page->Unlink(); + if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { + heap()->isolate()->memory_allocator()->Free(page); + } else { + heap()->QueueMemoryChunkForFree(page); } - accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize); - ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize); + ASSERT(Capacity() > 0); + ASSERT(Capacity() % Page::kObjectAreaSize == 0); + accounting_stats_.ShrinkSpace(Page::kObjectAreaSize); } -bool PagedSpace::EnsureCapacity(int capacity) { - if (Capacity() >= capacity) return true; - - // Start from the allocation top and loop to the last page in the space. - Page* last_page = AllocationTopPage(); - Page* next_page = last_page->next_page(); - while (next_page->is_valid()) { - last_page = heap()->isolate()->memory_allocator()-> - FindLastPageInSameChunk(next_page); - next_page = last_page->next_page(); +void PagedSpace::ReleaseAllUnusedPages() { + PageIterator it(this); + while (it.has_next()) { + Page* page = it.next(); + if (page->LiveBytes() == 0) { + ReleasePage(page); + } } - - // Expand the space until it has the required capacity or expansion fails. - do { - if (!Expand(last_page)) return false; - ASSERT(last_page->next_page()->is_valid()); - last_page = - heap()->isolate()->memory_allocator()->FindLastPageInSameChunk( - last_page->next_page()); - } while (Capacity() < capacity); - - return true; + heap()->FreeQueuedChunks(); } @@ -1114,61 +796,52 @@ void PagedSpace::Print() { } #ifdef DEBUG -// We do not assume that the PageIterator works, because it depends on the -// invariants we are checking during verification. void PagedSpace::Verify(ObjectVisitor* visitor) { - // The allocation pointer should be valid, and it should be in a page in the - // space. - ASSERT(allocation_info_.VerifyPagedAllocation()); - Page* top_page = Page::FromAllocationTop(allocation_info_.top); - ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this)); - - // Loop over all the pages. - bool above_allocation_top = false; - Page* current_page = first_page_; - while (current_page->is_valid()) { - if (above_allocation_top) { - // We don't care what's above the allocation top. - } else { - Address top = current_page->AllocationTop(); - if (current_page == top_page) { - ASSERT(top == allocation_info_.top); - // The next page will be above the allocation top. - above_allocation_top = true; - } - - // It should be packed with objects from the bottom to the top. - Address current = current_page->ObjectAreaStart(); - while (current < top) { - HeapObject* object = HeapObject::FromAddress(current); - - // The first word should be a map, and we expect all map pointers to - // be in map space. - Map* map = object->map(); - ASSERT(map->IsMap()); - ASSERT(heap()->map_space()->Contains(map)); - - // Perform space-specific object verification. - VerifyObject(object); - - // The object itself should look OK. - object->Verify(); - - // All the interior pointers should be contained in the heap and - // have page regions covering intergenerational references should be - // marked dirty. - int size = object->Size(); - object->IterateBody(map->instance_type(), size, visitor); - - current += size; + // We can only iterate over the pages if they were swept precisely. 
+ if (was_swept_conservatively_) return; + + bool allocation_pointer_found_in_space = + (allocation_info_.top == allocation_info_.limit); + PageIterator page_iterator(this); + while (page_iterator.has_next()) { + Page* page = page_iterator.next(); + ASSERT(page->owner() == this); + if (page == Page::FromAllocationTop(allocation_info_.top)) { + allocation_pointer_found_in_space = true; + } + ASSERT(page->WasSweptPrecisely()); + HeapObjectIterator it(page, NULL); + Address end_of_previous_object = page->ObjectAreaStart(); + Address top = page->ObjectAreaEnd(); + int black_size = 0; + for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { + ASSERT(end_of_previous_object <= object->address()); + + // The first word should be a map, and we expect all map pointers to + // be in map space. + Map* map = object->map(); + ASSERT(map->IsMap()); + ASSERT(heap()->map_space()->Contains(map)); + + // Perform space-specific object verification. + VerifyObject(object); + + // The object itself should look OK. + object->Verify(); + + // All the interior pointers should be contained in the heap. + int size = object->Size(); + object->IterateBody(map->instance_type(), size, visitor); + if (Marking::IsBlack(Marking::MarkBitFrom(object))) { + black_size += size; } - // The allocation pointer should not be in the middle of an object. - ASSERT(current == top); + ASSERT(object->address() + size <= top); + end_of_previous_object = object->address() + size; } - - current_page = current_page->next_page(); + ASSERT_LE(black_size, page->LiveBytes()); } + ASSERT(allocation_pointer_found_in_space); } #endif @@ -1177,13 +850,23 @@ void PagedSpace::Verify(ObjectVisitor* visitor) { // NewSpace implementation -bool NewSpace::Setup(Address start, int size) { +bool NewSpace::Setup(int reserved_semispace_capacity, + int maximum_semispace_capacity) { // Setup new space based on the preallocated memory block defined by // start and size. The provided space is divided into two semi-spaces. // To support fast containment testing in the new space, the size of // this chunk must be a power of two and it must be aligned to its size. 
int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); - int maximum_semispace_capacity = heap()->MaxSemiSpaceSize(); + + size_t size = 2 * reserved_semispace_capacity; + Address base = + heap()->isolate()->memory_allocator()->ReserveAlignedMemory( + size, size, &reservation_); + if (base == NULL) return false; + + chunk_base_ = base; + chunk_size_ = static_cast<uintptr_t>(size); + LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); ASSERT(IsPowerOf2(maximum_semispace_capacity)); @@ -1197,31 +880,29 @@ bool NewSpace::Setup(Address start, int size) { INSTANCE_TYPE_LIST(SET_NAME) #undef SET_NAME - ASSERT(size == 2 * heap()->ReservedSemiSpaceSize()); - ASSERT(IsAddressAligned(start, size, 0)); + ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); + ASSERT(static_cast<intptr_t>(chunk_size_) >= + 2 * heap()->ReservedSemiSpaceSize()); + ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); - if (!to_space_.Setup(start, + if (!to_space_.Setup(chunk_base_, initial_semispace_capacity, maximum_semispace_capacity)) { return false; } - if (!from_space_.Setup(start + maximum_semispace_capacity, + if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity, initial_semispace_capacity, maximum_semispace_capacity)) { return false; } - start_ = start; - address_mask_ = ~(size - 1); + start_ = chunk_base_; + address_mask_ = ~(2 * reserved_semispace_capacity - 1); object_mask_ = address_mask_ | kHeapObjectTagMask; - object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; + object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; - allocation_info_.top = to_space_.low(); - allocation_info_.limit = to_space_.high(); - mc_forwarding_info_.top = NULL; - mc_forwarding_info_.limit = NULL; + ResetAllocationInfo(); - ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); return true; } @@ -1239,28 +920,34 @@ void NewSpace::TearDown() { start_ = NULL; allocation_info_.top = NULL; allocation_info_.limit = NULL; - mc_forwarding_info_.top = NULL; - mc_forwarding_info_.limit = NULL; to_space_.TearDown(); from_space_.TearDown(); + + LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); + + ASSERT(reservation_.IsReserved()); + heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, + NOT_EXECUTABLE); + chunk_base_ = NULL; + chunk_size_ = 0; } void NewSpace::Flip() { - SemiSpace tmp = from_space_; - from_space_ = to_space_; - to_space_ = tmp; + SemiSpace::Swap(&from_space_, &to_space_); } void NewSpace::Grow() { + // Double the semispace size but only up to maximum capacity. ASSERT(Capacity() < MaximumCapacity()); - if (to_space_.Grow()) { - // Only grow from space if we managed to grow to space. - if (!from_space_.Grow()) { - // If we managed to grow to space but couldn't grow from space, - // attempt to shrink to space. + int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); + if (to_space_.GrowTo(new_capacity)) { + // Only grow from space if we managed to grow to-space. + if (!from_space_.GrowTo(new_capacity)) { + // If we managed to grow to-space but couldn't grow from-space, + // attempt to shrink to-space. if (!to_space_.ShrinkTo(from_space_.Capacity())) { // We are in an inconsistent state because we could not // commit/uncommit memory from new space. 
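The rewritten NewSpace::Setup above now reserves the whole 2 * semispace chunk itself (ReserveAlignedMemory) and derives start_ and address_mask_ from that power-of-two, size-aligned reservation. As a minimal standalone sketch, outside the diff, of the mask-and-compare containment test those two fields make possible; the class, constants, and addresses below are illustrative assumptions, not V8 code.

// Sketch only: illustrates the fast containment test enabled by a
// size-aligned, power-of-two new-space reservation. Not part of the diff.
#include <cassert>
#include <cstdint>

struct NewSpaceSketch {
  uintptr_t start;         // base of the 2 * semispace reservation
  uintptr_t address_mask;  // ~(2 * reserved_semispace_capacity - 1)

  NewSpaceSketch(uintptr_t base, uintptr_t reserved_semispace)
      : start(base), address_mask(~(2 * reserved_semispace - 1)) {}

  // Because the reservation is aligned to its own size, a single mask and
  // compare decides whether an arbitrary address lies inside new space.
  bool Contains(uintptr_t addr) const {
    return (addr & address_mask) == start;
  }
};

int main() {
  const uintptr_t kSemi = uintptr_t(1) << 20;   // assumed 1 MB semispace
  const uintptr_t kBase = uintptr_t(16) << 20;  // assumed 2 MB-aligned base
  NewSpaceSketch space(kBase, kSemi);
  assert(space.Contains(kBase));
  assert(space.Contains(kBase + 2 * kSemi - 1));
  assert(!space.Contains(kBase + 2 * kSemi));
  return 0;
}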
@@ -1268,21 +955,20 @@ void NewSpace::Grow() { } } } - allocation_info_.limit = to_space_.high(); ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); } void NewSpace::Shrink() { int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); - int rounded_new_capacity = - RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment())); + int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize); if (rounded_new_capacity < Capacity() && to_space_.ShrinkTo(rounded_new_capacity)) { - // Only shrink from space if we managed to shrink to space. + // Only shrink from-space if we managed to shrink to-space. + from_space_.Reset(); if (!from_space_.ShrinkTo(rounded_new_capacity)) { - // If we managed to shrink to space but couldn't shrink from - // space, attempt to grow to space again. + // If we managed to shrink to-space but couldn't shrink from + // space, attempt to grow to-space again. if (!to_space_.GrowTo(from_space_.Capacity())) { // We are in an inconsistent state because we could not // commit/uncommit memory from new space. @@ -1290,36 +976,65 @@ void NewSpace::Shrink() { } } } - allocation_info_.limit = to_space_.high(); + allocation_info_.limit = to_space_.page_high(); ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); } -void NewSpace::ResetAllocationInfo() { - allocation_info_.top = to_space_.low(); - allocation_info_.limit = to_space_.high(); +void NewSpace::UpdateAllocationInfo() { + allocation_info_.top = to_space_.page_low(); + allocation_info_.limit = to_space_.page_high(); + + // Lower limit during incremental marking. + if (heap()->incremental_marking()->IsMarking() && + inline_allocation_limit_step() != 0) { + Address new_limit = + allocation_info_.top + inline_allocation_limit_step(); + allocation_info_.limit = Min(new_limit, allocation_info_.limit); + } ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); } -void NewSpace::MCResetRelocationInfo() { - mc_forwarding_info_.top = from_space_.low(); - mc_forwarding_info_.limit = from_space_.high(); - ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_); +void NewSpace::ResetAllocationInfo() { + to_space_.Reset(); + UpdateAllocationInfo(); + pages_used_ = 0; + // Clear all mark-bits in the to-space. + NewSpacePageIterator it(&to_space_); + while (it.has_next()) { + Bitmap::Clear(it.next()); + } } -void NewSpace::MCCommitRelocationInfo() { - // Assumes that the spaces have been flipped so that mc_forwarding_info_ is - // valid allocation info for the to space. - allocation_info_.top = mc_forwarding_info_.top; - allocation_info_.limit = to_space_.high(); - ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); +bool NewSpace::AddFreshPage() { + Address top = allocation_info_.top; + if (NewSpacePage::IsAtStart(top)) { + // The current page is already empty. Don't try to make another. + + // We should only get here if someone asks to allocate more + // than what can be stored in a single page. + // TODO(gc): Change the limit on new-space allocation to prevent this + // from happening (all such allocations should go directly to LOSpace). + return false; + } + if (!to_space_.AdvancePage()) { + // Failed to get a new page in to-space. + return false; + } + // Clear remainder of current page. 
+ int remaining_in_page = + static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top); + heap()->CreateFillerObjectAt(top, remaining_in_page); + pages_used_++; + UpdateAllocationInfo(); + return true; } #ifdef DEBUG -// We do not use the SemispaceIterator because verification doesn't assume +// We do not use the SemiSpaceIterator because verification doesn't assume // that it works (it depends on the invariants we are checking). void NewSpace::Verify() { // The allocation pointer should be in the space or at the very end. @@ -1327,58 +1042,52 @@ void NewSpace::Verify() { // There should be objects packed in from the low address up to the // allocation pointer. - Address current = to_space_.low(); - while (current < top()) { - HeapObject* object = HeapObject::FromAddress(current); - - // The first word should be a map, and we expect all map pointers to - // be in map space. - Map* map = object->map(); - ASSERT(map->IsMap()); - ASSERT(heap()->map_space()->Contains(map)); + Address current = to_space_.first_page()->body(); + CHECK_EQ(current, to_space_.space_start()); - // The object should not be code or a map. - ASSERT(!object->IsMap()); - ASSERT(!object->IsCode()); + while (current != top()) { + if (!NewSpacePage::IsAtEnd(current)) { + // The allocation pointer should not be in the middle of an object. + CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || + current < top()); - // The object itself should look OK. - object->Verify(); + HeapObject* object = HeapObject::FromAddress(current); - // All the interior pointers should be contained in the heap. - VerifyPointersVisitor visitor; - int size = object->Size(); - object->IterateBody(map->instance_type(), size, &visitor); + // The first word should be a map, and we expect all map pointers to + // be in map space. + Map* map = object->map(); + CHECK(map->IsMap()); + CHECK(heap()->map_space()->Contains(map)); - current += size; - } + // The object should not be code or a map. + CHECK(!object->IsMap()); + CHECK(!object->IsCode()); - // The allocation pointer should not be in the middle of an object. - ASSERT(current == top()); -} -#endif + // The object itself should look OK. + object->Verify(); + // All the interior pointers should be contained in the heap. + VerifyPointersVisitor visitor; + int size = object->Size(); + object->IterateBody(map->instance_type(), size, &visitor); -bool SemiSpace::Commit() { - ASSERT(!is_committed()); - if (!heap()->isolate()->memory_allocator()->CommitBlock( - start_, capacity_, executable())) { - return false; + current += size; + } else { + // At end of page, switch to next page. + NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); + // Next page should be valid. + CHECK(!page->is_anchor()); + current = page->body(); + } } - committed_ = true; - return true; -} - -bool SemiSpace::Uncommit() { - ASSERT(is_committed()); - if (!heap()->isolate()->memory_allocator()->UncommitBlock( - start_, capacity_)) { - return false; - } - committed_ = false; - return true; + // Check semi-spaces. + ASSERT_EQ(from_space_.id(), kFromSpace); + ASSERT_EQ(to_space_.id(), kToSpace); + from_space_.Verify(); + to_space_.Verify(); } - +#endif // ----------------------------------------------------------------------------- // SemiSpace implementation @@ -1392,11 +1101,11 @@ bool SemiSpace::Setup(Address start, // otherwise. In the mark-compact collector, the memory region of the from // space is used as the marking stack. It requires contiguous memory // addresses. 
- initial_capacity_ = initial_capacity; + ASSERT(maximum_capacity >= Page::kPageSize); + initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); capacity_ = initial_capacity; - maximum_capacity_ = maximum_capacity; + maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); committed_ = false; - start_ = start; address_mask_ = ~(maximum_capacity - 1); object_mask_ = address_mask_ | kHeapObjectTagMask; @@ -1413,81 +1122,258 @@ void SemiSpace::TearDown() { } -bool SemiSpace::Grow() { - // Double the semispace size but only up to maximum capacity. - int maximum_extra = maximum_capacity_ - capacity_; - int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())), - maximum_extra); - if (!heap()->isolate()->memory_allocator()->CommitBlock( - high(), extra, executable())) { +bool SemiSpace::Commit() { + ASSERT(!is_committed()); + int pages = capacity_ / Page::kPageSize; + Address end = start_ + maximum_capacity_; + Address start = end - pages * Page::kPageSize; + if (!heap()->isolate()->memory_allocator()->CommitBlock(start, + capacity_, + executable())) { return false; } - capacity_ += extra; + + NewSpacePage* page = anchor(); + for (int i = 1; i <= pages; i++) { + NewSpacePage* new_page = + NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this); + new_page->InsertAfter(page); + page = new_page; + } + + committed_ = true; + Reset(); + return true; +} + + +bool SemiSpace::Uncommit() { + ASSERT(is_committed()); + Address start = start_ + maximum_capacity_ - capacity_; + if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { + return false; + } + anchor()->set_next_page(anchor()); + anchor()->set_prev_page(anchor()); + + committed_ = false; return true; } bool SemiSpace::GrowTo(int new_capacity) { + ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); ASSERT(new_capacity <= maximum_capacity_); ASSERT(new_capacity > capacity_); + int pages_before = capacity_ / Page::kPageSize; + int pages_after = new_capacity / Page::kPageSize; + + Address end = start_ + maximum_capacity_; + Address start = end - new_capacity; size_t delta = new_capacity - capacity_; + ASSERT(IsAligned(delta, OS::AllocateAlignment())); if (!heap()->isolate()->memory_allocator()->CommitBlock( - high(), delta, executable())) { + start, delta, executable())) { return false; } capacity_ = new_capacity; + NewSpacePage* last_page = anchor()->prev_page(); + ASSERT(last_page != anchor()); + for (int i = pages_before + 1; i <= pages_after; i++) { + Address page_address = end - i * Page::kPageSize; + NewSpacePage* new_page = NewSpacePage::Initialize(heap(), + page_address, + this); + new_page->InsertAfter(last_page); + Bitmap::Clear(new_page); + // Duplicate the flags that was set on the old page. + new_page->SetFlags(last_page->GetFlags(), + NewSpacePage::kCopyOnFlipFlagsMask); + last_page = new_page; + } return true; } bool SemiSpace::ShrinkTo(int new_capacity) { + ASSERT((new_capacity & Page::kPageAlignmentMask) == 0); ASSERT(new_capacity >= initial_capacity_); ASSERT(new_capacity < capacity_); + // Semispaces grow backwards from the end of their allocated capacity, + // so we find the before and after start addresses relative to the + // end of the space. 
+ Address space_end = start_ + maximum_capacity_; + Address old_start = space_end - capacity_; size_t delta = capacity_ - new_capacity; ASSERT(IsAligned(delta, OS::AllocateAlignment())); - if (!heap()->isolate()->memory_allocator()->UncommitBlock( - high() - delta, delta)) { + if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) { return false; } capacity_ = new_capacity; + + int pages_after = capacity_ / Page::kPageSize; + NewSpacePage* new_last_page = + NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize); + new_last_page->set_next_page(anchor()); + anchor()->set_prev_page(new_last_page); + ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page)); + return true; } +void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { + anchor_.set_owner(this); + // Fixup back-pointers to anchor. Address of anchor changes + // when we swap. + anchor_.prev_page()->set_next_page(&anchor_); + anchor_.next_page()->set_prev_page(&anchor_); + + bool becomes_to_space = (id_ == kFromSpace); + id_ = becomes_to_space ? kToSpace : kFromSpace; + NewSpacePage* page = anchor_.next_page(); + while (page != &anchor_) { + page->set_owner(this); + page->SetFlags(flags, mask); + if (becomes_to_space) { + page->ClearFlag(MemoryChunk::IN_FROM_SPACE); + page->SetFlag(MemoryChunk::IN_TO_SPACE); + page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); + page->ResetLiveBytes(); + } else { + page->SetFlag(MemoryChunk::IN_FROM_SPACE); + page->ClearFlag(MemoryChunk::IN_TO_SPACE); + } + ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); + ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) || + page->IsFlagSet(MemoryChunk::IN_FROM_SPACE)); + page = page->next_page(); + } +} + + +void SemiSpace::Reset() { + ASSERT(anchor_.next_page() != &anchor_); + current_page_ = anchor_.next_page(); +} + + +void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { + // We won't be swapping semispaces without data in them. + ASSERT(from->anchor_.next_page() != &from->anchor_); + ASSERT(to->anchor_.next_page() != &to->anchor_); + + // Swap bits. + SemiSpace tmp = *from; + *from = *to; + *to = tmp; + + // Fixup back-pointers to the page list anchor now that its address + // has changed. + // Swap to/from-space bits on pages. + // Copy GC flags from old active space (from-space) to new (to-space). + intptr_t flags = from->current_page()->GetFlags(); + to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask); + + from->FlipPages(0, 0); +} + + +void SemiSpace::set_age_mark(Address mark) { + ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this); + age_mark_ = mark; + // Mark all pages up to the one containing mark. + NewSpacePageIterator it(space_start(), mark); + while (it.has_next()) { + it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); + } +} + + #ifdef DEBUG void SemiSpace::Print() { } -void SemiSpace::Verify() { } +void SemiSpace::Verify() { + bool is_from_space = (id_ == kFromSpace); + NewSpacePage* page = anchor_.next_page(); + CHECK(anchor_.semi_space() == this); + while (page != &anchor_) { + CHECK(page->semi_space() == this); + CHECK(page->InNewSpace()); + CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE + : MemoryChunk::IN_TO_SPACE)); + CHECK(!page->IsFlagSet(is_from_space ? 
MemoryChunk::IN_TO_SPACE + : MemoryChunk::IN_FROM_SPACE)); + CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING)); + if (!is_from_space) { + // The pointers-from-here-are-interesting flag isn't updated dynamically + // on from-space pages, so it might be out of sync with the marking state. + if (page->heap()->incremental_marking()->IsMarking()) { + CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); + } else { + CHECK(!page->IsFlagSet( + MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); + } + // TODO(gc): Check that the live_bytes_count_ field matches the + // black marking on the page (if we make it match in new-space). + } + CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); + CHECK(page->prev_page()->next_page() == page); + page = page->next_page(); + } +} + + +void SemiSpace::AssertValidRange(Address start, Address end) { + // Addresses belong to same semi-space + NewSpacePage* page = NewSpacePage::FromLimit(start); + NewSpacePage* end_page = NewSpacePage::FromLimit(end); + SemiSpace* space = page->semi_space(); + CHECK_EQ(space, end_page->semi_space()); + // Start address is before end address, either on same page, + // or end address is on a later page in the linked list of + // semi-space pages. + if (page == end_page) { + CHECK(start <= end); + } else { + while (page != end_page) { + page = page->next_page(); + CHECK_NE(page, space->anchor()); + } + } +} #endif // ----------------------------------------------------------------------------- // SemiSpaceIterator implementation. SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) { - Initialize(space, space->bottom(), space->top(), NULL); + Initialize(space->bottom(), space->top(), NULL); } SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func) { - Initialize(space, space->bottom(), space->top(), size_func); + Initialize(space->bottom(), space->top(), size_func); } SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) { - Initialize(space, start, space->top(), NULL); + Initialize(start, space->top(), NULL); +} + + +SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) { + Initialize(from, to, NULL); } -void SemiSpaceIterator::Initialize(NewSpace* space, Address start, +void SemiSpaceIterator::Initialize(Address start, Address end, HeapObjectCallback size_func) { - ASSERT(space->ToSpaceContains(start)); - ASSERT(space->ToSpaceLow() <= end - && end <= space->ToSpaceHigh()); - space_ = &space->to_space_; + SemiSpace::AssertValidRange(start, end); current_ = start; limit_ = end; size_func_ = size_func; @@ -1623,7 +1509,7 @@ void NewSpace::ClearHistograms() { void NewSpace::CollectStatistics() { ClearHistograms(); SemiSpaceIterator it(this); - for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) RecordAllocation(obj); } @@ -1699,7 +1585,6 @@ void NewSpace::RecordPromotion(HeapObject* obj) { promoted_histogram_[type].increment_bytes(obj->Size()); } - // ----------------------------------------------------------------------------- // Free lists for old object spaces implementation @@ -1708,17 +1593,17 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) { ASSERT(IsAligned(size_in_bytes, kPointerSize)); // We write a map and possibly size information to the block. 
If the block - // is big enough to be a ByteArray with at least one extra word (the next - // pointer), we set its map to be the byte array map and its size to an + // is big enough to be a FreeSpace with at least one extra word (the next + // pointer), we set its map to be the free space map and its size to an // appropriate array length for the desired size from HeapObject::Size(). // If the block is too small (eg, one or two words), to hold both a size // field and a next pointer, we give it a filler map that gives it the // correct size. - if (size_in_bytes > ByteArray::kHeaderSize) { - set_map(heap->raw_unchecked_byte_array_map()); - // Can't use ByteArray::cast because it fails during deserialization. - ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this); - this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes)); + if (size_in_bytes > FreeSpace::kHeaderSize) { + set_map(heap->raw_unchecked_free_space_map()); + // Can't use FreeSpace::cast because it fails during deserialization. + FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); + this_as_free_space->set_size(size_in_bytes); } else if (size_in_bytes == kPointerSize) { set_map(heap->raw_unchecked_one_pointer_filler_map()); } else if (size_in_bytes == 2 * kPointerSize) { @@ -1727,318 +1612,300 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) { UNREACHABLE(); } // We would like to ASSERT(Size() == size_in_bytes) but this would fail during - // deserialization because the byte array map is not done yet. + // deserialization because the free space map is not done yet. } -Address FreeListNode::next(Heap* heap) { +FreeListNode* FreeListNode::next() { ASSERT(IsFreeListNode(this)); - if (map() == heap->raw_unchecked_byte_array_map()) { - ASSERT(Size() >= kNextOffset + kPointerSize); - return Memory::Address_at(address() + kNextOffset); + if (map() == HEAP->raw_unchecked_free_space_map()) { + ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); + return reinterpret_cast<FreeListNode*>( + Memory::Address_at(address() + kNextOffset)); } else { - return Memory::Address_at(address() + kPointerSize); + return reinterpret_cast<FreeListNode*>( + Memory::Address_at(address() + kPointerSize)); } } -void FreeListNode::set_next(Heap* heap, Address next) { +FreeListNode** FreeListNode::next_address() { ASSERT(IsFreeListNode(this)); - if (map() == heap->raw_unchecked_byte_array_map()) { + if (map() == HEAP->raw_unchecked_free_space_map()) { ASSERT(Size() >= kNextOffset + kPointerSize); - Memory::Address_at(address() + kNextOffset) = next; + return reinterpret_cast<FreeListNode**>(address() + kNextOffset); } else { - Memory::Address_at(address() + kPointerSize) = next; + return reinterpret_cast<FreeListNode**>(address() + kPointerSize); } } -OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner) - : heap_(heap), - owner_(owner) { - Reset(); +void FreeListNode::set_next(FreeListNode* next) { + ASSERT(IsFreeListNode(this)); + // While we are booting the VM the free space map will actually be null. So + // we have to make sure that we don't try to use it for anything at that + // stage. 
+ if (map() == HEAP->raw_unchecked_free_space_map()) { + ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); + Memory::Address_at(address() + kNextOffset) = + reinterpret_cast<Address>(next); + } else { + Memory::Address_at(address() + kPointerSize) = + reinterpret_cast<Address>(next); + } } -void OldSpaceFreeList::Reset() { - available_ = 0; - for (int i = 0; i < kFreeListsLength; i++) { - free_[i].head_node_ = NULL; - } - needs_rebuild_ = false; - finger_ = kHead; - free_[kHead].next_size_ = kEnd; +FreeList::FreeList(PagedSpace* owner) + : owner_(owner), heap_(owner->heap()) { + Reset(); } -void OldSpaceFreeList::RebuildSizeList() { - ASSERT(needs_rebuild_); - int cur = kHead; - for (int i = cur + 1; i < kFreeListsLength; i++) { - if (free_[i].head_node_ != NULL) { - free_[cur].next_size_ = i; - cur = i; - } - } - free_[cur].next_size_ = kEnd; - needs_rebuild_ = false; +void FreeList::Reset() { + available_ = 0; + small_list_ = NULL; + medium_list_ = NULL; + large_list_ = NULL; + huge_list_ = NULL; } -int OldSpaceFreeList::Free(Address start, int size_in_bytes) { -#ifdef DEBUG - Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes); -#endif +int FreeList::Free(Address start, int size_in_bytes) { + if (size_in_bytes == 0) return 0; FreeListNode* node = FreeListNode::FromAddress(start); node->set_size(heap_, size_in_bytes); - // We don't use the freelists in compacting mode. This makes it more like a - // GC that only has mark-sweep-compact and doesn't have a mark-sweep - // collector. - if (FLAG_always_compact) { - return size_in_bytes; - } - - // Early return to drop too-small blocks on the floor (one or two word - // blocks cannot hold a map pointer, a size field, and a pointer to the - // next block in the free list). - if (size_in_bytes < kMinBlockSize) { - return size_in_bytes; + // Early return to drop too-small blocks on the floor. + if (size_in_bytes < kSmallListMin) return size_in_bytes; + + // Insert other blocks at the head of a free list of the appropriate + // magnitude. + if (size_in_bytes <= kSmallListMax) { + node->set_next(small_list_); + small_list_ = node; + } else if (size_in_bytes <= kMediumListMax) { + node->set_next(medium_list_); + medium_list_ = node; + } else if (size_in_bytes <= kLargeListMax) { + node->set_next(large_list_); + large_list_ = node; + } else { + node->set_next(huge_list_); + huge_list_ = node; } - - // Insert other blocks at the head of an exact free list. - int index = size_in_bytes >> kPointerSizeLog2; - node->set_next(heap_, free_[index].head_node_); - free_[index].head_node_ = node->address(); available_ += size_in_bytes; - needs_rebuild_ = true; + ASSERT(IsVeryLong() || available_ == SumFreeLists()); return 0; } -MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) { - ASSERT(0 < size_in_bytes); - ASSERT(size_in_bytes <= kMaxBlockSize); - ASSERT(IsAligned(size_in_bytes, kPointerSize)); +FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { + FreeListNode* node = *list; - if (needs_rebuild_) RebuildSizeList(); - int index = size_in_bytes >> kPointerSizeLog2; - // Check for a perfect fit. - if (free_[index].head_node_ != NULL) { - FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_); - // If this was the last block of its size, remove the size. 
- if ((free_[index].head_node_ = node->next(heap_)) == NULL) - RemoveSize(index); - available_ -= size_in_bytes; - *wasted_bytes = 0; - ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. - return node; - } - // Search the size list for the best fit. - int prev = finger_ < index ? finger_ : kHead; - int cur = FindSize(index, &prev); - ASSERT(index < cur); - if (cur == kEnd) { - // No large enough size in list. - *wasted_bytes = 0; - return Failure::RetryAfterGC(owner_); - } - ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. - int rem = cur - index; - int rem_bytes = rem << kPointerSizeLog2; - FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_); - ASSERT(cur_node->Size() == (cur << kPointerSizeLog2)); - FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ + - size_in_bytes); - // Distinguish the cases prev < rem < cur and rem <= prev < cur - // to avoid many redundant tests and calls to Insert/RemoveSize. - if (prev < rem) { - // Simple case: insert rem between prev and cur. - finger_ = prev; - free_[prev].next_size_ = rem; - // If this was the last block of size cur, remove the size. - if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) { - free_[rem].next_size_ = free_[cur].next_size_; - } else { - free_[rem].next_size_ = cur; - } - // Add the remainder block. - rem_node->set_size(heap_, rem_bytes); - rem_node->set_next(heap_, free_[rem].head_node_); - free_[rem].head_node_ = rem_node->address(); + if (node == NULL) return NULL; + + while (node != NULL && + Page::FromAddress(node->address())->IsEvacuationCandidate()) { + available_ -= node->Size(); + node = node->next(); + } + + if (node != NULL) { + *node_size = node->Size(); + *list = node->next(); } else { - // If this was the last block of size cur, remove the size. - if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) { - finger_ = prev; - free_[prev].next_size_ = free_[cur].next_size_; - } - if (rem_bytes < kMinBlockSize) { - // Too-small remainder is wasted. - rem_node->set_size(heap_, rem_bytes); - available_ -= size_in_bytes + rem_bytes; - *wasted_bytes = rem_bytes; - return cur_node; - } - // Add the remainder block and, if needed, insert its size. 
- rem_node->set_size(heap_, rem_bytes); - rem_node->set_next(heap_, free_[rem].head_node_); - free_[rem].head_node_ = rem_node->address(); - if (rem_node->next(heap_) == NULL) InsertSize(rem); + *list = NULL; } - available_ -= size_in_bytes; - *wasted_bytes = 0; - return cur_node; + + return node; } -void OldSpaceFreeList::MarkNodes() { - for (int i = 0; i < kFreeListsLength; i++) { - Address cur_addr = free_[i].head_node_; - while (cur_addr != NULL) { - FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); - cur_addr = cur_node->next(heap_); - cur_node->SetMark(); - } - } -} +FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { + FreeListNode* node = NULL; + if (size_in_bytes <= kSmallAllocationMax) { + node = PickNodeFromList(&small_list_, node_size); + if (node != NULL) return node; + } -#ifdef DEBUG -bool OldSpaceFreeList::Contains(FreeListNode* node) { - for (int i = 0; i < kFreeListsLength; i++) { - Address cur_addr = free_[i].head_node_; - while (cur_addr != NULL) { - FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); - if (cur_node == node) return true; - cur_addr = cur_node->next(heap_); - } + if (size_in_bytes <= kMediumAllocationMax) { + node = PickNodeFromList(&medium_list_, node_size); + if (node != NULL) return node; } - return false; -} -#endif + if (size_in_bytes <= kLargeAllocationMax) { + node = PickNodeFromList(&large_list_, node_size); + if (node != NULL) return node; + } -FixedSizeFreeList::FixedSizeFreeList(Heap* heap, - AllocationSpace owner, - int object_size) - : heap_(heap), owner_(owner), object_size_(object_size) { - Reset(); -} + for (FreeListNode** cur = &huge_list_; + *cur != NULL; + cur = (*cur)->next_address()) { + FreeListNode* cur_node = *cur; + while (cur_node != NULL && + Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { + available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size(); + cur_node = cur_node->next(); + } + *cur = cur_node; + if (cur_node == NULL) break; + + ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); + FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); + int size = cur_as_free_space->Size(); + if (size >= size_in_bytes) { + // Large enough node found. Unlink it from the list. + node = *cur; + *node_size = size; + *cur = node->next(); + break; + } + } -void FixedSizeFreeList::Reset() { - available_ = 0; - head_ = tail_ = NULL; + return node; } -void FixedSizeFreeList::Free(Address start) { -#ifdef DEBUG - Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_); -#endif - // We only use the freelists with mark-sweep. - ASSERT(!HEAP->mark_compact_collector()->IsCompacting()); - FreeListNode* node = FreeListNode::FromAddress(start); - node->set_size(heap_, object_size_); - node->set_next(heap_, NULL); - if (head_ == NULL) { - tail_ = head_ = node->address(); +// Allocation on the old space free list. If it succeeds then a new linear +// allocation space has been set up with the top and limit of the space. If +// the allocation fails then NULL is returned, and the caller can perform a GC +// or allocate a new page before retrying. +HeapObject* FreeList::Allocate(int size_in_bytes) { + ASSERT(0 < size_in_bytes); + ASSERT(size_in_bytes <= kMaxBlockSize); + ASSERT(IsAligned(size_in_bytes, kPointerSize)); + // Don't free list allocate if there is linear space available. 
+ ASSERT(owner_->limit() - owner_->top() < size_in_bytes); + + int new_node_size = 0; + FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); + if (new_node == NULL) return NULL; + + available_ -= new_node_size; + ASSERT(IsVeryLong() || available_ == SumFreeLists()); + + int bytes_left = new_node_size - size_in_bytes; + ASSERT(bytes_left >= 0); + + int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); + // Mark the old linear allocation area with a free space map so it can be + // skipped when scanning the heap. This also puts it back in the free list + // if it is big enough. + owner_->Free(owner_->top(), old_linear_size); + owner_->heap()->incremental_marking()->OldSpaceStep( + size_in_bytes - old_linear_size); + + // The old-space-step might have finished sweeping and restarted marking. + // Verify that it did not turn the page of the new node into an evacuation + // candidate. + ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); + + const int kThreshold = IncrementalMarking::kAllocatedThreshold; + + // Memory in the linear allocation area is counted as allocated. We may free + // a little of this again immediately - see below. + owner_->Allocate(new_node_size); + + if (bytes_left > kThreshold && + owner_->heap()->incremental_marking()->IsMarkingIncomplete() && + FLAG_incremental_marking_steps) { + int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); + // We don't want to give too large linear areas to the allocator while + // incremental marking is going on, because we won't check again whether + // we want to do another increment until the linear area is used up. + owner_->Free(new_node->address() + size_in_bytes + linear_size, + new_node_size - size_in_bytes - linear_size); + owner_->SetTop(new_node->address() + size_in_bytes, + new_node->address() + size_in_bytes + linear_size); + } else if (bytes_left > 0) { + // Normally we give the rest of the node to the allocator as its new + // linear allocation area. + owner_->SetTop(new_node->address() + size_in_bytes, + new_node->address() + new_node_size); } else { - FreeListNode::FromAddress(tail_)->set_next(heap_, node->address()); - tail_ = node->address(); + // TODO(gc) Try not freeing linear allocation region when bytes_left + // are zero. + owner_->SetTop(NULL, NULL); } - available_ += object_size_; + + return new_node; } -MaybeObject* FixedSizeFreeList::Allocate() { - if (head_ == NULL) { - return Failure::RetryAfterGC(owner_); +static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { + intptr_t sum = 0; + while (n != NULL) { + if (Page::FromAddress(n->address()) == p) { + FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n); + sum += free_space->Size(); + } + n = n->next(); } - - ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. 
- FreeListNode* node = FreeListNode::FromAddress(head_); - head_ = node->next(heap_); - available_ -= object_size_; - return node; + return sum; } -void FixedSizeFreeList::MarkNodes() { - Address cur_addr = head_; - while (cur_addr != NULL && cur_addr != tail_) { - FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); - cur_addr = cur_node->next(heap_); - cur_node->SetMark(); +void FreeList::CountFreeListItems(Page* p, intptr_t* sizes) { + sizes[0] = CountFreeListItemsInList(small_list_, p); + sizes[1] = CountFreeListItemsInList(medium_list_, p); + sizes[2] = CountFreeListItemsInList(large_list_, p); + sizes[3] = CountFreeListItemsInList(huge_list_, p); +} + +#ifdef DEBUG +intptr_t FreeList::SumFreeList(FreeListNode* cur) { + intptr_t sum = 0; + while (cur != NULL) { + ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map()); + FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); + sum += cur_as_free_space->Size(); + cur = cur->next(); } + return sum; } -// ----------------------------------------------------------------------------- -// OldSpace implementation +static const int kVeryLongFreeList = 500; -void OldSpace::PrepareForMarkCompact(bool will_compact) { - // Call prepare of the super class. - PagedSpace::PrepareForMarkCompact(will_compact); - - if (will_compact) { - // Reset relocation info. During a compacting collection, everything in - // the space is considered 'available' and we will rediscover live data - // and waste during the collection. - MCResetRelocationInfo(); - ASSERT(Available() == Capacity()); - } else { - // During a non-compacting collection, everything below the linear - // allocation pointer is considered allocated (everything above is - // available) and we will rediscover available and wasted bytes during - // the collection. - accounting_stats_.AllocateBytes(free_list_.available()); - accounting_stats_.FillWastedBytes(Waste()); - } - // Clear the free list before a full GC---it will be rebuilt afterward. - free_list_.Reset(); +int FreeList::FreeListLength(FreeListNode* cur) { + int length = 0; + while (cur != NULL) { + length++; + cur = cur->next(); + if (length == kVeryLongFreeList) return length; + } + return length; } -void OldSpace::MCCommitRelocationInfo() { - // Update fast allocation info. - allocation_info_.top = mc_forwarding_info_.top; - allocation_info_.limit = mc_forwarding_info_.limit; - ASSERT(allocation_info_.VerifyPagedAllocation()); +bool FreeList::IsVeryLong() { + if (FreeListLength(small_list_) == kVeryLongFreeList) return true; + if (FreeListLength(medium_list_) == kVeryLongFreeList) return true; + if (FreeListLength(large_list_) == kVeryLongFreeList) return true; + if (FreeListLength(huge_list_) == kVeryLongFreeList) return true; + return false; +} - // The space is compacted and we haven't yet built free lists or - // wasted any space. - ASSERT(Waste() == 0); - ASSERT(AvailableFree() == 0); - // Build the free list for the space. - int computed_size = 0; - PageIterator it(this, PageIterator::PAGES_USED_BY_MC); - while (it.has_next()) { - Page* p = it.next(); - // Space below the relocation pointer is allocated. - computed_size += - static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart()); - if (it.has_next()) { - // Free the space at the top of the page. 
- int extra_size = - static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark()); - if (extra_size > 0) { - int wasted_bytes = free_list_.Free(p->AllocationWatermark(), - extra_size); - // The bytes we have just "freed" to add to the free list were - // already accounted as available. - accounting_stats_.WasteBytes(wasted_bytes); - } - } - } - - // Make sure the computed size - based on the used portion of the pages in - // use - matches the size obtained while computing forwarding addresses. - ASSERT(computed_size == Size()); +// This can take a very long time because it is linear in the number of entries +// on the free list, so it should not be called if FreeListLength returns +// kVeryLongFreeList. +intptr_t FreeList::SumFreeLists() { + intptr_t sum = SumFreeList(small_list_); + sum += SumFreeList(medium_list_); + sum += SumFreeList(large_list_); + sum += SumFreeList(huge_list_); + return sum; } +#endif + +// ----------------------------------------------------------------------------- +// OldSpace implementation bool NewSpace::ReserveSpace(int bytes) { // We can't reliably unpack a partial snapshot that needs more new space @@ -2050,200 +1917,119 @@ bool NewSpace::ReserveSpace(int bytes) { } -void PagedSpace::FreePages(Page* prev, Page* last) { - if (last == AllocationTopPage()) { - // Pages are already at the end of used pages. - return; +void PagedSpace::PrepareForMarkCompact() { + // We don't have a linear allocation area while sweeping. It will be restored + // on the first allocation after the sweep. + // Mark the old linear allocation area with a free space map so it can be + // skipped when scanning the heap. + int old_linear_size = static_cast<int>(limit() - top()); + Free(top(), old_linear_size); + SetTop(NULL, NULL); + + // Stop lazy sweeping and clear marking bits for unswept pages. + if (first_unswept_page_ != NULL) { + Page* last = last_unswept_page_; + Page* p = first_unswept_page_; + do { + // Do not use ShouldBeSweptLazily predicate here. + // New evacuation candidates were selected but they still have + // to be swept before collection starts. + if (!p->WasSwept()) { + Bitmap::Clear(p); + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n", + reinterpret_cast<intptr_t>(p)); + } + } + p = p->next_page(); + } while (p != last); } + first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL); - Page* first = NULL; - - // Remove pages from the list. - if (prev == NULL) { - first = first_page_; - first_page_ = last->next_page(); - } else { - first = prev->next_page(); - heap()->isolate()->memory_allocator()->SetNextPage( - prev, last->next_page()); - } + // Clear the free list before a full GC---it will be rebuilt afterward. + free_list_.Reset(); +} - // Attach it after the last page. - heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first); - last_page_ = last; - heap()->isolate()->memory_allocator()->SetNextPage(last, NULL); - // Clean them up. - do { - first->InvalidateWatermark(true); - first->SetAllocationWatermark(first->ObjectAreaStart()); - first->SetCachedAllocationWatermark(first->ObjectAreaStart()); - first->SetRegionMarks(Page::kAllRegionsCleanMarks); - first = first->next_page(); - } while (first != NULL); - - // Order of pages in this space might no longer be consistent with - // order of pages in chunks. 
- page_list_is_chunk_ordered_ = false; -} - - -void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) { - const bool add_to_freelist = true; - - // Mark used and unused pages to properly fill unused pages - // after reordering. - PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES); - Page* last_in_use = AllocationTopPage(); - bool in_use = true; - - while (all_pages_iterator.has_next()) { - Page* p = all_pages_iterator.next(); - p->SetWasInUseBeforeMC(in_use); - if (p == last_in_use) { - // We passed a page containing allocation top. All consequent - // pages are not used. - in_use = false; - } - } +bool PagedSpace::ReserveSpace(int size_in_bytes) { + ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize); + ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); + Address current_top = allocation_info_.top; + Address new_top = current_top + size_in_bytes; + if (new_top <= allocation_info_.limit) return true; - if (page_list_is_chunk_ordered_) return; + HeapObject* new_area = free_list_.Allocate(size_in_bytes); + if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); + if (new_area == NULL) return false; - Page* new_last_in_use = Page::FromAddress(NULL); - heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder( - this, &first_page_, &last_page_, &new_last_in_use); - ASSERT(new_last_in_use->is_valid()); + int old_linear_size = static_cast<int>(limit() - top()); + // Mark the old linear allocation area with a free space so it can be + // skipped when scanning the heap. This also puts it back in the free list + // if it is big enough. + Free(top(), old_linear_size); - if (new_last_in_use != last_in_use) { - // Current allocation top points to a page which is now in the middle - // of page list. We should move allocation top forward to the new last - // used page so various object iterators will continue to work properly. - int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) - - last_in_use->AllocationTop()); + SetTop(new_area->address(), new_area->address() + size_in_bytes); + Allocate(size_in_bytes); + return true; +} - last_in_use->SetAllocationWatermark(last_in_use->AllocationTop()); - if (size_in_bytes > 0) { - Address start = last_in_use->AllocationTop(); - if (deallocate_blocks) { - accounting_stats_.AllocateBytes(size_in_bytes); - DeallocateBlock(start, size_in_bytes, add_to_freelist); - } else { - heap()->CreateFillerObjectAt(start, size_in_bytes); - } - } - // New last in use page was in the middle of the list before - // sorting so it full. - SetTop(new_last_in_use->AllocationTop()); +// You have to call this last, since the implementation from PagedSpace +// doesn't know that memory was 'promised' to large object space. +bool LargeObjectSpace::ReserveSpace(int bytes) { + return heap()->OldGenerationSpaceAvailable() >= bytes; +} - ASSERT(AllocationTopPage() == new_last_in_use); - ASSERT(AllocationTopPage()->WasInUseBeforeMC()); - } - PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); - while (pages_in_use_iterator.has_next()) { - Page* p = pages_in_use_iterator.next(); - if (!p->WasInUseBeforeMC()) { - // Empty page is in the middle of a sequence of used pages. - // Allocate it as a whole and deallocate immediately. 
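// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the "retire the linear
// allocation area" step used by the new PagedSpace::ReserveSpace() and
// PrepareForMarkCompact() above. Before top/limit are moved to a new region,
// the leftover [top, limit) range is handed back as a free block so heap
// scanning can skip it. LinearArea, FreeChunk and RetireInto are illustrative
// stand-ins, not V8 types.
#include <stdint.h>

typedef uintptr_t Address;

struct FreeChunk {
  Address start;
  intptr_t size;
};

struct LinearArea {
  Address top;
  Address limit;
};

// Returns the leftover block (possibly empty) and re-points the area at the
// freshly acquired region [new_start, new_start + new_size).
static FreeChunk RetireInto(LinearArea* area, Address new_start,
                            intptr_t new_size) {
  FreeChunk leftover;
  leftover.start = area->top;
  leftover.size = static_cast<intptr_t>(area->limit - area->top);
  area->top = new_start;
  area->limit = new_start + new_size;
  return leftover;  // Caller adds this to the free list if it is big enough.
}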
- int size_in_bytes = static_cast<int>(PageAllocationLimit(p) - - p->ObjectAreaStart()); +bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { + if (IsSweepingComplete()) return true; - p->SetAllocationWatermark(p->ObjectAreaStart()); - Address start = p->ObjectAreaStart(); - if (deallocate_blocks) { - accounting_stats_.AllocateBytes(size_in_bytes); - DeallocateBlock(start, size_in_bytes, add_to_freelist); - } else { - heap()->CreateFillerObjectAt(start, size_in_bytes); + intptr_t freed_bytes = 0; + Page* last = last_unswept_page_; + Page* p = first_unswept_page_; + do { + Page* next_page = p->next_page(); + if (ShouldBeSweptLazily(p)) { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", + reinterpret_cast<intptr_t>(p)); } + freed_bytes += MarkCompactCollector::SweepConservatively(this, p); } + p = next_page; + } while (p != last && freed_bytes < bytes_to_sweep); + + if (p == last) { + last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL); + } else { + first_unswept_page_ = p; } - page_list_is_chunk_ordered_ = true; -} + heap()->LowerOldGenLimits(freed_bytes); + heap()->FreeQueuedChunks(); -void PagedSpace::PrepareForMarkCompact(bool will_compact) { - if (will_compact) { - RelinkPageListInChunkOrder(false); - } + return IsSweepingComplete(); } -bool PagedSpace::ReserveSpace(int bytes) { - Address limit = allocation_info_.limit; - Address top = allocation_info_.top; - if (limit - top >= bytes) return true; - - // There wasn't enough space in the current page. Lets put the rest - // of the page on the free list and start a fresh page. - PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_)); - - Page* reserved_page = TopPageOf(allocation_info_); - int bytes_left_to_reserve = bytes; - while (bytes_left_to_reserve > 0) { - if (!reserved_page->next_page()->is_valid()) { - if (heap()->OldGenerationAllocationLimitReached()) return false; - Expand(reserved_page); - } - bytes_left_to_reserve -= Page::kPageSize; - reserved_page = reserved_page->next_page(); - if (!reserved_page->is_valid()) return false; - } - ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid()); - TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true); - SetAllocationInfo(&allocation_info_, - TopPageOf(allocation_info_)->next_page()); - return true; -} +void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { + if (allocation_info_.top >= allocation_info_.limit) return; + if (Page::FromAddress(allocation_info_.top)->IsEvacuationCandidate()) { + // Create filler object to keep page iterable if it was iterable. + int remaining = + static_cast<int>(allocation_info_.limit - allocation_info_.top); + heap()->CreateFillerObjectAt(allocation_info_.top, remaining); -// You have to call this last, since the implementation from PagedSpace -// doesn't know that memory was 'promised' to large object space. -bool LargeObjectSpace::ReserveSpace(int bytes) { - return heap()->OldGenerationSpaceAvailable() >= bytes; + allocation_info_.top = NULL; + allocation_info_.limit = NULL; + } } -// Slow case for normal allocation. Try in order: (1) allocate in the next -// page in the space, (2) allocate off the space's free list, (3) expand the -// space, (4) fail. -HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) { - // Linear allocation in this space has failed. If there is another page - // in the space, move to that page and allocate there. This allocation - // should succeed (size_in_bytes should not be greater than a page's - // object area size). 
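// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the budget-driven loop behind
// the new PagedSpace::AdvanceSweeper() above. Sweeping is advanced only far
// enough to reclaim roughly as many bytes as the allocation that triggered it.
// PageInfo and "reclaimable_bytes" are simplified stand-ins; in the patch the
// per-page work is MarkCompactCollector::SweepConservatively().
#include <stddef.h>
#include <stdint.h>
#include <vector>

struct PageInfo {
  bool swept;
  intptr_t reclaimable_bytes;
};

// Sweeps pages starting at *cursor until the byte budget is met or all pages
// are swept. Returns true when no unswept pages remain.
static bool AdvanceSweep(std::vector<PageInfo>* pages, size_t* cursor,
                         intptr_t bytes_to_sweep) {
  intptr_t freed = 0;
  while (*cursor < pages->size() && freed < bytes_to_sweep) {
    PageInfo* p = &(*pages)[*cursor];
    if (!p->swept) {
      freed += p->reclaimable_bytes;  // Stand-in for the real sweep.
      p->swept = true;
    }
    ++(*cursor);
  }
  return *cursor == pages->size();
}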
- Page* current_page = TopPageOf(allocation_info_); - if (current_page->next_page()->is_valid()) { - return AllocateInNextPage(current_page, size_in_bytes); - } - - // There is no next page in this space. Try free list allocation unless that - // is currently forbidden. - if (!heap()->linear_allocation()) { - int wasted_bytes; - Object* result; - MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes); - accounting_stats_.WasteBytes(wasted_bytes); - if (maybe->ToObject(&result)) { - accounting_stats_.AllocateBytes(size_in_bytes); - - HeapObject* obj = HeapObject::cast(result); - Page* p = Page::FromAddress(obj->address()); - - if (obj->address() >= p->AllocationWatermark()) { - // There should be no hole between the allocation watermark - // and allocated object address. - // Memory above the allocation watermark was not swept and - // might contain garbage pointers to new space. - ASSERT(obj->address() == p->AllocationWatermark()); - p->SetAllocationWatermark(obj->address() + size_in_bytes); - } - - return obj; - } - } +HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { + // Allocation in this space has failed. // Free list allocation failed and there is no next page. Fail if we have // hit the old generation size limit that should cause a garbage @@ -2253,61 +2039,30 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) { return NULL; } - // Try to expand the space and allocate in the new next page. - ASSERT(!current_page->next_page()->is_valid()); - if (Expand(current_page)) { - return AllocateInNextPage(current_page, size_in_bytes); - } + // If there are unswept pages advance lazy sweeper. + if (first_unswept_page_->is_valid()) { + AdvanceSweeper(size_in_bytes); - // Finally, fail. - return NULL; -} + // Retry the free list allocation. + HeapObject* object = free_list_.Allocate(size_in_bytes); + if (object != NULL) return object; + if (!IsSweepingComplete()) { + AdvanceSweeper(kMaxInt); -void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { - current_page->SetAllocationWatermark(allocation_info_.top); - int free_size = - static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); - if (free_size > 0) { - int wasted_bytes = free_list_.Free(allocation_info_.top, free_size); - accounting_stats_.WasteBytes(wasted_bytes); + // Retry the free list allocation. + object = free_list_.Allocate(size_in_bytes); + if (object != NULL) return object; + } } -} - -void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { - current_page->SetAllocationWatermark(allocation_info_.top); - int free_size = - static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); - // In the fixed space free list all the free list items have the right size. - // We use up the rest of the page while preserving this invariant. - while (free_size >= object_size_in_bytes_) { - free_list_.Free(allocation_info_.top); - allocation_info_.top += object_size_in_bytes_; - free_size -= object_size_in_bytes_; - accounting_stats_.WasteBytes(object_size_in_bytes_); + // Try to expand the space and allocate in the new next page. + if (Expand()) { + return free_list_.Allocate(size_in_bytes); } -} - -// Add the block at the top of the page to the space's free list, set the -// allocation info to the next page (assumed to be one), and allocate -// linearly there. 
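// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the ordered fallback in the new
// PagedSpace::SlowAllocateRaw() above -- a bounded lazy sweep and retry, a
// full sweep and retry, growing the space, and finally failure (the old
// generation limit check is omitted here). "Space" and its methods are
// illustrative stand-ins, not the V8 classes.
#include <stddef.h>

static const int kSweepEverything = 0x7fffffff;  // Mirrors kMaxInt in the patch.

struct Space {
  // Stubs standing in for the real helpers; bodies elided in this sketch.
  void* TryFreeList(int /* size_in_bytes */) { return NULL; }
  bool SweepingComplete() { return true; }
  void AdvanceSweep(int /* byte_budget */) {}
  bool Expand() { return false; }

  // Returning NULL tells the caller to request a garbage collection.
  void* SlowAllocate(int size_in_bytes) {
    // (1) Reclaim roughly size_in_bytes by lazy sweeping, then retry.
    AdvanceSweep(size_in_bytes);
    void* result = TryFreeList(size_in_bytes);
    if (result != NULL) return result;

    // (2) Finish sweeping entirely and retry once more.
    if (!SweepingComplete()) {
      AdvanceSweep(kSweepEverything);
      result = TryFreeList(size_in_bytes);
      if (result != NULL) return result;
    }

    // (3) Grow the space and allocate from the new page.
    if (Expand()) return TryFreeList(size_in_bytes);

    // (4) Give up.
    return NULL;
  }
};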
-HeapObject* OldSpace::AllocateInNextPage(Page* current_page, - int size_in_bytes) { - ASSERT(current_page->next_page()->is_valid()); - Page* next_page = current_page->next_page(); - next_page->ClearGCFields(); - PutRestOfCurrentPageOnFreeList(current_page); - SetAllocationInfo(&allocation_info_, next_page); - return AllocateLinearly(&allocation_info_, size_in_bytes); -} - - -void OldSpace::DeallocateBlock(Address start, - int size_in_bytes, - bool add_to_freelist) { - Free(start, size_in_bytes, add_to_freelist); + // Finally, fail. + return NULL; } @@ -2413,7 +2168,7 @@ static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) { void PagedSpace::CollectCodeStatistics() { Isolate* isolate = heap()->isolate(); HeapObjectIterator obj_it(this); - for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { + for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { if (obj->IsCode()) { Code* code = Code::cast(obj); isolate->code_kind_statistics()[code->kind()] += code->Size(); @@ -2438,16 +2193,17 @@ void PagedSpace::CollectCodeStatistics() { } -void OldSpace::ReportStatistics() { +void PagedSpace::ReportStatistics() { int pct = static_cast<int>(Available() * 100 / Capacity()); PrintF(" capacity: %" V8_PTR_PREFIX "d" ", waste: %" V8_PTR_PREFIX "d" ", available: %" V8_PTR_PREFIX "d, %%%d\n", Capacity(), Waste(), Available(), pct); + if (was_swept_conservatively_) return; ClearHistograms(); HeapObjectIterator obj_it(this); - for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) + for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) CollectHistogramInfo(obj); ReportHistogram(true); } @@ -2456,192 +2212,28 @@ void OldSpace::ReportStatistics() { // ----------------------------------------------------------------------------- // FixedSpace implementation -void FixedSpace::PrepareForMarkCompact(bool will_compact) { +void FixedSpace::PrepareForMarkCompact() { // Call prepare of the super class. - PagedSpace::PrepareForMarkCompact(will_compact); - - if (will_compact) { - // Reset relocation info. - MCResetRelocationInfo(); + PagedSpace::PrepareForMarkCompact(); - // During a compacting collection, everything in the space is considered - // 'available' (set by the call to MCResetRelocationInfo) and we will - // rediscover live and wasted bytes during the collection. - ASSERT(Available() == Capacity()); - } else { - // During a non-compacting collection, everything below the linear - // allocation pointer except wasted top-of-page blocks is considered - // allocated and we will rediscover available bytes during the - // collection. - accounting_stats_.AllocateBytes(free_list_.available()); - } + // During a non-compacting collection, everything below the linear + // allocation pointer except wasted top-of-page blocks is considered + // allocated and we will rediscover available bytes during the + // collection. + accounting_stats_.AllocateBytes(free_list_.available()); // Clear the free list before a full GC---it will be rebuilt afterward. free_list_.Reset(); } -void FixedSpace::MCCommitRelocationInfo() { - // Update fast allocation info. - allocation_info_.top = mc_forwarding_info_.top; - allocation_info_.limit = mc_forwarding_info_.limit; - ASSERT(allocation_info_.VerifyPagedAllocation()); - - // The space is compacted and we haven't yet wasted any space. - ASSERT(Waste() == 0); - - // Update allocation_top of each page in use and compute waste. 
- int computed_size = 0; - PageIterator it(this, PageIterator::PAGES_USED_BY_MC); - while (it.has_next()) { - Page* page = it.next(); - Address page_top = page->AllocationTop(); - computed_size += static_cast<int>(page_top - page->ObjectAreaStart()); - if (it.has_next()) { - accounting_stats_.WasteBytes( - static_cast<int>(page->ObjectAreaEnd() - page_top)); - page->SetAllocationWatermark(page_top); - } - } - - // Make sure the computed size - based on the used portion of the - // pages in use - matches the size we adjust during allocation. - ASSERT(computed_size == Size()); -} - - -// Slow case for normal allocation. Try in order: (1) allocate in the next -// page in the space, (2) allocate off the space's free list, (3) expand the -// space, (4) fail. -HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) { - ASSERT_EQ(object_size_in_bytes_, size_in_bytes); - // Linear allocation in this space has failed. If there is another page - // in the space, move to that page and allocate there. This allocation - // should succeed. - Page* current_page = TopPageOf(allocation_info_); - if (current_page->next_page()->is_valid()) { - return AllocateInNextPage(current_page, size_in_bytes); - } - - // There is no next page in this space. Try free list allocation unless - // that is currently forbidden. The fixed space free list implicitly assumes - // that all free blocks are of the fixed size. - if (!heap()->linear_allocation()) { - Object* result; - MaybeObject* maybe = free_list_.Allocate(); - if (maybe->ToObject(&result)) { - accounting_stats_.AllocateBytes(size_in_bytes); - HeapObject* obj = HeapObject::cast(result); - Page* p = Page::FromAddress(obj->address()); - - if (obj->address() >= p->AllocationWatermark()) { - // There should be no hole between the allocation watermark - // and allocated object address. - // Memory above the allocation watermark was not swept and - // might contain garbage pointers to new space. - ASSERT(obj->address() == p->AllocationWatermark()); - p->SetAllocationWatermark(obj->address() + size_in_bytes); - } - - return obj; - } - } - - // Free list allocation failed and there is no next page. Fail if we have - // hit the old generation size limit that should cause a garbage - // collection. - if (!heap()->always_allocate() && - heap()->OldGenerationAllocationLimitReached()) { - return NULL; - } - - // Try to expand the space and allocate in the new next page. - ASSERT(!current_page->next_page()->is_valid()); - if (Expand(current_page)) { - return AllocateInNextPage(current_page, size_in_bytes); - } - - // Finally, fail. - return NULL; -} - - -// Move to the next page (there is assumed to be one) and allocate there. -// The top of page block is always wasted, because it is too small to hold a -// map. -HeapObject* FixedSpace::AllocateInNextPage(Page* current_page, - int size_in_bytes) { - ASSERT(current_page->next_page()->is_valid()); - ASSERT(allocation_info_.top == PageAllocationLimit(current_page)); - ASSERT_EQ(object_size_in_bytes_, size_in_bytes); - Page* next_page = current_page->next_page(); - next_page->ClearGCFields(); - current_page->SetAllocationWatermark(allocation_info_.top); - accounting_stats_.WasteBytes(page_extra_); - SetAllocationInfo(&allocation_info_, next_page); - return AllocateLinearly(&allocation_info_, size_in_bytes); -} - - -void FixedSpace::DeallocateBlock(Address start, - int size_in_bytes, - bool add_to_freelist) { - // Free-list elements in fixed space are assumed to have a fixed size. 
- // We break the free block into chunks and add them to the free list - // individually. - int size = object_size_in_bytes(); - ASSERT(size_in_bytes % size == 0); - Address end = start + size_in_bytes; - for (Address a = start; a < end; a += size) { - Free(a, add_to_freelist); - } -} - - -#ifdef DEBUG -void FixedSpace::ReportStatistics() { - int pct = static_cast<int>(Available() * 100 / Capacity()); - PrintF(" capacity: %" V8_PTR_PREFIX "d" - ", waste: %" V8_PTR_PREFIX "d" - ", available: %" V8_PTR_PREFIX "d, %%%d\n", - Capacity(), Waste(), Available(), pct); - - ClearHistograms(); - HeapObjectIterator obj_it(this); - for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) - CollectHistogramInfo(obj); - ReportHistogram(false); -} -#endif - - // ----------------------------------------------------------------------------- // MapSpace implementation -void MapSpace::PrepareForMarkCompact(bool will_compact) { - // Call prepare of the super class. - FixedSpace::PrepareForMarkCompact(will_compact); - - if (will_compact) { - // Initialize map index entry. - int page_count = 0; - PageIterator it(this, PageIterator::ALL_PAGES); - while (it.has_next()) { - ASSERT_MAP_PAGE_INDEX(page_count); - - Page* p = it.next(); - ASSERT(p->mc_page_index == page_count); - - page_addresses_[page_count++] = p->address(); - } - } -} - - #ifdef DEBUG void MapSpace::VerifyObject(HeapObject* object) { // The object should be a map or a free-list node. - ASSERT(object->IsMap() || object->IsByteArray()); + ASSERT(object->IsMap() || object->IsFreeSpace()); } #endif @@ -2662,107 +2254,40 @@ void CellSpace::VerifyObject(HeapObject* object) { // LargeObjectIterator LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { - current_ = space->first_chunk_; + current_ = space->first_page_; size_func_ = NULL; } LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func) { - current_ = space->first_chunk_; + current_ = space->first_page_; size_func_ = size_func; } -HeapObject* LargeObjectIterator::next() { +HeapObject* LargeObjectIterator::Next() { if (current_ == NULL) return NULL; HeapObject* object = current_->GetObject(); - current_ = current_->next(); + current_ = current_->next_page(); return object; } // ----------------------------------------------------------------------------- -// LargeObjectChunk - -LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes, - Executability executable) { - size_t requested = ChunkSizeFor(size_in_bytes); - size_t size; - size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0; - Isolate* isolate = Isolate::Current(); - void* mem = isolate->memory_allocator()->AllocateRawMemory( - requested + guard_size, &size, executable); - if (mem == NULL) return NULL; - - // The start of the chunk may be overlayed with a page so we have to - // make sure that the page flags fit in the size field. - ASSERT((size & Page::kPageFlagMask) == 0); - - LOG(isolate, NewEvent("LargeObjectChunk", mem, size)); - if (size < requested + guard_size) { - isolate->memory_allocator()->FreeRawMemory( - mem, size, executable); - LOG(isolate, DeleteEvent("LargeObjectChunk", mem)); - return NULL; - } - - if (guard_size != 0) { - OS::Guard(mem, guard_size); - size -= guard_size; - mem = static_cast<Address>(mem) + guard_size; - } - - ObjectSpace space = (executable == EXECUTABLE) - ? 
kObjectSpaceCodeSpace - : kObjectSpaceLoSpace; - isolate->memory_allocator()->PerformAllocationCallback( - space, kAllocationActionAllocate, size); - - LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem); - chunk->size_ = size; - chunk->GetPage()->heap_ = isolate->heap(); - return chunk; -} - - -void LargeObjectChunk::Free(Executability executable) { - size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0; - ObjectSpace space = - (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace; - // Do not access instance fields after FreeRawMemory! - Address my_address = address(); - size_t my_size = size(); - Isolate* isolate = GetPage()->heap_->isolate(); - MemoryAllocator* a = isolate->memory_allocator(); - a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable); - a->PerformAllocationCallback(space, kAllocationActionFree, my_size); - LOG(isolate, DeleteEvent("LargeObjectChunk", my_address)); -} - - -int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { - int os_alignment = static_cast<int>(OS::AllocateAlignment()); - if (os_alignment < Page::kPageSize) { - size_in_bytes += (Page::kPageSize - os_alignment); - } - return size_in_bytes + Page::kObjectStartOffset; -} - -// ----------------------------------------------------------------------------- // LargeObjectSpace LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id) : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis - first_chunk_(NULL), + first_page_(NULL), size_(0), page_count_(0), objects_size_(0) {} bool LargeObjectSpace::Setup() { - first_chunk_ = NULL; + first_page_ = NULL; size_ = 0; page_count_ = 0; objects_size_ = 0; @@ -2771,20 +2296,22 @@ bool LargeObjectSpace::Setup() { void LargeObjectSpace::TearDown() { - while (first_chunk_ != NULL) { - LargeObjectChunk* chunk = first_chunk_; - first_chunk_ = first_chunk_->next(); - chunk->Free(chunk->GetPage()->PageExecutability()); + while (first_page_ != NULL) { + LargePage* page = first_page_; + first_page_ = first_page_->next_page(); + LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address())); + + ObjectSpace space = static_cast<ObjectSpace>(1 << identity()); + heap()->isolate()->memory_allocator()->PerformAllocationCallback( + space, kAllocationActionFree, page->size()); + heap()->isolate()->memory_allocator()->Free(page); } Setup(); } -MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size, - int object_size, - Executability executable) { - ASSERT(0 < object_size && object_size <= requested_size); - +MaybeObject* LargeObjectSpace::AllocateRaw(int object_size, + Executability executable) { // Check if we want to force a GC before growing the old space further. // If so, fail the allocation. 
if (!heap()->always_allocate() && @@ -2792,75 +2319,42 @@ MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size, return Failure::RetryAfterGC(identity()); } - LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable); - if (chunk == NULL) { - return Failure::RetryAfterGC(identity()); - } + LargePage* page = heap()->isolate()->memory_allocator()-> + AllocateLargePage(object_size, executable, this); + if (page == NULL) return Failure::RetryAfterGC(identity()); + ASSERT(page->body_size() >= object_size); - size_ += static_cast<int>(chunk->size()); - objects_size_ += requested_size; + size_ += static_cast<int>(page->size()); + objects_size_ += object_size; page_count_++; - chunk->set_next(first_chunk_); - first_chunk_ = chunk; - - // Initialize page header. - Page* page = chunk->GetPage(); - Address object_address = page->ObjectAreaStart(); - - // Clear the low order bit of the second word in the page to flag it as a - // large object page. If the chunk_size happened to be written there, its - // low order bit should already be clear. - page->SetIsLargeObjectPage(true); - page->SetPageExecutability(executable); - page->SetRegionMarks(Page::kAllRegionsCleanMarks); - return HeapObject::FromAddress(object_address); -} - - -MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) { - ASSERT(0 < size_in_bytes); - return AllocateRawInternal(size_in_bytes, - size_in_bytes, - EXECUTABLE); -} - + page->set_next_page(first_page_); + first_page_ = page; -MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) { - ASSERT(0 < size_in_bytes); - return AllocateRawInternal(size_in_bytes, - size_in_bytes, - NOT_EXECUTABLE); -} - - -MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) { - ASSERT(0 < size_in_bytes); - return AllocateRawInternal(size_in_bytes, - size_in_bytes, - NOT_EXECUTABLE); + heap()->incremental_marking()->OldSpaceStep(object_size); + return page->GetObject(); } // GC support MaybeObject* LargeObjectSpace::FindObject(Address a) { - for (LargeObjectChunk* chunk = first_chunk_; - chunk != NULL; - chunk = chunk->next()) { - Address chunk_address = chunk->address(); - if (chunk_address <= a && a < chunk_address + chunk->size()) { - return chunk->GetObject(); + for (LargePage* page = first_page_; + page != NULL; + page = page->next_page()) { + Address page_address = page->address(); + if (page_address <= a && a < page_address + page->size()) { + return page->GetObject(); } } return Failure::Exception(); } -LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) { +LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) { // TODO(853): Change this implementation to only find executable // chunks and use some kind of hash-based approach to speed it up. - for (LargeObjectChunk* chunk = first_chunk_; + for (LargePage* chunk = first_page_; chunk != NULL; - chunk = chunk->next()) { + chunk = chunk->next_page()) { Address chunk_address = chunk->address(); if (chunk_address <= pc && pc < chunk_address + chunk->size()) { return chunk; @@ -2870,112 +2364,57 @@ LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) { } -void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) { - LargeObjectIterator it(this); - for (HeapObject* object = it.next(); object != NULL; object = it.next()) { - // We only have code, sequential strings, or fixed arrays in large - // object space, and only fixed arrays can possibly contain pointers to - // the young generation. 
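// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the large-object bookkeeping in
// the new LargeObjectSpace::AllocateRaw()/FindObject() above -- one LargePage
// per object, pushed onto a singly linked list, with a linear range scan to
// map an arbitrary address back to its object. "Large" is a stand-in type.
#include <stddef.h>
#include <stdint.h>

typedef uintptr_t Address;

struct Large {
  Large* next;
  Address start;
  size_t size;
};

// Push-front keeps allocation O(1); the list is only walked for slow queries.
static Large* Add(Large* head, Large* page) {
  page->next = head;
  return page;
}

// Mirrors FindObject(): linear scan for the page whose range contains |a|,
// or NULL if none does.
static Large* Find(Large* head, Address a) {
  for (Large* p = head; p != NULL; p = p->next) {
    if (p->start <= a && a < p->start + p->size) return p;
  }
  return NULL;
}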
- if (object->IsFixedArray()) { - Page* page = Page::FromAddress(object->address()); - uint32_t marks = page->GetRegionMarks(); - uint32_t newmarks = Page::kAllRegionsCleanMarks; - - if (marks != Page::kAllRegionsCleanMarks) { - // For a large page a single dirty mark corresponds to several - // regions (modulo 32). So we treat a large page as a sequence of - // normal pages of size Page::kPageSize having same dirty marks - // and subsequently iterate dirty regions on each of these pages. - Address start = object->address(); - Address end = page->ObjectAreaEnd(); - Address object_end = start + object->Size(); - - // Iterate regions of the first normal page covering object. - uint32_t first_region_number = page->GetRegionNumberForAddress(start); - newmarks |= - heap()->IterateDirtyRegions(marks >> first_region_number, - start, - end, - &Heap::IteratePointersInDirtyRegion, - copy_object) << first_region_number; - - start = end; - end = start + Page::kPageSize; - while (end <= object_end) { - // Iterate next 32 regions. - newmarks |= - heap()->IterateDirtyRegions(marks, - start, - end, - &Heap::IteratePointersInDirtyRegion, - copy_object); - start = end; - end = start + Page::kPageSize; - } - - if (start != object_end) { - // Iterate the last piece of an object which is less than - // Page::kPageSize. - newmarks |= - heap()->IterateDirtyRegions(marks, - start, - object_end, - &Heap::IteratePointersInDirtyRegion, - copy_object); - } - - page->SetRegionMarks(newmarks); - } - } - } -} - - void LargeObjectSpace::FreeUnmarkedObjects() { - LargeObjectChunk* previous = NULL; - LargeObjectChunk* current = first_chunk_; + LargePage* previous = NULL; + LargePage* current = first_page_; while (current != NULL) { HeapObject* object = current->GetObject(); - if (object->IsMarked()) { - object->ClearMark(); - heap()->mark_compact_collector()->tracer()->decrement_marked_count(); + // Can this large page contain pointers to non-trivial objects. No other + // pointer object is this big. + bool is_pointer_object = object->IsFixedArray(); + MarkBit mark_bit = Marking::MarkBitFrom(object); + if (mark_bit.Get()) { + mark_bit.Clear(); + MemoryChunk::IncrementLiveBytes(object->address(), -object->Size()); previous = current; - current = current->next(); + current = current->next_page(); } else { + LargePage* page = current; // Cut the chunk out from the chunk list. - LargeObjectChunk* current_chunk = current; - current = current->next(); + current = current->next_page(); if (previous == NULL) { - first_chunk_ = current; + first_page_ = current; } else { - previous->set_next(current); + previous->set_next_page(current); } // Free the chunk. 
heap()->mark_compact_collector()->ReportDeleteIfNeeded( object, heap()->isolate()); - LiveObjectList::ProcessNonLive(object); - - size_ -= static_cast<int>(current_chunk->size()); + size_ -= static_cast<int>(page->size()); objects_size_ -= object->Size(); page_count_--; - current_chunk->Free(current_chunk->GetPage()->PageExecutability()); + + if (is_pointer_object) { + heap()->QueueMemoryChunkForFree(page); + } else { + heap()->isolate()->memory_allocator()->Free(page); + } } } + heap()->FreeQueuedChunks(); } bool LargeObjectSpace::Contains(HeapObject* object) { Address address = object->address(); - if (heap()->new_space()->Contains(address)) { - return false; - } - Page* page = Page::FromAddress(address); + MemoryChunk* chunk = MemoryChunk::FromAddress(address); + + bool owned = (chunk->owner() == this); - SLOW_ASSERT(!page->IsLargeObjectPage() - || !FindObject(address)->IsFailure()); + SLOW_ASSERT(!owned || !FindObject(address)->IsFailure()); - return page->IsLargeObjectPage(); + return owned; } @@ -2983,9 +2422,9 @@ bool LargeObjectSpace::Contains(HeapObject* object) { // We do not assume that the large object iterator works, because it depends // on the invariants we are checking during verification. void LargeObjectSpace::Verify() { - for (LargeObjectChunk* chunk = first_chunk_; + for (LargePage* chunk = first_page_; chunk != NULL; - chunk = chunk->next()) { + chunk = chunk->next_page()) { // Each chunk contains an object that starts at the large object page's // object area start. HeapObject* object = chunk->GetObject(); @@ -3015,9 +2454,6 @@ void LargeObjectSpace::Verify() { object->Size(), &code_visitor); } else if (object->IsFixedArray()) { - // We loop over fixed arrays ourselves, rather then using the visitor, - // because the visitor doesn't support the start/offset iteration - // needed for IsRegionDirty. FixedArray* array = FixedArray::cast(object); for (int j = 0; j < array->length(); j++) { Object* element = array->get(j); @@ -3025,13 +2461,6 @@ void LargeObjectSpace::Verify() { HeapObject* element_object = HeapObject::cast(element); ASSERT(heap()->Contains(element_object)); ASSERT(element_object->map()->IsMap()); - if (heap()->InNewSpace(element_object)) { - Address array_addr = object->address(); - Address element_addr = array_addr + FixedArray::kHeaderSize + - j * kPointerSize; - - ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr)); - } } } } @@ -3041,7 +2470,7 @@ void LargeObjectSpace::Verify() { void LargeObjectSpace::Print() { LargeObjectIterator it(this); - for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { obj->Print(); } } @@ -3052,7 +2481,7 @@ void LargeObjectSpace::ReportStatistics() { int num_objects = 0; ClearHistograms(); LargeObjectIterator it(this); - for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { + for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { num_objects++; CollectHistogramInfo(obj); } @@ -3066,13 +2495,38 @@ void LargeObjectSpace::ReportStatistics() { void LargeObjectSpace::CollectCodeStatistics() { Isolate* isolate = heap()->isolate(); LargeObjectIterator obj_it(this); - for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { + for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { if (obj->IsCode()) { Code* code = Code::cast(obj); isolate->code_kind_statistics()[code->kind()] += code->Size(); } } } + + +void Page::Print() { + // Make a best-effort to print the objects in the page. 
+ PrintF("Page@%p in %s\n", + this->address(), + AllocationSpaceName(this->owner()->identity())); + printf(" --------------------------------------\n"); + HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction()); + unsigned mark_size = 0; + for (HeapObject* object = objects.Next(); + object != NULL; + object = objects.Next()) { + bool is_marked = Marking::MarkBitFrom(object).Get(); + PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little. + if (is_marked) { + mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object); + } + object->ShortPrint(); + PrintF("\n"); + } + printf(" --------------------------------------\n"); + printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); +} + #endif // DEBUG } } // namespace v8::internal diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h index f1564967e..ce8e382aa 100644 --- a/deps/v8/src/spaces.h +++ b/deps/v8/src/spaces.h @@ -49,45 +49,47 @@ class Isolate; // // The semispaces of the young generation are contiguous. The old and map // spaces consists of a list of pages. A page has a page header and an object -// area. A page size is deliberately chosen as 8K bytes. -// The first word of a page is an opaque page header that has the -// address of the next page and its ownership information. The second word may -// have the allocation top address of this page. Heap objects are aligned to the -// pointer size. +// area. // // There is a separate large object space for objects larger than // Page::kMaxHeapObjectSize, so that they do not have to move during // collection. The large object space is paged. Pages in large object space -// may be larger than 8K. +// may be larger than the page size. // -// A card marking write barrier is used to keep track of intergenerational -// references. Old space pages are divided into regions of Page::kRegionSize -// size. Each region has a corresponding dirty bit in the page header which is -// set if the region might contain pointers to new space. For details about -// dirty bits encoding see comments in the Page::GetRegionNumberForAddress() -// method body. +// A store-buffer based write barrier is used to keep track of intergenerational +// references. See store-buffer.h. // -// During scavenges and mark-sweep collections we iterate intergenerational -// pointers without decoding heap object maps so if the page belongs to old -// pointer space or large object space it is essential to guarantee that -// the page does not contain any garbage pointers to new space: every pointer -// aligned word which satisfies the Heap::InNewSpace() predicate must be a -// pointer to a live heap object in new space. Thus objects in old pointer -// and large object spaces should have a special layout (e.g. no bare integer -// fields). This requirement does not apply to map space which is iterated in -// a special fashion. However we still require pointer fields of dead maps to -// be cleaned. +// During scavenges and mark-sweep collections we sometimes (after a store +// buffer overflow) iterate intergenerational pointers without decoding heap +// object maps so if the page belongs to old pointer space or large object +// space it is essential to guarantee that the page does not contain any +// garbage pointers to new space: every pointer aligned word which satisfies +// the Heap::InNewSpace() predicate must be a pointer to a live heap object in +// new space. Thus objects in old pointer and large object spaces should have a +// special layout (e.g. no bare integer fields). 
This requirement does not +// apply to map space which is iterated in a special fashion. However we still +// require pointer fields of dead maps to be cleaned. // -// To enable lazy cleaning of old space pages we use a notion of allocation -// watermark. Every pointer under watermark is considered to be well formed. -// Page allocation watermark is not necessarily equal to page allocation top but -// all alive objects on page should reside under allocation watermark. -// During scavenge allocation watermark might be bumped and invalid pointers -// might appear below it. To avoid following them we store a valid watermark -// into special field in the page header and set a page WATERMARK_INVALIDATED -// flag. For details see comments in the Page::SetAllocationWatermark() method -// body. +// To enable lazy cleaning of old space pages we can mark chunks of the page +// as being garbage. Garbage sections are marked with a special map. These +// sections are skipped when scanning the page, even if we are otherwise +// scanning without regard for object boundaries. Garbage sections are chained +// together to form a free list after a GC. Garbage sections created outside +// of GCs by object trunctation etc. may not be in the free list chain. Very +// small free spaces are ignored, they need only be cleaned of bogus pointers +// into new space. // +// Each page may have up to one special garbage section. The start of this +// section is denoted by the top field in the space. The end of the section +// is denoted by the limit field in the space. This special garbage section +// is not marked with a free space map in the data. The point of this section +// is to enable linear allocation without having to constantly update the byte +// array every time the top field is updated and a new object is created. The +// special garbage section is not in the chain of garbage sections. +// +// Since the top and limit fields are in the space, not the page, only one page +// has a special garbage section, and if the top and limit are equal then there +// is no special garbage section. // Some assertion macros used in the debugging mode. @@ -114,30 +116,522 @@ class Isolate; class PagedSpace; class MemoryAllocator; class AllocationInfo; +class Space; +class FreeList; +class MemoryChunk; + +class MarkBit { + public: + typedef uint32_t CellType; + + inline MarkBit(CellType* cell, CellType mask, bool data_only) + : cell_(cell), mask_(mask), data_only_(data_only) { } + + inline CellType* cell() { return cell_; } + inline CellType mask() { return mask_; } + +#ifdef DEBUG + bool operator==(const MarkBit& other) { + return cell_ == other.cell_ && mask_ == other.mask_; + } +#endif + + inline void Set() { *cell_ |= mask_; } + inline bool Get() { return (*cell_ & mask_) != 0; } + inline void Clear() { *cell_ &= ~mask_; } + + inline bool data_only() { return data_only_; } + + inline MarkBit Next() { + CellType new_mask = mask_ << 1; + if (new_mask == 0) { + return MarkBit(cell_ + 1, 1, data_only_); + } else { + return MarkBit(cell_, new_mask, data_only_); + } + } + + private: + CellType* cell_; + CellType mask_; + // This boolean indicates that the object is in a data-only space with no + // pointers. This enables some optimizations when marking. + // It is expected that this field is inlined and turned into control flow + // at the place where the MarkBit object is created. + bool data_only_; +}; + + +// Bitmap is a sequence of cells each containing fixed number of bits. 
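// ---------------------------------------------------------------------------
// Illustrative note (not part of the patch): a worked example of the index
// arithmetic used by the MarkBit and Bitmap classes in this header, assuming
// 64-bit pointers (kPointerSizeLog2 == 3) and 32-bit cells
// (kBitsPerCellLog2 == 5):
//
//   page offset 0x1238      ->  markbit index = 0x1238 >> 3 = 583
//   cell = 583 >> 5         = 18
//   mask = 1 << (583 & 31)  = 1 << 7 = 0x80
//
// i.e. the mark bit for that word is bit 7 of cells()[18]. The same arithmetic
// in stand-alone form:
#include <stdint.h>

static inline void CellAndMask(uint32_t markbit_index,
                               uint32_t* cell, uint32_t* mask) {
  *cell = markbit_index >> 5;          // kBitsPerCellLog2
  *mask = 1u << (markbit_index & 31);  // kBitIndexMask
}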
+class Bitmap { + public: + static const uint32_t kBitsPerCell = 32; + static const uint32_t kBitsPerCellLog2 = 5; + static const uint32_t kBitIndexMask = kBitsPerCell - 1; + static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte; + static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2; + + static const size_t kLength = + (1 << kPageSizeBits) >> (kPointerSizeLog2); + + static const size_t kSize = + (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); + + + static int CellsForLength(int length) { + return (length + kBitsPerCell - 1) >> kBitsPerCellLog2; + } + + int CellsCount() { + return CellsForLength(kLength); + } + + static int SizeFor(int cells_count) { + return sizeof(MarkBit::CellType) * cells_count; + } + + INLINE(static uint32_t IndexToCell(uint32_t index)) { + return index >> kBitsPerCellLog2; + } + + INLINE(static uint32_t CellToIndex(uint32_t index)) { + return index << kBitsPerCellLog2; + } + + INLINE(static uint32_t CellAlignIndex(uint32_t index)) { + return (index + kBitIndexMask) & ~kBitIndexMask; + } + + INLINE(MarkBit::CellType* cells()) { + return reinterpret_cast<MarkBit::CellType*>(this); + } + + INLINE(Address address()) { + return reinterpret_cast<Address>(this); + } + + INLINE(static Bitmap* FromAddress(Address addr)) { + return reinterpret_cast<Bitmap*>(addr); + } + + inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) { + MarkBit::CellType mask = 1 << (index & kBitIndexMask); + MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2); + return MarkBit(cell, mask, data_only); + } + + static inline void Clear(MemoryChunk* chunk); + + static void PrintWord(uint32_t word, uint32_t himask = 0) { + for (uint32_t mask = 1; mask != 0; mask <<= 1) { + if ((mask & himask) != 0) PrintF("["); + PrintF((mask & word) ? "1" : "0"); + if ((mask & himask) != 0) PrintF("]"); + } + } + + class CellPrinter { + public: + CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { } + + void Print(uint32_t pos, uint32_t cell) { + if (cell == seq_type) { + seq_length++; + return; + } + + Flush(); + + if (IsSeq(cell)) { + seq_start = pos; + seq_length = 0; + seq_type = cell; + return; + } + + PrintF("%d: ", pos); + PrintWord(cell); + PrintF("\n"); + } + + void Flush() { + if (seq_length > 0) { + PrintF("%d: %dx%d\n", + seq_start, + seq_type == 0 ? 0 : 1, + seq_length * kBitsPerCell); + seq_length = 0; + } + } + + static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; } + + private: + uint32_t seq_start; + uint32_t seq_type; + uint32_t seq_length; + }; + + void Print() { + CellPrinter printer; + for (int i = 0; i < CellsCount(); i++) { + printer.Print(i, cells()[i]); + } + printer.Flush(); + PrintF("\n"); + } + + bool IsClean() { + for (int i = 0; i < CellsCount(); i++) { + if (cells()[i] != 0) return false; + } + return true; + } +}; + + +class SkipList; +class SlotsBuffer; + +// MemoryChunk represents a memory region owned by a specific space. +// It is divided into the header and the body. Chunk start is always +// 1MB aligned. Start of the body is aligned so it can accomodate +// any heap object. +class MemoryChunk { + public: + // Only works if the pointer is in the first kPageSize of the MemoryChunk. + static MemoryChunk* FromAddress(Address a) { + return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); + } + + // Only works for addresses in pointer spaces, not data or code spaces. 
+ static inline MemoryChunk* FromAnyPointerAddress(Address addr); + + Address address() { return reinterpret_cast<Address>(this); } + + bool is_valid() { return address() != NULL; } + + MemoryChunk* next_chunk() const { return next_chunk_; } + MemoryChunk* prev_chunk() const { return prev_chunk_; } + + void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; } + void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; } + + Space* owner() const { + if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == + kFailureTag) { + return reinterpret_cast<Space*>(owner_ - kFailureTag); + } else { + return NULL; + } + } + + void set_owner(Space* space) { + ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0); + owner_ = reinterpret_cast<Address>(space) + kFailureTag; + ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == + kFailureTag); + } + + VirtualMemory* reserved_memory() { + return &reservation_; + } + + void InitializeReservedMemory() { + reservation_.Reset(); + } + + void set_reserved_memory(VirtualMemory* reservation) { + ASSERT_NOT_NULL(reservation); + reservation_.TakeControl(reservation); + } + + bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } + void initialize_scan_on_scavenge(bool scan) { + if (scan) { + SetFlag(SCAN_ON_SCAVENGE); + } else { + ClearFlag(SCAN_ON_SCAVENGE); + } + } + inline void set_scan_on_scavenge(bool scan); + + int store_buffer_counter() { return store_buffer_counter_; } + void set_store_buffer_counter(int counter) { + store_buffer_counter_ = counter; + } + + Address body() { return address() + kObjectStartOffset; } + + Address body_limit() { return address() + size(); } + + int body_size() { return static_cast<int>(size() - kObjectStartOffset); } + + bool Contains(Address addr) { + return addr >= body() && addr < address() + size(); + } + + // Checks whether addr can be a limit of addresses in this page. + // It's a limit if it's in the page, or if it's just after the + // last byte of the page. + bool ContainsLimit(Address addr) { + return addr >= body() && addr <= address() + size(); + } + + enum MemoryChunkFlags { + IS_EXECUTABLE, + ABOUT_TO_BE_FREED, + POINTERS_TO_HERE_ARE_INTERESTING, + POINTERS_FROM_HERE_ARE_INTERESTING, + SCAN_ON_SCAVENGE, + IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. + IN_TO_SPACE, // All pages in new space has one of these two set. + NEW_SPACE_BELOW_AGE_MARK, + CONTAINS_ONLY_DATA, + EVACUATION_CANDIDATE, + RESCAN_ON_EVACUATION, + + // Pages swept precisely can be iterated, hitting only the live objects. + // Whereas those swept conservatively cannot be iterated over. Both flags + // indicate that marking bits have been cleared by the sweeper, otherwise + // marking bits are still intact. + WAS_SWEPT_PRECISELY, + WAS_SWEPT_CONSERVATIVELY, + + // Last flag, keep at bottom. 
+ NUM_MEMORY_CHUNK_FLAGS + }; + + + static const int kPointersToHereAreInterestingMask = + 1 << POINTERS_TO_HERE_ARE_INTERESTING; + + static const int kPointersFromHereAreInterestingMask = + 1 << POINTERS_FROM_HERE_ARE_INTERESTING; + + static const int kEvacuationCandidateMask = + 1 << EVACUATION_CANDIDATE; + + static const int kSkipEvacuationSlotsRecordingMask = + (1 << EVACUATION_CANDIDATE) | + (1 << RESCAN_ON_EVACUATION) | + (1 << IN_FROM_SPACE) | + (1 << IN_TO_SPACE); + + + void SetFlag(int flag) { + flags_ |= static_cast<uintptr_t>(1) << flag; + } + + void ClearFlag(int flag) { + flags_ &= ~(static_cast<uintptr_t>(1) << flag); + } + + void SetFlagTo(int flag, bool value) { + if (value) { + SetFlag(flag); + } else { + ClearFlag(flag); + } + } + + bool IsFlagSet(int flag) { + return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0; + } + + // Set or clear multiple flags at a time. The flags in the mask + // are set to the value in "flags", the rest retain the current value + // in flags_. + void SetFlags(intptr_t flags, intptr_t mask) { + flags_ = (flags_ & ~mask) | (flags & mask); + } + + // Return all current flags. + intptr_t GetFlags() { return flags_; } + + // Manage live byte count (count of bytes known to be live, + // because they are marked black). + void ResetLiveBytes() { + if (FLAG_gc_verbose) { + PrintF("ResetLiveBytes:%p:%x->0\n", + static_cast<void*>(this), live_byte_count_); + } + live_byte_count_ = 0; + } + void IncrementLiveBytes(int by) { + ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_); + if (FLAG_gc_verbose) { + printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", + static_cast<void*>(this), live_byte_count_, + ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by), + live_byte_count_ + by); + } + live_byte_count_ += by; + ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_); + } + int LiveBytes() { + ASSERT(static_cast<unsigned>(live_byte_count_) <= size_); + return live_byte_count_; + } + static void IncrementLiveBytes(Address address, int by) { + MemoryChunk::FromAddress(address)->IncrementLiveBytes(by); + } + + static const intptr_t kAlignment = + (static_cast<uintptr_t>(1) << kPageSizeBits); + + static const intptr_t kAlignmentMask = kAlignment - 1; + + static const intptr_t kSizeOffset = kPointerSize + kPointerSize; + + static const intptr_t kLiveBytesOffset = + kSizeOffset + kPointerSize + kPointerSize + kPointerSize + + kPointerSize + kPointerSize + kPointerSize + kIntSize; + + static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; + + static const size_t kHeaderSize = + kSlotsBufferOffset + kPointerSize + kPointerSize; + + static const int kBodyOffset = + CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); + + // The start offset of the object area in a page. Aligned to both maps and + // code alignment to be suitable for both. Also aligned to 32 words because + // the marking bitmap is arranged in 32 bit chunks. + static const int kObjectStartAlignment = 32 * kPointerSize; + static const int kObjectStartOffset = kBodyOffset - 1 + + (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); + + size_t size() const { return size_; } + + Executability executable() { + return IsFlagSet(IS_EXECUTABLE) ? 
EXECUTABLE : NOT_EXECUTABLE; + } + + bool ContainsOnlyData() { + return IsFlagSet(CONTAINS_ONLY_DATA); + } + + bool InNewSpace() { + return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; + } + + bool InToSpace() { + return IsFlagSet(IN_TO_SPACE); + } + + bool InFromSpace() { + return IsFlagSet(IN_FROM_SPACE); + } + + // --------------------------------------------------------------------- + // Markbits support + + inline Bitmap* markbits() { + return Bitmap::FromAddress(address() + kHeaderSize); + } + + void PrintMarkbits() { markbits()->Print(); } + + inline uint32_t AddressToMarkbitIndex(Address addr) { + return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; + } + + inline static uint32_t FastAddressToMarkbitIndex(Address addr) { + const intptr_t offset = + reinterpret_cast<intptr_t>(addr) & kAlignmentMask; + + return static_cast<uint32_t>(offset) >> kPointerSizeLog2; + } + + inline Address MarkbitIndexToAddress(uint32_t index) { + return this->address() + (index << kPointerSizeLog2); + } + + void InsertAfter(MemoryChunk* other); + void Unlink(); + + inline Heap* heap() { return heap_; } + + static const int kFlagsOffset = kPointerSize * 3; + + bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); } + + bool ShouldSkipEvacuationSlotRecording() { + return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; + } + + inline SkipList* skip_list() { + return skip_list_; + } + + inline void set_skip_list(SkipList* skip_list) { + skip_list_ = skip_list; + } + + inline SlotsBuffer* slots_buffer() { + return slots_buffer_; + } + + inline SlotsBuffer** slots_buffer_address() { + return &slots_buffer_; + } + + void MarkEvacuationCandidate() { + ASSERT(slots_buffer_ == NULL); + SetFlag(EVACUATION_CANDIDATE); + } + + void ClearEvacuationCandidate() { + ASSERT(slots_buffer_ == NULL); + ClearFlag(EVACUATION_CANDIDATE); + } + + + protected: + MemoryChunk* next_chunk_; + MemoryChunk* prev_chunk_; + size_t size_; + intptr_t flags_; + // If the chunk needs to remember its memory reservation, it is stored here. + VirtualMemory reservation_; + // The identity of the owning space. This is tagged as a failure pointer, but + // no failure can be in an object, so this can be distinguished from any entry + // in a fixed array. + Address owner_; + Heap* heap_; + // Used by the store buffer to keep track of which pages to mark scan-on- + // scavenge. + int store_buffer_counter_; + // Count of bytes marked black on page. + int live_byte_count_; + SlotsBuffer* slots_buffer_; + SkipList* skip_list_; + + static MemoryChunk* Initialize(Heap* heap, + Address base, + size_t size, + Executability executable, + Space* owner); + + friend class MemoryAllocator; +}; + +STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); // ----------------------------------------------------------------------------- -// A page normally has 8K bytes. Large object pages may be larger. A page -// address is always aligned to the 8K page size. -// -// Each page starts with a header of Page::kPageHeaderSize size which contains -// bookkeeping data. -// -// The mark-compact collector transforms a map pointer into a page index and a -// page offset. The exact encoding is described in the comments for -// class MapWord in objects.h. +// A page is a memory chunk of a size 1MB. Large object pages may be larger. 
// // The only way to get a page pointer is by calling factory methods: // Page* p = Page::FromAddress(addr); or // Page* p = Page::FromAllocationTop(top); -class Page { +class Page : public MemoryChunk { public: // Returns the page containing a given address. The address ranges // from [page_addr .. page_addr + kPageSize[ - // - // Note that this function only works for addresses in normal paged - // spaces and addresses in the first 8K of large object pages (i.e., - // the start of large objects but not necessarily derived pointers - // within them). + // This only works if the object is in fact in a page. See also MemoryChunk:: + // FromAddress() and FromAnyAddress(). INLINE(static Page* FromAddress(Address a)) { return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); } @@ -152,30 +646,11 @@ class Page { return p; } - // Returns the start address of this page. - Address address() { return reinterpret_cast<Address>(this); } - - // Checks whether this is a valid page address. - bool is_valid() { return address() != NULL; } - - // Returns the next page of this page. + // Returns the next page in the chain of pages owned by a space. inline Page* next_page(); - - // Return the end of allocation in this page. Undefined for unused pages. - inline Address AllocationTop(); - - // Return the allocation watermark for the page. - // For old space pages it is guaranteed that the area under the watermark - // does not contain any garbage pointers to new space. - inline Address AllocationWatermark(); - - // Return the allocation watermark offset from the beginning of the page. - inline uint32_t AllocationWatermarkOffset(); - - inline void SetAllocationWatermark(Address allocation_watermark); - - inline void SetCachedAllocationWatermark(Address allocation_watermark); - inline Address CachedAllocationWatermark(); + inline Page* prev_page(); + inline void set_next_page(Page* page); + inline void set_prev_page(Page* page); // Returns the start address of the object area in this page. Address ObjectAreaStart() { return address() + kObjectStartOffset; } @@ -188,22 +663,6 @@ class Page { return 0 == (OffsetFrom(a) & kPageAlignmentMask); } - // True if this page was in use before current compaction started. - // Result is valid only for pages owned by paged spaces and - // only after PagedSpace::PrepareForMarkCompact was called. - inline bool WasInUseBeforeMC(); - - inline void SetWasInUseBeforeMC(bool was_in_use); - - // True if this page is a large object page. - inline bool IsLargeObjectPage(); - - inline void SetIsLargeObjectPage(bool is_large_object_page); - - inline Executability PageExecutability(); - - inline void SetPageExecutability(Executability executable); - // Returns the offset of a given address to this page. 
INLINE(int Offset(Address a)) { int offset = static_cast<int>(a - address()); @@ -218,24 +677,6 @@ class Page { } // --------------------------------------------------------------------- - // Card marking support - - static const uint32_t kAllRegionsCleanMarks = 0x0; - static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF; - - inline uint32_t GetRegionMarks(); - inline void SetRegionMarks(uint32_t dirty); - - inline uint32_t GetRegionMaskForAddress(Address addr); - inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes); - inline int GetRegionNumberForAddress(Address addr); - - inline void MarkRegionDirty(Address addr); - inline bool IsRegionDirty(Address addr); - - inline void ClearRegionMarks(Address start, - Address end, - bool reaches_limit); // Page size in bytes. This must be a multiple of the OS page size. static const int kPageSize = 1 << kPageSizeBits; @@ -243,118 +684,69 @@ class Page { // Page size mask. static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; - static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize + - kIntSize + kPointerSize + kPointerSize; - - // The start offset of the object area in a page. Aligned to both maps and - // code alignment to be suitable for both. - static const int kObjectStartOffset = - CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize)); - // Object area size in bytes. static const int kObjectAreaSize = kPageSize - kObjectStartOffset; // Maximum object size that fits in a page. static const int kMaxHeapObjectSize = kObjectAreaSize; - static const int kDirtyFlagOffset = 2 * kPointerSize; - static const int kRegionSizeLog2 = 8; - static const int kRegionSize = 1 << kRegionSizeLog2; - static const intptr_t kRegionAlignmentMask = (kRegionSize - 1); + static const int kFirstUsedCell = + (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2; - STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt); + static const int kLastUsedCell = + ((kPageSize - kPointerSize)/kPointerSize) >> + Bitmap::kBitsPerCellLog2; - enum PageFlag { - IS_NORMAL_PAGE = 0, - WAS_IN_USE_BEFORE_MC, + inline void ClearGCFields(); - // Page allocation watermark was bumped by preallocation during scavenge. - // Correct watermark can be retrieved by CachedAllocationWatermark() method - WATERMARK_INVALIDATED, - IS_EXECUTABLE, - NUM_PAGE_FLAGS // Must be last - }; - static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1; - - // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during - // scavenge we just invalidate the watermark on each old space page after - // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED - // flag at the beginning of the next scavenge and each page becomes marked as - // having a valid watermark. - // - // The following invariant must hold for pages in old pointer and map spaces: - // If page is in use then page is marked as having invalid watermark at - // the beginning and at the end of any GC. - // - // This invariant guarantees that after flipping flag meaning at the - // beginning of scavenge all pages in use will be marked as having valid - // watermark. - static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap); - - // Returns true if the page allocation watermark was not altered during - // scavenge. 
- inline bool IsWatermarkValid(); + static inline Page* Initialize(Heap* heap, + MemoryChunk* chunk, + Executability executable, + PagedSpace* owner); - inline void InvalidateWatermark(bool value); + void InitializeAsAnchor(PagedSpace* owner); - inline bool GetPageFlag(PageFlag flag); - inline void SetPageFlag(PageFlag flag, bool value); - inline void ClearPageFlags(); + bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } + bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } + bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } - inline void ClearGCFields(); + void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } + void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } - static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1; - static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1; - static const uint32_t kAllocationWatermarkOffsetMask = - ((1 << kAllocationWatermarkOffsetBits) - 1) << - kAllocationWatermarkOffsetShift; - - static const uint32_t kFlagsMask = - ((1 << kAllocationWatermarkOffsetShift) - 1); - - STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >= - kAllocationWatermarkOffsetBits); - - //--------------------------------------------------------------------------- - // Page header description. - // - // If a page is not in the large object space, the first word, - // opaque_header, encodes the next page address (aligned to kPageSize 8K) - // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use - // opaque_header. The value range of the opaque_header is [0..kPageSize[, - // or [next_page_start, next_page_end[. It cannot point to a valid address - // in the current page. If a page is in the large object space, the first - // word *may* (if the page start and large object chunk start are the - // same) contain the address of the next large object chunk. - intptr_t opaque_header; - - // If the page is not in the large object space, the low-order bit of the - // second word is set. If the page is in the large object space, the - // second word *may* (if the page start and large object chunk start are - // the same) contain the large object chunk size. In either case, the - // low-order bit for large object pages will be cleared. - // For normal pages this word is used to store page flags and - // offset of allocation top. - intptr_t flags_; + void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); } + void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); } - // This field contains dirty marks for regions covering the page. Only dirty - // regions might contain intergenerational references. - // Only 32 dirty marks are supported so for large object pages several regions - // might be mapped to a single dirty mark. - uint32_t dirty_regions_; +#ifdef DEBUG + void Print(); +#endif // DEBUG + + friend class MemoryAllocator; +}; - // The index of the page in its owner space. - int mc_page_index; - // During mark-compact collections this field contains the forwarding address - // of the first live object in this page. - // During scavenge collection this field is used to store allocation watermark - // if it is altered during scavenge. 
- Address mc_first_forwarded; +STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); - Heap* heap_; + +class LargePage : public MemoryChunk { + public: + HeapObject* GetObject() { + return HeapObject::FromAddress(body()); + } + + inline LargePage* next_page() const { + return static_cast<LargePage*>(next_chunk()); + } + + inline void set_next_page(LargePage* page) { + set_next_chunk(page); + } + private: + static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); + + friend class MemoryAllocator; }; +STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize); // ---------------------------------------------------------------------------- // Space is the abstract superclass for all allocation spaces. @@ -380,6 +772,14 @@ class Space : public Malloced { // (e.g. see LargeObjectSpace). virtual intptr_t SizeOfObjects() { return Size(); } + virtual int RoundSizeDownToObjectAlignment(int size) { + if (id_ == CODE_SPACE) { + return RoundDown(size, kCodeAlignment); + } else { + return RoundDown(size, kPointerSize); + } + } + #ifdef DEBUG virtual void Print() = 0; #endif @@ -430,9 +830,9 @@ class CodeRange { // Allocates a chunk of memory from the large-object portion of // the code range. On platforms with no separate code range, should // not be called. - MUST_USE_RESULT void* AllocateRawMemory(const size_t requested, - size_t* allocated); - void FreeRawMemory(void* buf, size_t length); + MUST_USE_RESULT Address AllocateRawMemory(const size_t requested, + size_t* allocated); + void FreeRawMemory(Address buf, size_t length); private: Isolate* isolate_; @@ -443,9 +843,15 @@ class CodeRange { class FreeBlock { public: FreeBlock(Address start_arg, size_t size_arg) - : start(start_arg), size(size_arg) {} + : start(start_arg), size(size_arg) { + ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); + ASSERT(size >= static_cast<size_t>(Page::kPageSize)); + } FreeBlock(void* start_arg, size_t size_arg) - : start(static_cast<Address>(start_arg)), size(size_arg) {} + : start(static_cast<Address>(start_arg)), size(size_arg) { + ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); + ASSERT(size >= static_cast<size_t>(Page::kPageSize)); + } Address start; size_t size; @@ -473,30 +879,63 @@ class CodeRange { }; +class SkipList { + public: + SkipList() { + Clear(); + } + + void Clear() { + for (int idx = 0; idx < kSize; idx++) { + starts_[idx] = reinterpret_cast<Address>(-1); + } + } + + Address StartFor(Address addr) { + return starts_[RegionNumber(addr)]; + } + + void AddObject(Address addr, int size) { + int start_region = RegionNumber(addr); + int end_region = RegionNumber(addr + size - kPointerSize); + for (int idx = start_region; idx <= end_region; idx++) { + if (starts_[idx] > addr) starts_[idx] = addr; + } + } + + static inline int RegionNumber(Address addr) { + return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2; + } + + static void Update(Address addr, int size) { + Page* page = Page::FromAddress(addr); + SkipList* list = page->skip_list(); + if (list == NULL) { + list = new SkipList(); + page->set_skip_list(list); + } + + list->AddObject(addr, size); + } + + private: + static const int kRegionSizeLog2 = 13; + static const int kRegionSize = 1 << kRegionSizeLog2; + static const int kSize = Page::kPageSize / kRegionSize; + + STATIC_ASSERT(Page::kPageSize % kRegionSize == 0); + + Address starts_[kSize]; +}; + + // ---------------------------------------------------------------------------- // A space acquires chunks of memory from the operating system. 
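About the SkipList shown above: AddObject() records, for every 8 KB region (1 << 13 bytes) that an object overlaps, the lowest object start address seen so far, and StartFor() returns that recorded start for any address in the same region. A self-contained sketch of the same bookkeeping, with the page size and pointer size assumed purely so the example builds on its own:

#include <cstdint>
#include <cstddef>

// Mirrors the SkipList logic above outside of V8. The 1 MB page size and the
// pointer size are assumptions for the example only.
static const size_t kPageSize = static_cast<size_t>(1) << 20;
static const uintptr_t kPageAlignmentMask = kPageSize - 1;
static const int kRegionSizeLog2 = 13;  // 8 KB regions, as in SkipList
static const int kRegionCount = static_cast<int>(kPageSize >> kRegionSizeLog2);

struct SkipListSketch {
  uintptr_t starts_[kRegionCount];

  void Clear() {
    for (int i = 0; i < kRegionCount; i++) starts_[i] = static_cast<uintptr_t>(-1);
  }
  static int RegionNumber(uintptr_t addr) {
    return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
  }
  void AddObject(uintptr_t addr, int size) {
    int first = RegionNumber(addr);
    int last = RegionNumber(addr + size - sizeof(void*));
    for (int i = first; i <= last; i++) {
      if (starts_[i] > addr) starts_[i] = addr;  // keep the lowest start
    }
  }
  uintptr_t StartFor(uintptr_t addr) { return starts_[RegionNumber(addr)]; }
};

int main() {
  SkipListSketch list;
  list.Clear();
  list.AddObject(0x2000, 64);  // object at page offset 0x2000
  // Any address in the same 8 KB region now maps back to the object start.
  return list.StartFor(0x2100) == 0x2000 ? 0 : 1;
}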
The memory -// allocator manages chunks for the paged heap spaces (old space and map -// space). A paged chunk consists of pages. Pages in a chunk have contiguous -// addresses and are linked as a list. -// -// The allocator keeps an initial chunk which is used for the new space. The -// leftover regions of the initial chunk are used for the initial chunks of -// old space and map space if they are big enough to hold at least one page. -// The allocator assumes that there is one old space and one map space, each -// expands the space by allocating kPagesPerChunk pages except the last -// expansion (before running out of space). The first chunk may contain fewer -// than kPagesPerChunk pages as well. -// -// The memory allocator also allocates chunks for the large object space, but -// they are managed by the space itself. The new space does not expand. +// allocator allocates and deallocates pages for the paged heap spaces and large +// pages for large object space. // -// The fact that pages for paged spaces are allocated and deallocated in chunks -// induces a constraint on the order of pages in a linked lists. We say that -// pages are linked in the chunk-order if and only if every two consecutive -// pages from the same chunk are consecutive in the linked list. +// Each space has to manage its own pages. // - - class MemoryAllocator { public: explicit MemoryAllocator(Isolate* isolate); @@ -505,91 +944,15 @@ class MemoryAllocator { // Max capacity of the total space and executable memory limit. bool Setup(intptr_t max_capacity, intptr_t capacity_executable); - // Deletes valid chunks. void TearDown(); - // Reserves an initial address range of virtual memory to be split between - // the two new space semispaces, the old space, and the map space. The - // memory is not yet committed or assigned to spaces and split into pages. - // The initial chunk is unmapped when the memory allocator is torn down. - // This function should only be called when there is not already a reserved - // initial chunk (initial_chunk_ should be NULL). It returns the start - // address of the initial chunk if successful, with the side effect of - // setting the initial chunk, or else NULL if unsuccessful and leaves the - // initial chunk NULL. - void* ReserveInitialChunk(const size_t requested); - - // Commits pages from an as-yet-unmanaged block of virtual memory into a - // paged space. The block should be part of the initial chunk reserved via - // a call to ReserveInitialChunk. The number of pages is always returned in - // the output parameter num_pages. This function assumes that the start - // address is non-null and that it is big enough to hold at least one - // page-aligned page. The call always succeeds, and num_pages is always - // greater than zero. - Page* CommitPages(Address start, size_t size, PagedSpace* owner, - int* num_pages); - - // Commit a contiguous block of memory from the initial chunk. Assumes that - // the address is not NULL, the size is greater than zero, and that the - // block is contained in the initial chunk. Returns true if it succeeded - // and false otherwise. - bool CommitBlock(Address start, size_t size, Executability executable); + Page* AllocatePage(PagedSpace* owner, Executability executable); - // Uncommit a contiguous block of memory [start..(start+size)[. - // start is not NULL, the size is greater than zero, and the - // block is contained in the initial chunk. Returns true if it succeeded - // and false otherwise.
- bool UncommitBlock(Address start, size_t size); + LargePage* AllocateLargePage(intptr_t object_size, + Executability executable, + Space* owner); - // Zaps a contiguous block of memory [start..(start+size)[ thus - // filling it up with a recognizable non-NULL bit pattern. - void ZapBlock(Address start, size_t size); - - // Attempts to allocate the requested (non-zero) number of pages from the - // OS. Fewer pages might be allocated than requested. If it fails to - // allocate memory for the OS or cannot allocate a single page, this - // function returns an invalid page pointer (NULL). The caller must check - // whether the returned page is valid (by calling Page::is_valid()). It is - // guaranteed that allocated pages have contiguous addresses. The actual - // number of allocated pages is returned in the output parameter - // allocated_pages. If the PagedSpace owner is executable and there is - // a code range, the pages are allocated from the code range. - Page* AllocatePages(int requested_pages, int* allocated_pages, - PagedSpace* owner); - - // Frees pages from a given page and after. Requires pages to be - // linked in chunk-order (see comment for class). - // If 'p' is the first page of a chunk, pages from 'p' are freed - // and this function returns an invalid page pointer. - // Otherwise, the function searches a page after 'p' that is - // the first page of a chunk. Pages after the found page - // are freed and the function returns 'p'. - Page* FreePages(Page* p); - - // Frees all pages owned by given space. - void FreeAllPages(PagedSpace* space); - - // Allocates and frees raw memory of certain size. - // These are just thin wrappers around OS::Allocate and OS::Free, - // but keep track of allocated bytes as part of heap. - // If the flag is EXECUTABLE and a code range exists, the requested - // memory is allocated from the code range. If a code range exists - // and the freed memory is in it, the code range manages the freed memory. - MUST_USE_RESULT void* AllocateRawMemory(const size_t requested, - size_t* allocated, - Executability executable); - void FreeRawMemory(void* buf, - size_t length, - Executability executable); - void PerformAllocationCallback(ObjectSpace space, - AllocationAction action, - size_t size); - - void AddMemoryAllocationCallback(MemoryAllocationCallback callback, - ObjectSpace space, - AllocationAction action); - void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback); - bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback); + void Free(MemoryChunk* chunk); // Returns the maximum available bytes of heaps. intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } @@ -611,67 +974,68 @@ class MemoryAllocator { return (Available() / Page::kPageSize) * Page::kObjectAreaSize; } - // Links two pages. - inline void SetNextPage(Page* prev, Page* next); +#ifdef DEBUG + // Reports statistic info of the space. + void ReportStatistics(); +#endif - // Returns the next page of a given page. - inline Page* GetNextPage(Page* p); + MemoryChunk* AllocateChunk(intptr_t body_size, + Executability executable, + Space* space); - // Checks whether a page belongs to a space. - inline bool IsPageInSpace(Page* p, PagedSpace* space); + Address ReserveAlignedMemory(size_t requested, + size_t alignment, + VirtualMemory* controller); + Address AllocateAlignedMemory(size_t requested, + size_t alignment, + Executability executable, + VirtualMemory* controller); - // Returns the space that owns the given page. 
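The ReserveAlignedMemory()/AllocateAlignedMemory() helpers declared above hand out chunks aligned to MemoryChunk::kAlignment so that the header-masking tricks keep working. The sketch below shows the generic over-reserve-then-trim approach that such helpers typically use; it is not V8's implementation (the real code lives in spaces.cc and uses VirtualMemory), and malloc/free only stand in for an OS reservation here.

#include <cstdint>
#include <cstdlib>

// Generic sketch: reserve more than needed, then round the base up to the
// requested alignment. Purely illustrative; see the note above.
static void* ReserveAligned(size_t requested, size_t alignment,
                            void** reservation_out) {
  void* reservation = std::malloc(requested + alignment);
  if (reservation == NULL) return NULL;
  uintptr_t base = reinterpret_cast<uintptr_t>(reservation);
  uintptr_t aligned =
      (base + alignment - 1) & ~(static_cast<uintptr_t>(alignment) - 1);
  *reservation_out = reservation;  // keep the raw block so it can be freed
  return reinterpret_cast<void*>(aligned);
}

int main() {
  void* raw = NULL;
  void* chunk = ReserveAligned(4096, static_cast<size_t>(1) << 20, &raw);
  bool aligned = chunk != NULL &&
      (reinterpret_cast<uintptr_t>(chunk) &
       ((static_cast<uintptr_t>(1) << 20) - 1)) == 0;
  std::free(raw);
  return aligned ? 0 : 1;
}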
- inline PagedSpace* PageOwner(Page* page); + void FreeMemory(VirtualMemory* reservation, Executability executable); + void FreeMemory(Address addr, size_t size, Executability executable); - // Finds the first/last page in the same chunk as a given page. - Page* FindFirstPageInSameChunk(Page* p); - Page* FindLastPageInSameChunk(Page* p); + // Commit a contiguous block of memory from the initial chunk. Assumes that + // the address is not NULL, the size is greater than zero, and that the + // block is contained in the initial chunk. Returns true if it succeeded + // and false otherwise. + bool CommitBlock(Address start, size_t size, Executability executable); - // Relinks list of pages owned by space to make it chunk-ordered. - // Returns new first and last pages of space. - // Also returns last page in relinked list which has WasInUsedBeforeMC - // flag set. - void RelinkPageListInChunkOrder(PagedSpace* space, - Page** first_page, - Page** last_page, - Page** last_page_in_use); + // Uncommit a contiguous block of memory [start..(start+size)[. + // start is not NULL, the size is greater than zero, and the + // block is contained in the initial chunk. Returns true if it succeeded + // and false otherwise. + bool UncommitBlock(Address start, size_t size); -#ifdef DEBUG - // Reports statistic info of the space. - void ReportStatistics(); -#endif + // Zaps a contiguous block of memory [start..(start+size)[ thus + // filling it up with a recognizable non-NULL bit pattern. + void ZapBlock(Address start, size_t size); - // Due to encoding limitation, we can only have 8K chunks. - static const int kMaxNofChunks = 1 << kPageSizeBits; - // If a chunk has at least 16 pages, the maximum heap size is about - // 8K * 8K * 16 = 1G bytes. -#ifdef V8_TARGET_ARCH_X64 - static const int kPagesPerChunk = 32; - // On 64 bit the chunk table consists of 4 levels of 4096-entry tables. - static const int kChunkTableLevels = 4; - static const int kChunkTableBitsPerLevel = 12; -#else - static const int kPagesPerChunk = 16; - // On 32 bit the chunk table consists of 2 levels of 256-entry tables. - static const int kChunkTableLevels = 2; - static const int kChunkTableBitsPerLevel = 8; -#endif + void PerformAllocationCallback(ObjectSpace space, + AllocationAction action, + size_t size); - private: - static const int kChunkSize = kPagesPerChunk * Page::kPageSize; + void AddMemoryAllocationCallback(MemoryAllocationCallback callback, + ObjectSpace space, + AllocationAction action); + + void RemoveMemoryAllocationCallback( + MemoryAllocationCallback callback); + bool MemoryAllocationCallbackRegistered( + MemoryAllocationCallback callback); + + private: Isolate* isolate_; // Maximum space size in bytes. - intptr_t capacity_; + size_t capacity_; // Maximum subset of capacity_ that can be executable - intptr_t capacity_executable_; + size_t capacity_executable_; // Allocated space size in bytes. - intptr_t size_; - + size_t size_; // Allocated executable space size in bytes. - intptr_t size_executable_; + size_t size_executable_; struct MemoryAllocationCallbackRegistration { MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, @@ -683,64 +1047,11 @@ class MemoryAllocator { ObjectSpace space; AllocationAction action; }; + // A List of callback that are triggered when memory is allocated or free'd List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_; - // The initial chunk of virtual memory. 
- VirtualMemory* initial_chunk_; - - // Allocated chunk info: chunk start address, chunk size, and owning space. - class ChunkInfo BASE_EMBEDDED { - public: - ChunkInfo() : address_(NULL), - size_(0), - owner_(NULL), - executable_(NOT_EXECUTABLE), - owner_identity_(FIRST_SPACE) {} - inline void init(Address a, size_t s, PagedSpace* o); - Address address() { return address_; } - size_t size() { return size_; } - PagedSpace* owner() { return owner_; } - // We save executability of the owner to allow using it - // when collecting stats after the owner has been destroyed. - Executability executable() const { return executable_; } - AllocationSpace owner_identity() const { return owner_identity_; } - - private: - Address address_; - size_t size_; - PagedSpace* owner_; - Executability executable_; - AllocationSpace owner_identity_; - }; - - // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids. - List<ChunkInfo> chunks_; - List<int> free_chunk_ids_; - int max_nof_chunks_; - int top_; - - // Push/pop a free chunk id onto/from the stack. - void Push(int free_chunk_id); - int Pop(); - bool OutOfChunkIds() { return top_ == 0; } - - // Frees a chunk. - void DeleteChunk(int chunk_id); - - // Basic check whether a chunk id is in the valid range. - inline bool IsValidChunkId(int chunk_id); - - // Checks whether a chunk id identifies an allocated chunk. - inline bool IsValidChunk(int chunk_id); - - // Returns the chunk id that a page belongs to. - inline int GetChunkId(Page* p); - - // True if the address lies in the initial chunk. - inline bool InInitialChunk(Address address); - // Initializes pages in a chunk. Returns the first page address. // This function and GetChunkId() are provided for the mark-compact // collector to rebuild page headers in the from space, which is @@ -748,13 +1059,7 @@ class MemoryAllocator { Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, PagedSpace* owner); - Page* RelinkPagesInChunk(int chunk_id, - Address chunk_start, - size_t chunk_size, - Page* prev, - Page** last_page_in_use); - - DISALLOW_COPY_AND_ASSIGN(MemoryAllocator); + DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); }; @@ -777,71 +1082,58 @@ class ObjectIterator : public Malloced { // ----------------------------------------------------------------------------- // Heap object iterator in new/old/map spaces. // -// A HeapObjectIterator iterates objects from a given address to the -// top of a space. The given address must be below the current -// allocation pointer (space top). There are some caveats. -// -// (1) If the space top changes upward during iteration (because of -// allocating new objects), the iterator does not iterate objects -// above the original space top. The caller must create a new -// iterator starting from the old top in order to visit these new -// objects. -// -// (2) If new objects are allocated below the original allocation top -// (e.g., free-list allocation in paged spaces), the new objects -// may or may not be iterated depending on their position with -// respect to the current point of iteration. +// A HeapObjectIterator iterates objects from the bottom of the given space +// to its top or from the bottom of the given page to its top. // -// (3) The space top should not change downward during iteration, -// otherwise the iterator will return not-necessarily-valid -// objects. - +// If objects are allocated in the page during iteration the iterator may +// or may not iterate over those objects. 
The caller must create a new +// iterator in order to be sure to visit these new objects. class HeapObjectIterator: public ObjectIterator { public: - // Creates a new object iterator in a given space. If a start - // address is not given, the iterator starts from the space bottom. + // Creates a new object iterator in a given space. // If the size function is not given, the iterator calls the default // Object::Size(). explicit HeapObjectIterator(PagedSpace* space); HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); - HeapObjectIterator(PagedSpace* space, Address start); - HeapObjectIterator(PagedSpace* space, - Address start, - HeapObjectCallback size_func); HeapObjectIterator(Page* page, HeapObjectCallback size_func); - inline HeapObject* next() { - return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage(); + // Advance to the next object, skipping free spaces and other fillers and + // skipping the special garbage section of which there is one per space. + // Returns NULL when the iteration has ended. + inline HeapObject* Next() { + do { + HeapObject* next_obj = FromCurrentPage(); + if (next_obj != NULL) return next_obj; + } while (AdvanceToNextPage()); + return NULL; } - // implementation of ObjectIterator. - virtual HeapObject* next_object() { return next(); } + virtual HeapObject* next_object() { + return Next(); + } private: - Address cur_addr_; // current iteration point - Address end_addr_; // end iteration point - Address cur_limit_; // current page limit - HeapObjectCallback size_func_; // size function - Page* end_page_; // caches the page of the end address + enum PageMode { kOnePageOnly, kAllPagesInSpace }; - HeapObject* FromCurrentPage() { - ASSERT(cur_addr_ < cur_limit_); - - HeapObject* obj = HeapObject::FromAddress(cur_addr_); - int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj); - ASSERT_OBJECT_SIZE(obj_size); - - cur_addr_ += obj_size; - ASSERT(cur_addr_ <= cur_limit_); + Address cur_addr_; // Current iteration point. + Address cur_end_; // End iteration point. + HeapObjectCallback size_func_; // Size function or NULL. + PagedSpace* space_; + PageMode page_mode_; - return obj; - } + // Fast (inlined) path of next(). + inline HeapObject* FromCurrentPage(); - // Slow path of next, goes into the next page. - HeapObject* FromNextPage(); + // Slow path of next(), goes into the next page. Returns false if the + // iteration has ended. + bool AdvanceToNextPage(); // Initializes fields. - void Initialize(Address start, Address end, HeapObjectCallback size_func); + inline void Initialize(PagedSpace* owner, + Address start, + Address end, + PageMode mode, + HeapObjectCallback size_func); #ifdef DEBUG // Verifies whether fields have valid values. @@ -852,36 +1144,10 @@ class HeapObjectIterator: public ObjectIterator { // ----------------------------------------------------------------------------- // A PageIterator iterates the pages in a paged space. -// -// The PageIterator class provides three modes for iterating pages in a space: -// PAGES_IN_USE iterates pages containing allocated objects. -// PAGES_USED_BY_MC iterates pages that hold relocated objects during a -// mark-compact collection. -// ALL_PAGES iterates all pages in the space. -// -// There are some caveats. -// -// (1) If the space expands during iteration, new pages will not be -// returned by the iterator in any mode. -// -// (2) If new objects are allocated during iteration, they will appear -// in pages returned by the iterator. 
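A usage sketch of the new iterator protocol above: Next() folds the old has-next test into its return value and skips free-space fillers and the per-space garbage section itself. This relies on the v8::internal declarations from this header, so it is not standalone.

// Usage sketch only (not standalone; relies on the declarations above).
static intptr_t SumObjectSizes(PagedSpace* space) {
  HeapObjectIterator it(space);
  intptr_t total = 0;
  // Next() returns NULL once every page owned by the space has been walked.
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    total += obj->Size();
  }
  return total;
}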
Allocation may cause the -// allocation pointer or MC allocation pointer in the last page to -// change between constructing the iterator and iterating the last -// page. -// -// (3) The space should not shrink during iteration, otherwise the -// iterator will return deallocated pages. class PageIterator BASE_EMBEDDED { public: - enum Mode { - PAGES_IN_USE, - PAGES_USED_BY_MC, - ALL_PAGES - }; - - PageIterator(PagedSpace* space, Mode mode); + explicit inline PageIterator(PagedSpace* space); inline bool has_next(); inline Page* next(); @@ -889,21 +1155,25 @@ class PageIterator BASE_EMBEDDED { private: PagedSpace* space_; Page* prev_page_; // Previous page returned. - Page* stop_page_; // Page to stop at (last page returned by the iterator). + // Next page that will be returned. Cached here so that we can use this + // iterator for operations that deallocate pages. + Page* next_page_; }; // ----------------------------------------------------------------------------- -// A space has a list of pages. The next page can be accessed via -// Page::next_page() call. The next page of the last page is an -// invalid page pointer. A space can expand and shrink dynamically. +// A space has a circular list of pages. The next page can be accessed via +// Page::next_page() call. // An abstraction of allocation and relocation pointers in a page-structured // space. class AllocationInfo { public: - Address top; // current allocation top - Address limit; // current allocation limit + AllocationInfo() : top(NULL), limit(NULL) { + } + + Address top; // Current allocation top. + Address limit; // Current allocation limit. #ifdef DEBUG bool VerifyPagedAllocation() { @@ -935,70 +1205,199 @@ class AllocationStats BASE_EMBEDDED { // Zero out all the allocation statistics (ie, no capacity). void Clear() { capacity_ = 0; - available_ = 0; size_ = 0; waste_ = 0; } + void ClearSizeWaste() { + size_ = capacity_; + waste_ = 0; + } + // Reset the allocation statistics (ie, available = capacity with no // wasted or allocated bytes). void Reset() { - available_ = capacity_; size_ = 0; waste_ = 0; } // Accessors for the allocation statistics. intptr_t Capacity() { return capacity_; } - intptr_t Available() { return available_; } intptr_t Size() { return size_; } intptr_t Waste() { return waste_; } - // Grow the space by adding available bytes. + // Grow the space by adding available bytes. They are initially marked as + // being in use (part of the size), but will normally be immediately freed, + // putting them on the free list and removing them from size_. void ExpandSpace(int size_in_bytes) { capacity_ += size_in_bytes; - available_ += size_in_bytes; + size_ += size_in_bytes; + ASSERT(size_ >= 0); } - // Shrink the space by removing available bytes. + // Shrink the space by removing available bytes. Since shrinking is done + // during sweeping, bytes have been marked as being in use (part of the size) + // and are hereby freed. void ShrinkSpace(int size_in_bytes) { capacity_ -= size_in_bytes; - available_ -= size_in_bytes; + size_ -= size_in_bytes; + ASSERT(size_ >= 0); } // Allocate from available bytes (available -> size). void AllocateBytes(intptr_t size_in_bytes) { - available_ -= size_in_bytes; size_ += size_in_bytes; + ASSERT(size_ >= 0); } // Free allocated bytes, making them available (size -> available). void DeallocateBytes(intptr_t size_in_bytes) { size_ -= size_in_bytes; - available_ += size_in_bytes; + ASSERT(size_ >= 0); } // Waste free bytes (available -> waste). 
void WasteBytes(int size_in_bytes) { - available_ -= size_in_bytes; + size_ -= size_in_bytes; waste_ += size_in_bytes; - } - - // Consider the wasted bytes to be allocated, as they contain filler - // objects (waste -> size). - void FillWastedBytes(intptr_t size_in_bytes) { - waste_ -= size_in_bytes; - size_ += size_in_bytes; + ASSERT(size_ >= 0); } private: intptr_t capacity_; - intptr_t available_; intptr_t size_; intptr_t waste_; }; +// ----------------------------------------------------------------------------- +// Free lists for old object spaces +// +// Free-list nodes are free blocks in the heap. They look like heap objects +// (free-list node pointers have the heap object tag, and they have a map like +// a heap object). They have a size and a next pointer. The next pointer is +// the raw address of the next free list node (or NULL). +class FreeListNode: public HeapObject { + public: + // Obtain a free-list node from a raw address. This is not a cast because + // it does not check nor require that the first word at the address is a map + // pointer. + static FreeListNode* FromAddress(Address address) { + return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); + } + + static inline bool IsFreeListNode(HeapObject* object); + + // Set the size in bytes, which can be read with HeapObject::Size(). This + // function also writes a map to the first word of the block so that it + // looks like a heap object to the garbage collector and heap iteration + // functions. + void set_size(Heap* heap, int size_in_bytes); + + // Accessors for the next field. + inline FreeListNode* next(); + inline FreeListNode** next_address(); + inline void set_next(FreeListNode* next); + + inline void Zap(); + + private: + static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize); + + DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); +}; + + +// The free list for the old space. The free list is organized in such a way +// as to encourage objects allocated around the same time to be near each +// other. The normal way to allocate is intended to be by bumping a 'top' +// pointer until it hits a 'limit' pointer. When the limit is hit we need to +// find a new space to allocate from. This is done with the free list, which +// is divided up into rough categories to cut down on waste. Having finer +// categories would scatter allocation more. + +// The old space free list is organized in categories. +// 1-31 words: Such small free areas are discarded for efficiency reasons. +// They can be reclaimed by the compactor. However the distance between top +// and limit may be this small. +// 32-255 words: There is a list of spaces this large. It is used for top and +// limit when the object we need to allocate is 1-31 words in size. These +// spaces are called small. +// 256-2047 words: There is a list of spaces this large. It is used for top and +// limit when the object we need to allocate is 32-255 words in size. These +// spaces are called medium. +// 1048-16383 words: There is a list of spaces this large. It is used for top +// and limit when the object we need to allocate is 256-2047 words in size. +// These spaces are call large. +// At least 16384 words. This list is for objects of 2048 words or larger. +// Empty pages are added to this list. These spaces are called huge. +class FreeList BASE_EMBEDDED { + public: + explicit FreeList(PagedSpace* owner); + + // Clear the free list. + void Reset(); + + // Return the number of bytes available on the free list. 
+ intptr_t available() { return available_; } + + // Place a node on the free list. The block of size 'size_in_bytes' + // starting at 'start' is placed on the free list. The return value is the + // number of bytes that have been lost due to internal fragmentation by + // freeing the block. Bookkeeping information will be written to the block, + // ie, its contents will be destroyed. The start address should be word + // aligned, and the size should be a non-zero multiple of the word size. + int Free(Address start, int size_in_bytes); + + // Allocate a block of size 'size_in_bytes' from the free list. The block + // is unitialized. A failure is returned if no block is available. The + // number of bytes lost to fragmentation is returned in the output parameter + // 'wasted_bytes'. The size should be a non-zero multiple of the word size. + MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); + + void MarkNodes(); + +#ifdef DEBUG + void Zap(); + static intptr_t SumFreeList(FreeListNode* node); + static int FreeListLength(FreeListNode* cur); + intptr_t SumFreeLists(); + bool IsVeryLong(); +#endif + + void CountFreeListItems(Page* p, intptr_t* sizes); + + private: + // The size range of blocks, in bytes. + static const int kMinBlockSize = 3 * kPointerSize; + static const int kMaxBlockSize = Page::kMaxHeapObjectSize; + + FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); + + FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); + + PagedSpace* owner_; + Heap* heap_; + + // Total available bytes in all blocks on this free list. + int available_; + + static const int kSmallListMin = 0x20 * kPointerSize; + static const int kSmallListMax = 0xff * kPointerSize; + static const int kMediumListMax = 0x7ff * kPointerSize; + static const int kLargeListMax = 0x3fff * kPointerSize; + static const int kSmallAllocationMax = kSmallListMin - kPointerSize; + static const int kMediumAllocationMax = kSmallListMax; + static const int kLargeAllocationMax = kMediumListMax; + FreeListNode* small_list_; + FreeListNode* medium_list_; + FreeListNode* large_list_; + FreeListNode* huge_list_; + + DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); +}; + + class PagedSpace : public Space { public: // Creates a space with a maximum capacity, and an id. @@ -1013,7 +1412,7 @@ class PagedSpace : public Space { // the memory allocator's initial chunk) if possible. If the block of // addresses is not big enough to contain a single page-aligned page, a // fresh chunk will be allocated. - bool Setup(Address start, size_t size); + bool Setup(); // Returns true if the space has been successfully set up and not // subsequently torn down. @@ -1026,8 +1425,6 @@ class PagedSpace : public Space { // Checks whether an object/address is in this space. inline bool Contains(Address a); bool Contains(HeapObject* o) { return Contains(o->address()); } - // Never crashes even if a is not a valid pointer. - inline bool SafeContains(Address a); // Given an address occupied by a live object, return that object if it is // in this space, or Failure::Exception() if it is not. The implementation @@ -1035,104 +1432,91 @@ class PagedSpace : public Space { // linear in the number of objects in the page. It may be slow. MUST_USE_RESULT MaybeObject* FindObject(Address addr); - // Checks whether page is currently in use by this space. - bool IsUsed(Page* page); - - void MarkAllPagesClean(); - // Prepares for a mark-compact GC. 
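Which list a freed block lands on follows directly from the thresholds declared above (kSmallListMin through kLargeListMax) and the category comment. The standalone sketch below just restates those boundaries; kPointerSize is assumed to be 8 for the example, and the order in which the real FindNodeFor() searches the lists is an implementation detail of spaces.cc.

#include <cstdio>

// Restates the free-list size classes from the comment above; not V8 code.
static const int kPointerSize = 8;                       // assumed (64-bit)
static const int kSmallListMin = 0x20 * kPointerSize;    // 32 words
static const int kSmallListMax = 0xff * kPointerSize;    // 255 words
static const int kMediumListMax = 0x7ff * kPointerSize;  // 2047 words
static const int kLargeListMax = 0x3fff * kPointerSize;  // 16383 words

static const char* CategoryFor(int free_block_bytes) {
  if (free_block_bytes < kSmallListMin) return "too small: kept as waste";
  if (free_block_bytes <= kSmallListMax) return "small list";
  if (free_block_bytes <= kMediumListMax) return "medium list";
  if (free_block_bytes <= kLargeListMax) return "large list";
  return "huge list";
}

int main() {
  std::printf("%d bytes -> %s\n", 200, CategoryFor(200));    // 25 words
  std::printf("%d bytes -> %s\n", 4096, CategoryFor(4096));  // 512 words
  return 0;
}

This is also why PagedSpace::Free(), further down, can report fewer usable bytes than it was given: a strip below the 32-word minimum is pure internal fragmentation.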
- virtual void PrepareForMarkCompact(bool will_compact); + virtual void PrepareForMarkCompact(); - // The top of allocation in a page in this space. Undefined if page is unused. - Address PageAllocationTop(Page* page) { - return page == TopPageOf(allocation_info_) ? top() - : PageAllocationLimit(page); - } - - // The limit of allocation for a page in this space. - virtual Address PageAllocationLimit(Page* page) = 0; - - void FlushTopPageWatermark() { - AllocationTopPage()->SetCachedAllocationWatermark(top()); - AllocationTopPage()->InvalidateWatermark(true); - } - - // Current capacity without growing (Size() + Available() + Waste()). + // Current capacity without growing (Size() + Available()). intptr_t Capacity() { return accounting_stats_.Capacity(); } // Total amount of memory committed for this space. For paged // spaces this equals the capacity. intptr_t CommittedMemory() { return Capacity(); } - // Available bytes without growing. - intptr_t Available() { return accounting_stats_.Available(); } + // Sets the capacity, the available space and the wasted space to zero. + // The stats are rebuilt during sweeping by adding each page to the + // capacity and the size when it is encountered. As free spaces are + // discovered during the sweeping they are subtracted from the size and added + // to the available and wasted totals. + void ClearStats() { + accounting_stats_.ClearSizeWaste(); + } + + // Available bytes without growing. These are the bytes on the free list. + // The bytes in the linear allocation area are not included in this total + // because updating the stats would slow down allocation. New pages are + // immediately added to the free list so they show up here. + intptr_t Available() { return free_list_.available(); } - // Allocated bytes in this space. + // Allocated bytes in this space. Garbage bytes that were not found due to + // lazy sweeping are counted as being allocated! The bytes in the current + // linear allocation area (between top and limit) are also counted here. virtual intptr_t Size() { return accounting_stats_.Size(); } - // Wasted bytes due to fragmentation and not recoverable until the - // next GC of this space. - intptr_t Waste() { return accounting_stats_.Waste(); } + // As size, but the bytes in the current linear allocation area are not + // included. + virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); } - // Returns the address of the first object in this space. - Address bottom() { return first_page_->ObjectAreaStart(); } + // Wasted bytes in this space. These are just the bytes that were thrown away + // due to being too small to use for allocation. They do not include the + // free bytes that were not found at all due to lazy sweeping. + virtual intptr_t Waste() { return accounting_stats_.Waste(); } // Returns the allocation pointer in this space. - Address top() { return allocation_info_.top; } + Address top() { + return allocation_info_.top; + } + Address limit() { return allocation_info_.limit; } // Allocate the requested number of bytes in the space if possible, return a // failure object if not. MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); - // Allocate the requested number of bytes for relocation during mark-compact - // collection. - MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes); - virtual bool ReserveSpace(int bytes); - // Used by ReserveSpace. 
- virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0; - - // Free all pages in range from prev (exclusive) to last (inclusive). - // Freed pages are moved to the end of page list. - void FreePages(Page* prev, Page* last); - - // Deallocates a block. - virtual void DeallocateBlock(Address start, - int size_in_bytes, - bool add_to_freelist) = 0; + // Give a block of memory to the space's free list. It might be added to + // the free list or accounted as waste. + // If add_to_freelist is false then just accounting stats are updated and + // no attempt to add area to free list is made. + int Free(Address start, int size_in_bytes) { + int wasted = free_list_.Free(start, size_in_bytes); + accounting_stats_.DeallocateBytes(size_in_bytes - wasted); + return size_in_bytes - wasted; + } // Set space allocation info. - void SetTop(Address top) { + void SetTop(Address top, Address limit) { + ASSERT(top == limit || + Page::FromAddress(top) == Page::FromAddress(limit - 1)); allocation_info_.top = top; - allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top)); + allocation_info_.limit = limit; } - // --------------------------------------------------------------------------- - // Mark-compact collection support functions - - // Set the relocation point to the beginning of the space. - void MCResetRelocationInfo(); - - // Writes relocation info to the top page. - void MCWriteRelocationInfoToPage() { - TopPageOf(mc_forwarding_info_)-> - SetAllocationWatermark(mc_forwarding_info_.top); + void Allocate(int bytes) { + accounting_stats_.AllocateBytes(bytes); } - // Computes the offset of a given address in this space to the beginning - // of the space. - int MCSpaceOffsetForAddress(Address addr); + void IncreaseCapacity(int size) { + accounting_stats_.ExpandSpace(size); + } - // Updates the allocation pointer to the relocation top after a mark-compact - // collection. - virtual void MCCommitRelocationInfo() = 0; + // Releases an unused page and shrinks the space. + void ReleasePage(Page* page); - // Releases half of unused pages. - void Shrink(); + // Releases all of the unused pages. + void ReleaseAllUnusedPages(); - // Ensures that the capacity is at least 'capacity'. Returns false on failure. - bool EnsureCapacity(int capacity); + // The dummy page that anchors the linked list of pages. + Page* anchor() { return &anchor_; } #ifdef DEBUG // Print meta info and objects in this space. @@ -1141,6 +1525,9 @@ class PagedSpace : public Space { // Verify integrity of this space. virtual void Verify(ObjectVisitor* visitor); + // Reports statistics for the space + void ReportStatistics(); + // Overridden by subclasses to verify space-specific object // properties (e.g., only maps or free-list nodes are in map space). virtual void VerifyObject(HeapObject* obj) {} @@ -1151,10 +1538,67 @@ class PagedSpace : public Space { static void ResetCodeStatistics(); #endif - // Returns the page of the allocation pointer. - Page* AllocationTopPage() { return TopPageOf(allocation_info_); } + bool was_swept_conservatively() { return was_swept_conservatively_; } + void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; } + + // Evacuation candidates are swept by evacuator. Needs to return a valid + // result before _and_ after evacuation has finished. 
+ static bool ShouldBeSweptLazily(Page* p) { + return !p->IsEvacuationCandidate() && + !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && + !p->WasSweptPrecisely(); + } + + void SetPagesToSweep(Page* first, Page* last) { + first_unswept_page_ = first; + last_unswept_page_ = last; + } + + bool AdvanceSweeper(intptr_t bytes_to_sweep); + + bool IsSweepingComplete() { + return !first_unswept_page_->is_valid(); + } + + Page* FirstPage() { return anchor_.next_page(); } + Page* LastPage() { return anchor_.prev_page(); } + + bool IsFragmented(Page* p) { + intptr_t sizes[4]; + free_list_.CountFreeListItems(p, sizes); + + intptr_t ratio; + intptr_t ratio_threshold; + if (identity() == CODE_SPACE) { + ratio = (sizes[1] * 10 + sizes[2] * 2) * 100 / Page::kObjectAreaSize; + ratio_threshold = 10; + } else { + ratio = (sizes[0] * 5 + sizes[1]) * 100 / Page::kObjectAreaSize; + ratio_threshold = 15; + } + + if (FLAG_trace_fragmentation) { + PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", + reinterpret_cast<void*>(p), + identity(), + static_cast<int>(sizes[0]), + static_cast<double>(sizes[0] * 100) / Page::kObjectAreaSize, + static_cast<int>(sizes[1]), + static_cast<double>(sizes[1] * 100) / Page::kObjectAreaSize, + static_cast<int>(sizes[2]), + static_cast<double>(sizes[2] * 100) / Page::kObjectAreaSize, + static_cast<int>(sizes[3]), + static_cast<double>(sizes[3] * 100) / Page::kObjectAreaSize, + (ratio > ratio_threshold) ? "[fragmented]" : ""); + } - void RelinkPageListInChunkOrder(bool deallocate_blocks); + return (ratio > ratio_threshold) || + (FLAG_always_compact && sizes[3] != Page::kObjectAreaSize); + } + + void EvictEvacuationCandidatesFromFreeLists(); + + bool CanExpand(); protected: // Maximum capacity of this space. @@ -1163,80 +1607,42 @@ class PagedSpace : public Space { // Accounting information for this space. AllocationStats accounting_stats_; - // The first page in this space. - Page* first_page_; - - // The last page in this space. Initially set in Setup, updated in - // Expand and Shrink. - Page* last_page_; + // The dummy page that anchors the double linked list of pages. + Page anchor_; - // True if pages owned by this space are linked in chunk-order. - // See comment for class MemoryAllocator for definition of chunk-order. - bool page_list_is_chunk_ordered_; + // The space's free list. + FreeList free_list_; // Normal allocation information. AllocationInfo allocation_info_; - // Relocation information during mark-compact collections. - AllocationInfo mc_forwarding_info_; - // Bytes of each page that cannot be allocated. Possibly non-zero // for pages in spaces with only fixed-size objects. Always zero // for pages in spaces with variable sized objects (those pages are // padded with free-list nodes). int page_extra_; - // Sets allocation pointer to a page bottom. - static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p); + bool was_swept_conservatively_; - // Returns the top page specified by an allocation info structure. - static Page* TopPageOf(AllocationInfo alloc_info) { - return Page::FromAllocationTop(alloc_info.limit); - } - - int CountPagesToTop() { - Page* p = Page::FromAllocationTop(allocation_info_.top); - PageIterator it(this, PageIterator::ALL_PAGES); - int counter = 1; - while (it.has_next()) { - if (it.next() == p) return counter; - counter++; - } - UNREACHABLE(); - return -1; - } + Page* first_unswept_page_; + Page* last_unswept_page_; // Expands the space by allocating a fixed number of pages. 
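The IsFragmented() heuristic above weights the smaller free-list categories most heavily, since many small holes are what make a page expensive to reuse; sizes[] reads as the per-category free bytes that CountFreeListItems() reports for the page. A standalone rerun of the non-code-space arithmetic, with the object-area size assumed to be 1 MB just to make the numbers concrete:

#include <cstdio>

int main() {
  const long long kObjectAreaSize = 1 << 20;           // assumed for the example
  long long sizes[4] = {64 * 1024, 32 * 1024, 0, 0};   // free bytes per category
  long long ratio = (sizes[0] * 5 + sizes[1]) * 100 / kObjectAreaSize;
  std::printf("ratio = %lld\n", ratio);
  // Prints 34, which is above the non-code-space threshold of 15, so a page
  // with this free-list profile would be treated as fragmented and become an
  // evacuation candidate.
  return 0;
}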
Returns false if - // it cannot allocate requested number of pages from OS. Newly allocated - // pages are append to the last_page; - bool Expand(Page* last_page); - - // Generic fast case allocation function that tries linear allocation in - // the top page of 'alloc_info'. Returns NULL on failure. - inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info, - int size_in_bytes); + // it cannot allocate requested number of pages from OS. + bool Expand(); - // During normal allocation or deserialization, roll to the next page in - // the space (there is assumed to be one) and allocate there. This - // function is space-dependent. - virtual HeapObject* AllocateInNextPage(Page* current_page, - int size_in_bytes) = 0; + // Generic fast case allocation function that tries linear allocation at the + // address denoted by top in allocation_info_. + inline HeapObject* AllocateLinearly(int size_in_bytes); // Slow path of AllocateRaw. This function is space-dependent. - MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0; - - // Slow path of MCAllocateRaw. - MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes); + MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); #ifdef DEBUG // Returns the number of total pages in this space. int CountTotalPages(); #endif - private: - // Returns a pointer to the page of the relocation pointer. - Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); } - friend class PageIterator; }; @@ -1276,20 +1682,113 @@ class HistogramInfo: public NumberAndSizeInfo { }; +enum SemiSpaceId { + kFromSpace = 0, + kToSpace = 1 +}; + + +class SemiSpace; + + +class NewSpacePage : public MemoryChunk { + public: + // GC related flags copied from from-space to to-space when + // flipping semispaces. + static const intptr_t kCopyOnFlipFlagsMask = + (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | + (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | + (1 << MemoryChunk::SCAN_ON_SCAVENGE); + + inline NewSpacePage* next_page() const { + return static_cast<NewSpacePage*>(next_chunk()); + } + + inline void set_next_page(NewSpacePage* page) { + set_next_chunk(page); + } + + inline NewSpacePage* prev_page() const { + return static_cast<NewSpacePage*>(prev_chunk()); + } + + inline void set_prev_page(NewSpacePage* page) { + set_prev_chunk(page); + } + + SemiSpace* semi_space() { + return reinterpret_cast<SemiSpace*>(owner()); + } + + bool is_anchor() { return !this->InNewSpace(); } + + static bool IsAtStart(Address addr) { + return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) + == kObjectStartOffset; + } + + static bool IsAtEnd(Address addr) { + return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0; + } + + Address address() { + return reinterpret_cast<Address>(this); + } + + // Finds the NewSpacePage containg the given address. + static inline NewSpacePage* FromAddress(Address address_in_page) { + Address page_start = + reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) & + ~Page::kPageAlignmentMask); + NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start); + ASSERT(page->InNewSpace()); + return page; + } + + // Find the page for a limit address. A limit address is either an address + // inside a page, or the address right after the last byte of a page. 
+ static inline NewSpacePage* FromLimit(Address address_limit) { + return NewSpacePage::FromAddress(address_limit - 1); + } + + private: + // Create a NewSpacePage object that is only used as anchor + // for the doubly-linked list of real pages. + explicit NewSpacePage(SemiSpace* owner) { + InitializeAsAnchor(owner); + } + + static NewSpacePage* Initialize(Heap* heap, + Address start, + SemiSpace* semi_space); + + // Intialize a fake NewSpacePage used as sentinel at the ends + // of a doubly-linked list of real NewSpacePages. + // Only uses the prev/next links, and sets flags to not be in new-space. + void InitializeAsAnchor(SemiSpace* owner); + + friend class SemiSpace; + friend class SemiSpaceIterator; +}; + + // ----------------------------------------------------------------------------- // SemiSpace in young generation // -// A semispace is a contiguous chunk of memory. The mark-compact collector -// uses the memory in the from space as a marking stack when tracing live -// objects. +// A semispace is a contiguous chunk of memory holding page-like memory +// chunks. The mark-compact collector uses the memory of the first page in +// the from space as a marking stack when tracing live objects. class SemiSpace : public Space { public: // Constructor. - explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) { - start_ = NULL; - age_mark_ = NULL; - } + SemiSpace(Heap* heap, SemiSpaceId semispace) + : Space(heap, NEW_SPACE, NOT_EXECUTABLE), + start_(NULL), + age_mark_(NULL), + id_(semispace), + anchor_(this), + current_page_(NULL) { } // Sets up the semispace using the given chunk. bool Setup(Address start, int initial_capacity, int maximum_capacity); @@ -1301,14 +1800,9 @@ class SemiSpace : public Space { // True if the space has been set up but not torn down. bool HasBeenSetup() { return start_ != NULL; } - // Grow the size of the semispace by committing extra virtual memory. - // Assumes that the caller has checked that the semispace has not reached - // its maximum capacity (and thus there is space available in the reserved - // address range to grow). - bool Grow(); - // Grow the semispace to the new capacity. The new capacity - // requested must be larger than the current capacity. + // requested must be larger than the current capacity and less than + // the maximum capacity. bool GrowTo(int new_capacity); // Shrinks the semispace to the new capacity. The new capacity @@ -1316,14 +1810,41 @@ class SemiSpace : public Space { // semispace and less than the current capacity. bool ShrinkTo(int new_capacity); - // Returns the start address of the space. - Address low() { return start_; } + // Returns the start address of the first page of the space. + Address space_start() { + ASSERT(anchor_.next_page() != &anchor_); + return anchor_.next_page()->body(); + } + + // Returns the start address of the current page of the space. + Address page_low() { + ASSERT(anchor_.next_page() != &anchor_); + return current_page_->body(); + } + // Returns one past the end address of the space. - Address high() { return low() + capacity_; } + Address space_end() { + return anchor_.prev_page()->body_limit(); + } + + // Returns one past the end address of the current page of the space. + Address page_high() { + return current_page_->body_limit(); + } + + bool AdvancePage() { + NewSpacePage* next_page = current_page_->next_page(); + if (next_page == anchor()) return false; + current_page_ = next_page; + return true; + } + + // Resets the space to using the first page. 
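FromLimit() above subtracts one byte before delegating to FromAddress() because a limit may be the address one past the end of a page; masking it directly would name the following page. A quick standalone check of that edge case (page size again assumed to be 1 MB for illustration):

#include <cstdint>
#include <cassert>

int main() {
  const int kPageSizeBits = 20;  // assumed for the example
  const uintptr_t kPageAlignmentMask =
      (static_cast<uintptr_t>(1) << kPageSizeBits) - 1;
  uintptr_t page = static_cast<uintptr_t>(0x40000000);
  uintptr_t limit = page + (kPageAlignmentMask + 1);    // one past the page
  assert((limit & ~kPageAlignmentMask) != page);        // naive mask: wrong page
  assert(((limit - 1) & ~kPageAlignmentMask) == page);  // the FromLimit() trick
  return 0;
}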
+ void Reset(); // Age mark accessors. Address age_mark() { return age_mark_; } - void set_age_mark(Address mark) { age_mark_ = mark; } + void set_age_mark(Address mark); // True if the address is in the address range of this semispace (not // necessarily below the allocation pointer). @@ -1338,11 +1859,6 @@ class SemiSpace : public Space { return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; } - // The offset of an address from the beginning of the space. - int SpaceOffsetForAddress(Address addr) { - return static_cast<int>(addr - low()); - } - // If we don't have these here then SemiSpace will be abstract. However // they should never be called. virtual intptr_t Size() { @@ -1359,9 +1875,19 @@ class SemiSpace : public Space { bool Commit(); bool Uncommit(); + NewSpacePage* first_page() { return anchor_.next_page(); } + NewSpacePage* current_page() { return current_page_; } + #ifdef DEBUG virtual void Print(); virtual void Verify(); + // Validate a range of of addresses in a SemiSpace. + // The "from" address must be on a page prior to the "to" address, + // in the linked page order, or it must be earlier on the same page. + static void AssertValidRange(Address from, Address to); +#else + // Do nothing. + inline static void AssertValidRange(Address from, Address to) {} #endif // Returns the current capacity of the semi space. @@ -1373,7 +1899,17 @@ class SemiSpace : public Space { // Returns the initial capacity of the semi space. int InitialCapacity() { return initial_capacity_; } + SemiSpaceId id() { return id_; } + + static void Swap(SemiSpace* from, SemiSpace* to); + private: + // Flips the semispace between being from-space and to-space. + // Copies the flags into the masked positions on all pages in the space. + void FlipPages(intptr_t flags, intptr_t flag_mask); + + NewSpacePage* anchor() { return &anchor_; } + // The current and maximum capacity of the space. int capacity_; int maximum_capacity_; @@ -1390,7 +1926,13 @@ class SemiSpace : public Space { uintptr_t object_expected_; bool committed_; + SemiSpaceId id_; + NewSpacePage anchor_; + NewSpacePage* current_page_; + + friend class SemiSpaceIterator; + friend class NewSpacePageIterator; public: TRACK_MEMORY("SemiSpace") }; @@ -1406,12 +1948,26 @@ class SemiSpaceIterator : public ObjectIterator { // Create an iterator over the objects in the given space. If no start // address is given, the iterator starts from the bottom of the space. If // no size function is given, the iterator calls Object::Size(). + + // Iterate over all of allocated to-space. explicit SemiSpaceIterator(NewSpace* space); + // Iterate over all of allocated to-space, with a custome size function. SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func); + // Iterate over part of allocated to-space, from start to the end + // of allocation. SemiSpaceIterator(NewSpace* space, Address start); + // Iterate from one address to another in the same semi-space. + SemiSpaceIterator(Address from, Address to); - HeapObject* next() { + HeapObject* Next() { if (current_ == limit_) return NULL; + if (NewSpacePage::IsAtEnd(current_)) { + NewSpacePage* page = NewSpacePage::FromLimit(current_); + page = page->next_page(); + ASSERT(!page->is_anchor()); + current_ = page->body(); + if (current_ == limit_) return NULL; + } HeapObject* object = HeapObject::FromAddress(current_); int size = (size_func_ == NULL) ? 
object->Size() : size_func_(object); @@ -1421,14 +1977,13 @@ class SemiSpaceIterator : public ObjectIterator { } // Implementation of the ObjectIterator functions. - virtual HeapObject* next_object() { return next(); } + virtual HeapObject* next_object() { return Next(); } private: - void Initialize(NewSpace* space, Address start, Address end, + void Initialize(Address start, + Address end, HeapObjectCallback size_func); - // The semispace. - SemiSpace* space_; // The current iteration point. Address current_; // The end of iteration. @@ -1439,6 +1994,34 @@ class SemiSpaceIterator : public ObjectIterator { // ----------------------------------------------------------------------------- +// A PageIterator iterates the pages in a semi-space. +class NewSpacePageIterator BASE_EMBEDDED { + public: + // Make an iterator that runs over all pages in to-space. + explicit inline NewSpacePageIterator(NewSpace* space); + + // Make an iterator that runs over all pages in the given semispace, + // even those not used in allocation. + explicit inline NewSpacePageIterator(SemiSpace* space); + + // Make iterator that iterates from the page containing start + // to the page that contains limit in the same semispace. + inline NewSpacePageIterator(Address start, Address limit); + + inline bool has_next(); + inline NewSpacePage* next(); + + private: + NewSpacePage* prev_page_; // Previous page returned. + // Next page that will be returned. Cached here so that we can use this + // iterator for operations that deallocate pages. + NewSpacePage* next_page_; + // Last page returned. + NewSpacePage* last_page_; +}; + + +// ----------------------------------------------------------------------------- // The young generation space. // // The new space consists of a contiguous pair of semispaces. It simply @@ -1449,11 +2032,13 @@ class NewSpace : public Space { // Constructor. explicit NewSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE), - to_space_(heap), - from_space_(heap) {} + to_space_(heap, kToSpace), + from_space_(heap, kFromSpace), + reservation_(), + inline_allocation_limit_step_(0) {} // Sets up the new space using the given chunk. - bool Setup(Address start, int size); + bool Setup(int reserved_semispace_size_, int max_semispace_size); // Tears down the space. Heap memory was not allocated by the space, so it // is not deallocated here. @@ -1480,18 +2065,30 @@ class NewSpace : public Space { return (reinterpret_cast<uintptr_t>(a) & address_mask_) == reinterpret_cast<uintptr_t>(start_); } + bool Contains(Object* o) { - return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; + Address a = reinterpret_cast<Address>(o); + return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_; } // Return the allocated bytes in the active semispace. - virtual intptr_t Size() { return static_cast<int>(top() - bottom()); } + virtual intptr_t Size() { + return pages_used_ * Page::kObjectAreaSize + + static_cast<int>(top() - to_space_.page_low()); + } + // The same, but returning an int. We have to have the one that returns // intptr_t because it is inherited, but if we know we are dealing with the // new space, which can't get as big as the other spaces then this is useful: int SizeAsInt() { return static_cast<int>(Size()); } // Return the current capacity of a semispace. 
+ intptr_t EffectiveCapacity() { + ASSERT(to_space_.Capacity() == from_space_.Capacity()); + return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize; + } + + // Return the current capacity of a semispace. intptr_t Capacity() { ASSERT(to_space_.Capacity() == from_space_.Capacity()); return to_space_.Capacity(); @@ -1503,8 +2100,11 @@ class NewSpace : public Space { return Capacity(); } - // Return the available bytes without growing in the active semispace. - intptr_t Available() { return Capacity() - Size(); } + // Return the available bytes without growing or switching page in the + // active semispace. + intptr_t Available() { + return allocation_info_.limit - allocation_info_.top; + } // Return the maximum capacity of a semispace. int MaximumCapacity() { @@ -1519,9 +2119,12 @@ class NewSpace : public Space { } // Return the address of the allocation pointer in the active semispace. - Address top() { return allocation_info_.top; } + Address top() { + ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top)); + return allocation_info_.top; + } // Return the address of the first object in the active semispace. - Address bottom() { return to_space_.low(); } + Address bottom() { return to_space_.space_start(); } // Get the age mark of the inactive semispace. Address age_mark() { return from_space_.age_mark(); } @@ -1533,54 +2136,70 @@ class NewSpace : public Space { Address start() { return start_; } uintptr_t mask() { return address_mask_; } + INLINE(uint32_t AddressToMarkbitIndex(Address addr)) { + ASSERT(Contains(addr)); + ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) || + IsAligned(OffsetFrom(addr) - 1, kPointerSize)); + return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2; + } + + INLINE(Address MarkbitIndexToAddress(uint32_t index)) { + return reinterpret_cast<Address>(index << kPointerSizeLog2); + } + // The allocation top and limit addresses. Address* allocation_top_address() { return &allocation_info_.top; } Address* allocation_limit_address() { return &allocation_info_.limit; } MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) { - return AllocateRawInternal(size_in_bytes, &allocation_info_); - } - - // Allocate the requested number of bytes for relocation during mark-compact - // collection. - MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) { - return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_); + return AllocateRawInternal(size_in_bytes); } // Reset the allocation pointer to the beginning of the active semispace. void ResetAllocationInfo(); - // Reset the reloction pointer to the bottom of the inactive semispace in - // preparation for mark-compact collection. - void MCResetRelocationInfo(); - // Update the allocation pointer in the active semispace after a - // mark-compact collection. - void MCCommitRelocationInfo(); - // Get the extent of the inactive semispace (for use as a marking stack). - Address FromSpaceLow() { return from_space_.low(); } - Address FromSpaceHigh() { return from_space_.high(); } + void LowerInlineAllocationLimit(intptr_t step) { + inline_allocation_limit_step_ = step; + if (step == 0) { + allocation_info_.limit = to_space_.page_high(); + } else { + allocation_info_.limit = Min( + allocation_info_.top + inline_allocation_limit_step_, + allocation_info_.limit); + } + top_on_previous_step_ = allocation_info_.top; + } + + // Get the extent of the inactive semispace (for use as a marking stack, + // or to zap it). 
Notice: space-addresses are not necessarily on the + // same page, so FromSpaceStart() might be above FromSpaceEnd(). + Address FromSpacePageLow() { return from_space_.page_low(); } + Address FromSpacePageHigh() { return from_space_.page_high(); } + Address FromSpaceStart() { return from_space_.space_start(); } + Address FromSpaceEnd() { return from_space_.space_end(); } - // Get the extent of the active semispace (to sweep newly copied objects - // during a scavenge collection). - Address ToSpaceLow() { return to_space_.low(); } - Address ToSpaceHigh() { return to_space_.high(); } + // Get the extent of the active semispace's pages' memory. + Address ToSpaceStart() { return to_space_.space_start(); } + Address ToSpaceEnd() { return to_space_.space_end(); } - // Offsets from the beginning of the semispaces. - int ToSpaceOffsetForAddress(Address a) { - return to_space_.SpaceOffsetForAddress(a); + inline bool ToSpaceContains(Address address) { + return to_space_.Contains(address); } - int FromSpaceOffsetForAddress(Address a) { - return from_space_.SpaceOffsetForAddress(a); + inline bool FromSpaceContains(Address address) { + return from_space_.Contains(address); } // True if the object is a heap object in the address range of the // respective semispace (not necessarily below the allocation pointer of the // semispace). - bool ToSpaceContains(Object* o) { return to_space_.Contains(o); } - bool FromSpaceContains(Object* o) { return from_space_.Contains(o); } + inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); } + inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); } - bool ToSpaceContains(Address a) { return to_space_.Contains(a); } - bool FromSpaceContains(Address a) { return from_space_.Contains(a); } + // Try to switch the active semispace to a new, empty, page. + // Returns false if this isn't possible or reasonable (i.e., there + // are no pages, or the current page is already empty), or true + // if successful. + bool AddFreshPage(); virtual bool ReserveSpace(int bytes); @@ -1620,10 +2239,24 @@ class NewSpace : public Space { return from_space_.Uncommit(); } + inline intptr_t inline_allocation_limit_step() { + return inline_allocation_limit_step_; + } + + SemiSpace* active_space() { return &to_space_; } + private: + // Update allocation info to match the current to-space page. + void UpdateAllocationInfo(); + + Address chunk_base_; + uintptr_t chunk_size_; + // The semispaces. SemiSpace to_space_; SemiSpace from_space_; + VirtualMemory reservation_; + int pages_used_; // Start address and bit mask for containment testing. Address start_; @@ -1634,15 +2267,20 @@ class NewSpace : public Space { // Allocation pointer and limit for normal allocation and allocation during // mark-compact collection. AllocationInfo allocation_info_; - AllocationInfo mc_forwarding_info_; + + // When incremental marking is active we will set allocation_info_.limit + // to be lower than actual limit and then will gradually increase it + // in steps to guarantee that we do incremental marking steps even + // when all allocation is performed from inlined generated code. + intptr_t inline_allocation_limit_step_; + + Address top_on_previous_step_; HistogramInfo* allocated_histogram_; HistogramInfo* promoted_histogram_; - // Implementation of AllocateRaw and MCAllocateRaw. - MUST_USE_RESULT inline MaybeObject* AllocateRawInternal( - int size_in_bytes, - AllocationInfo* alloc_info); + // Implementation of AllocateRaw. 
+ MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(int size_in_bytes); friend class SemiSpaceIterator; @@ -1652,193 +2290,6 @@ class NewSpace : public Space { // ----------------------------------------------------------------------------- -// Free lists for old object spaces -// -// Free-list nodes are free blocks in the heap. They look like heap objects -// (free-list node pointers have the heap object tag, and they have a map like -// a heap object). They have a size and a next pointer. The next pointer is -// the raw address of the next free list node (or NULL). -class FreeListNode: public HeapObject { - public: - // Obtain a free-list node from a raw address. This is not a cast because - // it does not check nor require that the first word at the address is a map - // pointer. - static FreeListNode* FromAddress(Address address) { - return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); - } - - static inline bool IsFreeListNode(HeapObject* object); - - // Set the size in bytes, which can be read with HeapObject::Size(). This - // function also writes a map to the first word of the block so that it - // looks like a heap object to the garbage collector and heap iteration - // functions. - void set_size(Heap* heap, int size_in_bytes); - - // Accessors for the next field. - inline Address next(Heap* heap); - inline void set_next(Heap* heap, Address next); - - private: - static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize); - - DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); -}; - - -// The free list for the old space. -class OldSpaceFreeList BASE_EMBEDDED { - public: - OldSpaceFreeList(Heap* heap, AllocationSpace owner); - - // Clear the free list. - void Reset(); - - // Return the number of bytes available on the free list. - intptr_t available() { return available_; } - - // Place a node on the free list. The block of size 'size_in_bytes' - // starting at 'start' is placed on the free list. The return value is the - // number of bytes that have been lost due to internal fragmentation by - // freeing the block. Bookkeeping information will be written to the block, - // ie, its contents will be destroyed. The start address should be word - // aligned, and the size should be a non-zero multiple of the word size. - int Free(Address start, int size_in_bytes); - - // Allocate a block of size 'size_in_bytes' from the free list. The block - // is unitialized. A failure is returned if no block is available. The - // number of bytes lost to fragmentation is returned in the output parameter - // 'wasted_bytes'. The size should be a non-zero multiple of the word size. - MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes); - - void MarkNodes(); - - private: - // The size range of blocks, in bytes. (Smaller allocations are allowed, but - // will always result in waste.) - static const int kMinBlockSize = 2 * kPointerSize; - static const int kMaxBlockSize = Page::kMaxHeapObjectSize; - - Heap* heap_; - - // The identity of the owning space, for building allocation Failure - // objects. - AllocationSpace owner_; - - // Total available bytes in all blocks on this free list. - int available_; - - // Blocks are put on exact free lists in an array, indexed by size in words. - // The available sizes are kept in an increasingly ordered list. Entries - // corresponding to sizes < kMinBlockSize always have an empty free list - // (but index kHead is used for the head of the size list). 
- struct SizeNode { - // Address of the head FreeListNode of the implied block size or NULL. - Address head_node_; - // Size (words) of the next larger available size if head_node_ != NULL. - int next_size_; - }; - static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1; - SizeNode free_[kFreeListsLength]; - - // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[. - static const int kHead = kMinBlockSize / kPointerSize - 1; - static const int kEnd = kMaxInt; - - // We keep a "finger" in the size list to speed up a common pattern: - // repeated requests for the same or increasing sizes. - int finger_; - - // Starting from *prev, find and return the smallest size >= index (words), - // or kEnd. Update *prev to be the largest size < index, or kHead. - int FindSize(int index, int* prev) { - int cur = free_[*prev].next_size_; - while (cur < index) { - *prev = cur; - cur = free_[cur].next_size_; - } - return cur; - } - - // Remove an existing element from the size list. - void RemoveSize(int index) { - int prev = kHead; - int cur = FindSize(index, &prev); - ASSERT(cur == index); - free_[prev].next_size_ = free_[cur].next_size_; - finger_ = prev; - } - - // Insert a new element into the size list. - void InsertSize(int index) { - int prev = kHead; - int cur = FindSize(index, &prev); - ASSERT(cur != index); - free_[prev].next_size_ = index; - free_[index].next_size_ = cur; - } - - // The size list is not updated during a sequence of calls to Free, but is - // rebuilt before the next allocation. - void RebuildSizeList(); - bool needs_rebuild_; - -#ifdef DEBUG - // Does this free list contain a free block located at the address of 'node'? - bool Contains(FreeListNode* node); -#endif - - DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList); -}; - - -// The free list for the map space. -class FixedSizeFreeList BASE_EMBEDDED { - public: - FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size); - - // Clear the free list. - void Reset(); - - // Return the number of bytes available on the free list. - intptr_t available() { return available_; } - - // Place a node on the free list. The block starting at 'start' (assumed to - // have size object_size_) is placed on the free list. Bookkeeping - // information will be written to the block, ie, its contents will be - // destroyed. The start address should be word aligned. - void Free(Address start); - - // Allocate a fixed sized block from the free list. The block is unitialized. - // A failure is returned if no block is available. - MUST_USE_RESULT MaybeObject* Allocate(); - - void MarkNodes(); - - private: - Heap* heap_; - - // Available bytes on the free list. - intptr_t available_; - - // The head of the free list. - Address head_; - - // The tail of the free list. - Address tail_; - - // The identity of the owning space, for building allocation Failure - // objects. - AllocationSpace owner_; - - // The size of the objects in this space. 
- int object_size_; - - DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList); -}; - - -// ----------------------------------------------------------------------------- // Old object space (excluding map objects) class OldSpace : public PagedSpace { @@ -1849,71 +2300,28 @@ class OldSpace : public PagedSpace { intptr_t max_capacity, AllocationSpace id, Executability executable) - : PagedSpace(heap, max_capacity, id, executable), - free_list_(heap, id) { + : PagedSpace(heap, max_capacity, id, executable) { page_extra_ = 0; } - // The bytes available on the free list (ie, not above the linear allocation - // pointer). - intptr_t AvailableFree() { return free_list_.available(); } - // The limit of allocation for a page in this space. virtual Address PageAllocationLimit(Page* page) { return page->ObjectAreaEnd(); } - // Give a block of memory to the space's free list. It might be added to - // the free list or accounted as waste. - // If add_to_freelist is false then just accounting stats are updated and - // no attempt to add area to free list is made. - void Free(Address start, int size_in_bytes, bool add_to_freelist) { - accounting_stats_.DeallocateBytes(size_in_bytes); - - if (add_to_freelist) { - int wasted_bytes = free_list_.Free(start, size_in_bytes); - accounting_stats_.WasteBytes(wasted_bytes); - } - } - - virtual void DeallocateBlock(Address start, - int size_in_bytes, - bool add_to_freelist); - - // Prepare for full garbage collection. Resets the relocation pointer and - // clears the free list. - virtual void PrepareForMarkCompact(bool will_compact); - - // Updates the allocation pointer to the relocation top after a mark-compact - // collection. - virtual void MCCommitRelocationInfo(); - - virtual void PutRestOfCurrentPageOnFreeList(Page* current_page); - - void MarkFreeListNodes() { free_list_.MarkNodes(); } - -#ifdef DEBUG - // Reports statistics for the space - void ReportStatistics(); -#endif - - protected: - // Virtual function in the superclass. Slow path of AllocateRaw. - MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); - - // Virtual function in the superclass. Allocate linearly at the start of - // the page after current_page (there is assumed to be one). - HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); - - private: - // The space's free list. - OldSpaceFreeList free_list_; - public: TRACK_MEMORY("OldSpace") }; +// For contiguous spaces, top should be in the space (or at the end) and limit +// should be the end of the space. +#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ + ASSERT((space).page_low() <= (info).top \ + && (info).top <= (space).page_high() \ + && (info).limit <= (space).page_high()) + + // ----------------------------------------------------------------------------- // Old space for objects of a fixed size @@ -1926,8 +2334,7 @@ class FixedSpace : public PagedSpace { const char* name) : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), object_size_in_bytes_(object_size_in_bytes), - name_(name), - free_list_(heap, id, object_size_in_bytes) { + name_(name) { page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; } @@ -1938,44 +2345,12 @@ class FixedSpace : public PagedSpace { int object_size_in_bytes() { return object_size_in_bytes_; } - // Give a fixed sized block of memory to the space's free list. - // If add_to_freelist is false then just accounting stats are updated and - // no attempt to add area to free list is made. 
- void Free(Address start, bool add_to_freelist) { - if (add_to_freelist) { - free_list_.Free(start); - } - accounting_stats_.DeallocateBytes(object_size_in_bytes_); - } - // Prepares for a mark-compact GC. - virtual void PrepareForMarkCompact(bool will_compact); - - // Updates the allocation pointer to the relocation top after a mark-compact - // collection. - virtual void MCCommitRelocationInfo(); - - virtual void PutRestOfCurrentPageOnFreeList(Page* current_page); - - virtual void DeallocateBlock(Address start, - int size_in_bytes, - bool add_to_freelist); + virtual void PrepareForMarkCompact(); void MarkFreeListNodes() { free_list_.MarkNodes(); } -#ifdef DEBUG - // Reports statistic info of the space - void ReportStatistics(); -#endif - protected: - // Virtual function in the superclass. Slow path of AllocateRaw. - MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); - - // Virtual function in the superclass. Allocate linearly at the start of - // the page after current_page (there is assumed to be one). - HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); - void ResetFreeList() { free_list_.Reset(); } @@ -1986,9 +2361,6 @@ class FixedSpace : public PagedSpace { // The name of this space. const char* name_; - - // The space's free list. - FixedSizeFreeList free_list_; }; @@ -2004,83 +2376,18 @@ class MapSpace : public FixedSpace { AllocationSpace id) : FixedSpace(heap, max_capacity, id, Map::kSize, "map"), max_map_space_pages_(max_map_space_pages) { - ASSERT(max_map_space_pages < kMaxMapPageIndex); } - // Prepares for a mark-compact GC. - virtual void PrepareForMarkCompact(bool will_compact); - // Given an index, returns the page address. - Address PageAddress(int page_index) { return page_addresses_[page_index]; } - - static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits; - - // Are map pointers encodable into map word? - bool MapPointersEncodable() { - if (!FLAG_use_big_map_space) { - ASSERT(CountPagesToTop() <= kMaxMapPageIndex); - return true; + // TODO(1600): this limit is artificial just to keep code compilable + static const int kMaxMapPageIndex = 1 << 16; + + virtual int RoundSizeDownToObjectAlignment(int size) { + if (IsPowerOf2(Map::kSize)) { + return RoundDown(size, Map::kSize); + } else { + return (size / Map::kSize) * Map::kSize; } - return CountPagesToTop() <= max_map_space_pages_; - } - - // Should be called after forced sweep to find out if map space needs - // compaction.
- bool NeedsCompaction(int live_maps) { - return !MapPointersEncodable() && live_maps <= CompactionThreshold(); - } - - Address TopAfterCompaction(int live_maps) { - ASSERT(NeedsCompaction(live_maps)); - - int pages_left = live_maps / kMapsPerPage; - PageIterator it(this, PageIterator::ALL_PAGES); - while (pages_left-- > 0) { - ASSERT(it.has_next()); - it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks); - } - ASSERT(it.has_next()); - Page* top_page = it.next(); - top_page->SetRegionMarks(Page::kAllRegionsCleanMarks); - ASSERT(top_page->is_valid()); - - int offset = live_maps % kMapsPerPage * Map::kSize; - Address top = top_page->ObjectAreaStart() + offset; - ASSERT(top < top_page->ObjectAreaEnd()); - ASSERT(Contains(top)); - - return top; - } - - void FinishCompaction(Address new_top, int live_maps) { - Page* top_page = Page::FromAddress(new_top); - ASSERT(top_page->is_valid()); - - SetAllocationInfo(&allocation_info_, top_page); - allocation_info_.top = new_top; - - int new_size = live_maps * Map::kSize; - accounting_stats_.DeallocateBytes(accounting_stats_.Size()); - accounting_stats_.AllocateBytes(new_size); - - // Flush allocation watermarks. - for (Page* p = first_page_; p != top_page; p = p->next_page()) { - p->SetAllocationWatermark(p->AllocationTop()); - } - top_page->SetAllocationWatermark(new_top); - -#ifdef DEBUG - if (FLAG_enable_slow_asserts) { - intptr_t actual_size = 0; - for (Page* p = first_page_; p != top_page; p = p->next_page()) - actual_size += kMapsPerPage * Map::kSize; - actual_size += (new_top - top_page->ObjectAreaStart()); - ASSERT(accounting_stats_.Size() == actual_size); - } -#endif - - Shrink(); - ResetFreeList(); } protected: @@ -2098,9 +2405,6 @@ class MapSpace : public FixedSpace { const int max_map_space_pages_; - // An array of page start address in a map space. - Address page_addresses_[kMaxMapPageIndex]; - public: TRACK_MEMORY("MapSpace") }; @@ -2116,6 +2420,14 @@ class CellSpace : public FixedSpace { : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {} + virtual int RoundSizeDownToObjectAlignment(int size) { + if (IsPowerOf2(JSGlobalPropertyCell::kSize)) { + return RoundDown(size, JSGlobalPropertyCell::kSize); + } else { + return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize; + } + } + protected: #ifdef DEBUG virtual void VerifyObject(HeapObject* obj); @@ -2133,64 +2445,6 @@ class CellSpace : public FixedSpace { // A large object always starts at Page::kObjectStartOffset to a page. // Large objects do not move during garbage collections. -// A LargeObjectChunk holds exactly one large object page with exactly one -// large object. -class LargeObjectChunk { - public: - // Allocates a new LargeObjectChunk that contains a large object page - // (Page::kPageSize aligned) that has at least size_in_bytes (for a large - // object) bytes after the object area start of that page. - static LargeObjectChunk* New(int size_in_bytes, Executability executable); - - // Free the memory associated with the chunk. - void Free(Executability executable); - - // Interpret a raw address as a large object chunk. - static LargeObjectChunk* FromAddress(Address address) { - return reinterpret_cast<LargeObjectChunk*>(address); - } - - // Returns the address of this chunk. - Address address() { return reinterpret_cast<Address>(this); } - - Page* GetPage() { - return Page::FromAddress(RoundUp(address(), Page::kPageSize)); - } - - // Accessors for the fields of the chunk. 
- LargeObjectChunk* next() { return next_; } - void set_next(LargeObjectChunk* chunk) { next_ = chunk; } - size_t size() { return size_ & ~Page::kPageFlagMask; } - - // Compute the start address in the chunk. - Address GetStartAddress() { return GetPage()->ObjectAreaStart(); } - - // Returns the object in this chunk. - HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); } - - // Given a requested size returns the physical size of a chunk to be - // allocated. - static int ChunkSizeFor(int size_in_bytes); - - // Given a chunk size, returns the object size it can accommodate. Used by - // LargeObjectSpace::Available. - static intptr_t ObjectSizeFor(intptr_t chunk_size) { - if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; - return chunk_size - Page::kPageSize - Page::kObjectStartOffset; - } - - private: - // A pointer to the next large object chunk in the space or NULL. - LargeObjectChunk* next_; - - // The total size of this chunk. - size_t size_; - - public: - TRACK_MEMORY("LargeObjectChunk") -}; - - class LargeObjectSpace : public Space { public: LargeObjectSpace(Heap* heap, AllocationSpace id); @@ -2202,12 +2456,15 @@ class LargeObjectSpace : public Space { // Releases internal resources, frees objects in this space. void TearDown(); - // Allocates a (non-FixedArray, non-Code) large object. - MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes); - // Allocates a large Code object. - MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes); - // Allocates a large FixedArray. - MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes); + static intptr_t ObjectSizeFor(intptr_t chunk_size) { + if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; + return chunk_size - Page::kPageSize - Page::kObjectStartOffset; + } + + // Shared implementation of AllocateRaw, AllocateRawCode and + // AllocateRawFixedArray. + MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size, + Executability executable); // Available bytes for objects in this space. inline intptr_t Available(); @@ -2231,10 +2488,7 @@ class LargeObjectSpace : public Space { // Finds a large object page containing the given pc, returns NULL // if such a page doesn't exist. - LargeObjectChunk* FindChunkContainingPc(Address pc); - - // Iterates objects covered by dirty regions. - void IterateDirtyRegions(ObjectSlotCallback func); + LargePage* FindPageContainingPc(Address pc); // Frees unmarked objects. void FreeUnmarkedObjects(); @@ -2243,13 +2497,15 @@ class LargeObjectSpace : public Space { bool Contains(HeapObject* obj); // Checks whether the space is empty. - bool IsEmpty() { return first_chunk_ == NULL; } + bool IsEmpty() { return first_page_ == NULL; } // See the comments for ReserveSpace in the Space class. This has to be // called after ReserveSpace has been called on the paged spaces, since they // may use some memory, leaving less for large objects. virtual bool ReserveSpace(int bytes); + LargePage* first_page() { return first_page_; } + #ifdef DEBUG virtual void Verify(); virtual void Print(); @@ -2262,17 +2518,11 @@ class LargeObjectSpace : public Space { private: // The head of the linked list of large object chunks. - LargeObjectChunk* first_chunk_; + LargePage* first_page_; intptr_t size_; // allocated bytes int page_count_; // number of chunks intptr_t objects_size_; // size of objects - // Shared implementation of AllocateRaw, AllocateRawCode and - // AllocateRawFixedArray. 
- MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size, - int object_size, - Executability executable); - friend class LargeObjectIterator; public: @@ -2285,17 +2535,78 @@ class LargeObjectIterator: public ObjectIterator { explicit LargeObjectIterator(LargeObjectSpace* space); LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); - HeapObject* next(); + HeapObject* Next(); // implementation of ObjectIterator. - virtual HeapObject* next_object() { return next(); } + virtual HeapObject* next_object() { return Next(); } private: - LargeObjectChunk* current_; + LargePage* current_; HeapObjectCallback size_func_; }; +// Iterates over the chunks (pages and large object pages) that can contain +// pointers to new space. +class PointerChunkIterator BASE_EMBEDDED { + public: + inline explicit PointerChunkIterator(Heap* heap); + + // Return NULL when the iterator is done. + MemoryChunk* next() { + switch (state_) { + case kOldPointerState: { + if (old_pointer_iterator_.has_next()) { + return old_pointer_iterator_.next(); + } + state_ = kMapState; + // Fall through. + } + case kMapState: { + if (map_iterator_.has_next()) { + return map_iterator_.next(); + } + state_ = kLargeObjectState; + // Fall through. + } + case kLargeObjectState: { + HeapObject* heap_object; + do { + heap_object = lo_iterator_.Next(); + if (heap_object == NULL) { + state_ = kFinishedState; + return NULL; + } + // Fixed arrays are the only pointer-containing objects in large + // object space. + } while (!heap_object->IsFixedArray()); + MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address()); + return answer; + } + case kFinishedState: + return NULL; + default: + break; + } + UNREACHABLE(); + return NULL; + } + + + private: + enum State { + kOldPointerState, + kMapState, + kLargeObjectState, + kFinishedState + }; + State state_; + PageIterator old_pointer_iterator_; + PageIterator map_iterator_; + LargeObjectIterator lo_iterator_; +}; + + #ifdef DEBUG struct CommentStatistic { const char* comment; diff --git a/deps/v8/src/splay-tree-inl.h b/deps/v8/src/splay-tree-inl.h index 9c2287eab..4640ed5b0 100644 --- a/deps/v8/src/splay-tree-inl.h +++ b/deps/v8/src/splay-tree-inl.h @@ -45,7 +45,7 @@ template<typename Config, class Allocator> bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) { if (is_empty()) { // If the tree is empty, insert the new node. - root_ = new Node(key, Config::kNoValue); + root_ = new Node(key, Config::NoValue()); } else { // Splay on the key to move the last node on the search path // for the key to the root of the tree. @@ -57,7 +57,7 @@ bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) { return false; } // Insert the new node. - Node* node = new Node(key, Config::kNoValue); + Node* node = new Node(key, Config::NoValue()); InsertInternal(cmp, node); } locator->bind(root_); @@ -226,7 +226,7 @@ template<typename Config, class Allocator> void SplayTree<Config, Allocator>::Splay(const Key& key) { if (is_empty()) return; - Node dummy_node(Config::kNoKey, Config::kNoValue); + Node dummy_node(Config::kNoKey, Config::NoValue()); // Create a dummy node. The use of the dummy node is a bit // counter-intuitive: The right child of the dummy node will hold // the L tree of the algorithm. 
The left child of the dummy node diff --git a/deps/v8/src/store-buffer-inl.h b/deps/v8/src/store-buffer-inl.h new file mode 100644 index 000000000..34f35a487 --- /dev/null +++ b/deps/v8/src/store-buffer-inl.h @@ -0,0 +1,79 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_STORE_BUFFER_INL_H_ +#define V8_STORE_BUFFER_INL_H_ + +#include "store-buffer.h" + +namespace v8 { +namespace internal { + +Address StoreBuffer::TopAddress() { + return reinterpret_cast<Address>(heap_->store_buffer_top_address()); +} + + +void StoreBuffer::Mark(Address addr) { + ASSERT(!heap_->cell_space()->Contains(addr)); + ASSERT(!heap_->code_space()->Contains(addr)); + Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); + *top++ = addr; + heap_->public_set_store_buffer_top(top); + if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) { + ASSERT(top == limit_); + Compact(); + } else { + ASSERT(top < limit_); + } +} + + +void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) { + if (store_buffer_rebuilding_enabled_) { + ASSERT(!heap_->cell_space()->Contains(addr)); + ASSERT(!heap_->code_space()->Contains(addr)); + ASSERT(!heap_->old_data_space()->Contains(addr)); + ASSERT(!heap_->new_space()->Contains(addr)); + Address* top = old_top_; + *top++ = addr; + old_top_ = top; + old_buffer_is_sorted_ = false; + old_buffer_is_filtered_ = false; + if (top >= old_limit_) { + ASSERT(callback_ != NULL); + (*callback_)(heap_, + MemoryChunk::FromAnyPointerAddress(addr), + kStoreBufferFullEvent); + } + } +} + + +} } // namespace v8::internal + +#endif // V8_STORE_BUFFER_INL_H_ diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc new file mode 100644 index 000000000..ab810e400 --- /dev/null +++ b/deps/v8/src/store-buffer.cc @@ -0,0 +1,694 @@ +// Copyright 2011 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "store-buffer.h" +#include "store-buffer-inl.h" +#include "v8-counters.h" + +namespace v8 { +namespace internal { + +StoreBuffer::StoreBuffer(Heap* heap) + : heap_(heap), + start_(NULL), + limit_(NULL), + old_start_(NULL), + old_limit_(NULL), + old_top_(NULL), + old_buffer_is_sorted_(false), + old_buffer_is_filtered_(false), + during_gc_(false), + store_buffer_rebuilding_enabled_(false), + callback_(NULL), + may_move_store_buffer_entries_(true), + virtual_memory_(NULL), + hash_map_1_(NULL), + hash_map_2_(NULL) { +} + + +void StoreBuffer::Setup() { + virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3); + uintptr_t start_as_int = + reinterpret_cast<uintptr_t>(virtual_memory_->address()); + start_ = + reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2)); + limit_ = start_ + (kStoreBufferSize / sizeof(*start_)); + + old_top_ = old_start_ = new Address[kOldStoreBufferLength]; + old_limit_ = old_start_ + kOldStoreBufferLength; + + ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address()); + ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address()); + Address* vm_limit = reinterpret_cast<Address*>( + reinterpret_cast<char*>(virtual_memory_->address()) + + virtual_memory_->size()); + ASSERT(start_ <= vm_limit); + ASSERT(limit_ <= vm_limit); + USE(vm_limit); + ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0); + ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) == + 0); + + virtual_memory_->Commit(reinterpret_cast<Address>(start_), + kStoreBufferSize, + false); // Not executable. 
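The Setup() code above depends on an alignment invariant that Mark() then exploits: the new buffer is kStoreBufferSize bytes placed on a 2 * kStoreBufferSize boundary inside a 3 * kStoreBufferSize reservation, so every slot address inside the buffer has the kStoreBufferOverflowBit clear while the one-past-the-end address has it set. A minimal standalone sketch of that invariant follows (plain C++, not V8 code; malloc stands in for VirtualMemory and kSize for kStoreBufferSize):

#include <cassert>
#include <cstdint>
#include <cstdlib>

int main() {
  const uintptr_t kSize = 1 << 16;   // stands in for kStoreBufferSize
  void* raw = malloc(3 * kSize);     // over-allocate, as Setup() over-reserves
  uintptr_t start = (reinterpret_cast<uintptr_t>(raw) + 2 * kSize - 1) & ~(2 * kSize - 1);
  uintptr_t limit = start + kSize;   // one past the end of the buffer
  for (uintptr_t p = start; p < limit; p += sizeof(void*)) {
    assert((p & kSize) == 0);        // inside the buffer: overflow bit clear
  }
  assert((limit & kSize) != 0);      // at the limit: overflow bit set
  free(raw);
  return 0;
}

This is why Mark() above can detect a full buffer with a single bit test on the incremented top pointer instead of comparing against a separately loaded limit.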
+ heap_->public_set_store_buffer_top(start_); + + hash_map_1_ = new uintptr_t[kHashMapLength]; + hash_map_2_ = new uintptr_t[kHashMapLength]; + + ZapHashTables(); +} + + +void StoreBuffer::TearDown() { + delete virtual_memory_; + delete[] hash_map_1_; + delete[] hash_map_2_; + delete[] old_start_; + old_start_ = old_top_ = old_limit_ = NULL; + start_ = limit_ = NULL; + heap_->public_set_store_buffer_top(start_); +} + + +void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { + isolate->heap()->store_buffer()->Compact(); +} + + +#if V8_TARGET_ARCH_X64 +static int CompareAddresses(const void* void_a, const void* void_b) { + intptr_t a = + reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a)); + intptr_t b = + reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b)); + // Unfortunately if int is smaller than intptr_t there is no branch-free + // way to return a number with the same sign as the difference between the + // pointers. + if (a == b) return 0; + if (a < b) return -1; + ASSERT(a > b); + return 1; +} +#else +static int CompareAddresses(const void* void_a, const void* void_b) { + intptr_t a = + reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a)); + intptr_t b = + reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b)); + ASSERT(sizeof(1) == sizeof(a)); + // Shift down to avoid wraparound. + return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2); +} +#endif + + +void StoreBuffer::Uniq() { + ASSERT(HashTablesAreZapped()); + // Remove adjacent duplicates and cells that do not point at new space. + Address previous = NULL; + Address* write = old_start_; + ASSERT(may_move_store_buffer_entries_); + for (Address* read = old_start_; read < old_top_; read++) { + Address current = *read; + if (current != previous) { + if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) { + *write++ = current; + } + } + previous = current; + } + old_top_ = write; +} + + +void StoreBuffer::HandleFullness() { + if (old_buffer_is_filtered_) return; + ASSERT(may_move_store_buffer_entries_); + Compact(); + + old_buffer_is_filtered_ = true; + bool page_has_scan_on_scavenge_flag = false; + + PointerChunkIterator it(heap_); + MemoryChunk* chunk; + while ((chunk = it.next()) != NULL) { + if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true; + } + + if (page_has_scan_on_scavenge_flag) { + Filter(MemoryChunk::SCAN_ON_SCAVENGE); + } + + // If filtering out the entries from scan_on_scavenge pages got us down to + // less than half full, then we are satisfied with that. + if (old_limit_ - old_top_ > old_top_ - old_start_) return; + + // Sample 1 entry in 97 and filter out the pages where we estimate that more + // than 1 in 8 pointers are to new space. + static const int kSampleFinenesses = 5; + static const struct Samples { + int prime_sample_step; + int threshold; + } samples[kSampleFinenesses] = { + { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 }, + { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 }, + { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 }, + { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 }, + { 1, 0} + }; + for (int i = kSampleFinenesses - 1; i >= 0; i--) { + ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold); + // As a last resort we mark all pages as being exempt from the store buffer. 
+ ASSERT(i != 0 || old_top_ == old_start_); + if (old_limit_ - old_top_ > old_top_ - old_start_) return; + } + UNREACHABLE(); +} + + +// Sample the store buffer to see if some pages are taking up a lot of space +// in the store buffer. +void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) { + PointerChunkIterator it(heap_); + MemoryChunk* chunk; + while ((chunk = it.next()) != NULL) { + chunk->set_store_buffer_counter(0); + } + bool created_new_scan_on_scavenge_pages = false; + MemoryChunk* previous_chunk = NULL; + for (Address* p = old_start_; p < old_top_; p += prime_sample_step) { + Address addr = *p; + MemoryChunk* containing_chunk = NULL; + if (previous_chunk != NULL && previous_chunk->Contains(addr)) { + containing_chunk = previous_chunk; + } else { + containing_chunk = MemoryChunk::FromAnyPointerAddress(addr); + } + int old_counter = containing_chunk->store_buffer_counter(); + if (old_counter == threshold) { + containing_chunk->set_scan_on_scavenge(true); + created_new_scan_on_scavenge_pages = true; + } + containing_chunk->set_store_buffer_counter(old_counter + 1); + previous_chunk = containing_chunk; + } + if (created_new_scan_on_scavenge_pages) { + Filter(MemoryChunk::SCAN_ON_SCAVENGE); + } + old_buffer_is_filtered_ = true; +} + + +void StoreBuffer::Filter(int flag) { + Address* new_top = old_start_; + MemoryChunk* previous_chunk = NULL; + for (Address* p = old_start_; p < old_top_; p++) { + Address addr = *p; + MemoryChunk* containing_chunk = NULL; + if (previous_chunk != NULL && previous_chunk->Contains(addr)) { + containing_chunk = previous_chunk; + } else { + containing_chunk = MemoryChunk::FromAnyPointerAddress(addr); + previous_chunk = containing_chunk; + } + if (!containing_chunk->IsFlagSet(flag)) { + *new_top++ = addr; + } + } + old_top_ = new_top; +} + + +void StoreBuffer::SortUniq() { + Compact(); + if (old_buffer_is_sorted_) return; + ZapHashTables(); + qsort(reinterpret_cast<void*>(old_start_), + old_top_ - old_start_, + sizeof(*old_top_), + &CompareAddresses); + Uniq(); + + old_buffer_is_sorted_ = true; +} + + +bool StoreBuffer::PrepareForIteration() { + Compact(); + PointerChunkIterator it(heap_); + MemoryChunk* chunk; + bool page_has_scan_on_scavenge_flag = false; + while ((chunk = it.next()) != NULL) { + if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true; + } + + if (page_has_scan_on_scavenge_flag) { + Filter(MemoryChunk::SCAN_ON_SCAVENGE); + } + ZapHashTables(); + return page_has_scan_on_scavenge_flag; +} + + +#ifdef DEBUG +void StoreBuffer::Clean() { + ZapHashTables(); + Uniq(); // Also removes things that no longer point to new space. 
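The progressively finer sampling above (a stride of 97, then 23, 7, 3 and finally 1) is easier to see in isolation. Below is a simplified standalone sketch of the idea behind ExemptPopularPages(); the page size constant and the container types are assumptions for the sketch and are not V8's:

#include <cstddef>
#include <cstdint>
#include <map>
#include <set>
#include <vector>

typedef uintptr_t Address;
static const uintptr_t kSketchPageSize = 1 << 20;  // assumed page size

// Walk the old buffer with a prime stride, count sampled hits per page, and
// flag pages that reach the threshold so their entries can be dropped wholesale.
std::set<uintptr_t> FlagPopularPages(const std::vector<Address>& old_buffer,
                                     size_t prime_sample_step,
                                     int threshold) {
  std::map<uintptr_t, int> hits;      // page base -> sampled slot count
  std::set<uintptr_t> flagged;        // pages that would get scan_on_scavenge
  for (size_t i = 0; i < old_buffer.size(); i += prime_sample_step) {
    uintptr_t page = old_buffer[i] & ~(kSketchPageSize - 1);
    if (hits[page]++ == threshold) flagged.insert(page);
  }
  return flagged;
}

In the real code the counter lives on the MemoryChunk itself and a flagged page is switched to scan_on_scavenge immediately, after which Filter(MemoryChunk::SCAN_ON_SCAVENGE) removes its entries from the old buffer.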
+ CheckForFullBuffer(); +} + + +static bool Zapped(char* start, int size) { + for (int i = 0; i < size; i++) { + if (start[i] != 0) return false; + } + return true; +} + + +bool StoreBuffer::HashTablesAreZapped() { + return Zapped(reinterpret_cast<char*>(hash_map_1_), + sizeof(uintptr_t) * kHashMapLength) && + Zapped(reinterpret_cast<char*>(hash_map_2_), + sizeof(uintptr_t) * kHashMapLength); +} + + +static Address* in_store_buffer_1_element_cache = NULL; + + +bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) { + if (!FLAG_enable_slow_asserts) return true; + if (in_store_buffer_1_element_cache != NULL && + *in_store_buffer_1_element_cache == cell_address) { + return true; + } + Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); + for (Address* current = top - 1; current >= start_; current--) { + if (*current == cell_address) { + in_store_buffer_1_element_cache = current; + return true; + } + } + for (Address* current = old_top_ - 1; current >= old_start_; current--) { + if (*current == cell_address) { + in_store_buffer_1_element_cache = current; + return true; + } + } + return false; +} +#endif + + +void StoreBuffer::ZapHashTables() { + memset(reinterpret_cast<void*>(hash_map_1_), + 0, + sizeof(uintptr_t) * kHashMapLength); + memset(reinterpret_cast<void*>(hash_map_2_), + 0, + sizeof(uintptr_t) * kHashMapLength); +} + + +void StoreBuffer::GCPrologue() { + ZapHashTables(); + during_gc_ = true; +} + + +#ifdef DEBUG +static void DummyScavengePointer(HeapObject** p, HeapObject* o) { + // Do nothing. +} + + +void StoreBuffer::VerifyPointers(PagedSpace* space, + RegionCallback region_callback) { + PageIterator it(space); + + while (it.has_next()) { + Page* page = it.next(); + FindPointersToNewSpaceOnPage( + reinterpret_cast<PagedSpace*>(page->owner()), + page, + region_callback, + &DummyScavengePointer); + } +} + + +void StoreBuffer::VerifyPointers(LargeObjectSpace* space) { + LargeObjectIterator it(space); + for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { + if (object->IsFixedArray()) { + Address slot_address = object->address(); + Address end = object->address() + object->Size(); + + while (slot_address < end) { + HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address); + // When we are not in GC the Heap::InNewSpace() predicate + // checks that pointers which satisfy predicate point into + // the active semispace. + heap_->InNewSpace(*slot); + slot_address += kPointerSize; + } + } + } +} +#endif + + +void StoreBuffer::Verify() { +#ifdef DEBUG + VerifyPointers(heap_->old_pointer_space(), + &StoreBuffer::FindPointersToNewSpaceInRegion); + VerifyPointers(heap_->map_space(), + &StoreBuffer::FindPointersToNewSpaceInMapsRegion); + VerifyPointers(heap_->lo_space()); +#endif +} + + +void StoreBuffer::GCEpilogue() { + during_gc_ = false; + Verify(); +} + + +void StoreBuffer::FindPointersToNewSpaceInRegion( + Address start, Address end, ObjectSlotCallback slot_callback) { + for (Address slot_address = start; + slot_address < end; + slot_address += kPointerSize) { + Object** slot = reinterpret_cast<Object**>(slot_address); + if (heap_->InNewSpace(*slot)) { + HeapObject* object = reinterpret_cast<HeapObject*>(*slot); + ASSERT(object->IsHeapObject()); + slot_callback(reinterpret_cast<HeapObject**>(slot), object); + if (heap_->InNewSpace(*slot)) { + EnterDirectlyIntoStoreBuffer(slot_address); + } + } + } +} + + +// Compute start address of the first map following given addr. 
+static inline Address MapStartAlign(Address addr) { + Address page = Page::FromAddress(addr)->ObjectAreaStart(); + return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize); +} + + +// Compute end address of the first map preceding given addr. +static inline Address MapEndAlign(Address addr) { + Address page = Page::FromAllocationTop(addr)->ObjectAreaStart(); + return page + ((addr - page) / Map::kSize * Map::kSize); +} + + +void StoreBuffer::FindPointersToNewSpaceInMaps( + Address start, + Address end, + ObjectSlotCallback slot_callback) { + ASSERT(MapStartAlign(start) == start); + ASSERT(MapEndAlign(end) == end); + + Address map_address = start; + while (map_address < end) { + ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address))); + ASSERT(Memory::Object_at(map_address)->IsMap()); + + Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset; + Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset; + + FindPointersToNewSpaceInRegion(pointer_fields_start, + pointer_fields_end, + slot_callback); + map_address += Map::kSize; + } +} + + +void StoreBuffer::FindPointersToNewSpaceInMapsRegion( + Address start, + Address end, + ObjectSlotCallback slot_callback) { + Address map_aligned_start = MapStartAlign(start); + Address map_aligned_end = MapEndAlign(end); + + ASSERT(map_aligned_start == start); + ASSERT(map_aligned_end == end); + + FindPointersToNewSpaceInMaps(map_aligned_start, + map_aligned_end, + slot_callback); +} + + +// This function iterates over all the pointers in a paged space in the heap, +// looking for pointers into new space. Within the pages there may be dead +// objects that have not been overwritten by free spaces or fillers because of +// lazy sweeping. These dead objects may not contain pointers to new space. +// The garbage areas that have been swept properly (these will normally be the +// large ones) will be marked with free space and filler map words. In +// addition any area that has never been used at all for object allocation must +// be marked with a free space or filler. Because the free space and filler +// maps do not move we can always recognize these even after a compaction. +// Normal objects like FixedArrays and JSObjects should not contain references +// to these maps. The special garbage section (see comment in spaces.h) is +// skipped since it can contain absolutely anything. Any objects that are +// allocated during iteration may or may not be visited by the iteration, but +// they will not be partially visited. +void StoreBuffer::FindPointersToNewSpaceOnPage( + PagedSpace* space, + Page* page, + RegionCallback region_callback, + ObjectSlotCallback slot_callback) { + Address visitable_start = page->ObjectAreaStart(); + Address end_of_page = page->ObjectAreaEnd(); + + Address visitable_end = visitable_start; + + Object* free_space_map = heap_->free_space_map(); + Object* two_pointer_filler_map = heap_->two_pointer_filler_map(); + + while (visitable_end < end_of_page) { + Object* o = *reinterpret_cast<Object**>(visitable_end); + // Skip fillers but not things that look like fillers in the special + // garbage section which can contain anything. + if (o == free_space_map || + o == two_pointer_filler_map || + (visitable_end == space->top() && visitable_end != space->limit())) { + if (visitable_start != visitable_end) { + // After calling this the special garbage section may have moved. 
+ (this->*region_callback)(visitable_start, + visitable_end, + slot_callback); + if (visitable_end >= space->top() && visitable_end < space->limit()) { + visitable_end = space->limit(); + visitable_start = visitable_end; + continue; + } + } + if (visitable_end == space->top() && visitable_end != space->limit()) { + visitable_start = visitable_end = space->limit(); + } else { + // At this point we are either at the start of a filler or we are at + // the point where the space->top() used to be before the + // visit_pointer_region call above. Either way we can skip the + // object at the current spot: We don't promise to visit objects + // allocated during heap traversal, and if space->top() moved then it + // must be because an object was allocated at this point. + visitable_start = + visitable_end + HeapObject::FromAddress(visitable_end)->Size(); + visitable_end = visitable_start; + } + } else { + ASSERT(o != free_space_map); + ASSERT(o != two_pointer_filler_map); + ASSERT(visitable_end < space->top() || visitable_end >= space->limit()); + visitable_end += kPointerSize; + } + } + ASSERT(visitable_end == end_of_page); + if (visitable_start != visitable_end) { + (this->*region_callback)(visitable_start, + visitable_end, + slot_callback); + } +} + + +void StoreBuffer::IteratePointersInStoreBuffer( + ObjectSlotCallback slot_callback) { + Address* limit = old_top_; + old_top_ = old_start_; + { + DontMoveStoreBufferEntriesScope scope(this); + for (Address* current = old_start_; current < limit; current++) { +#ifdef DEBUG + Address* saved_top = old_top_; +#endif + Object** slot = reinterpret_cast<Object**>(*current); + Object* object = *slot; + if (heap_->InFromSpace(object)) { + HeapObject* heap_object = reinterpret_cast<HeapObject*>(object); + slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object); + if (heap_->InNewSpace(*slot)) { + EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot)); + } + } + ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top); + } + } +} + + +void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) { + // We do not sort or remove duplicated entries from the store buffer because + // we expect that callback will rebuild the store buffer thus removing + // all duplicates and pointers to old space. + bool some_pages_to_scan = PrepareForIteration(); + + // TODO(gc): we want to skip slots on evacuation candidates + // but we can't simply figure that out from slot address + // because slot can belong to a large object. + IteratePointersInStoreBuffer(slot_callback); + + // We are done scanning all the pointers that were in the store buffer, but + // there may be some pages marked scan_on_scavenge that have pointers to new + // space that are not in the store buffer. We must scan them now. As we + // scan, the surviving pointers to new space will be added to the store + // buffer. If there are still a lot of pointers to new space then we will + // keep the scan_on_scavenge flag on the page and discard the pointers that + // were added to the store buffer. If there are not many pointers to new + // space left on the page we will keep the pointers in the store buffer and + // remove the flag from the page. 
+ if (some_pages_to_scan) { + if (callback_ != NULL) { + (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent); + } + PointerChunkIterator it(heap_); + MemoryChunk* chunk; + while ((chunk = it.next()) != NULL) { + if (chunk->scan_on_scavenge()) { + chunk->set_scan_on_scavenge(false); + if (callback_ != NULL) { + (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent); + } + if (chunk->owner() == heap_->lo_space()) { + LargePage* large_page = reinterpret_cast<LargePage*>(chunk); + HeapObject* array = large_page->GetObject(); + ASSERT(array->IsFixedArray()); + Address start = array->address(); + Address end = start + array->Size(); + FindPointersToNewSpaceInRegion(start, end, slot_callback); + } else { + Page* page = reinterpret_cast<Page*>(chunk); + PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); + FindPointersToNewSpaceOnPage( + owner, + page, + (owner == heap_->map_space() ? + &StoreBuffer::FindPointersToNewSpaceInMapsRegion : + &StoreBuffer::FindPointersToNewSpaceInRegion), + slot_callback); + } + } + } + if (callback_ != NULL) { + (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent); + } + } +} + + +void StoreBuffer::Compact() { + Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); + + if (top == start_) return; + + // There's no check of the limit in the loop below so we check here for + // the worst case (compaction doesn't eliminate any pointers). + ASSERT(top <= limit_); + heap_->public_set_store_buffer_top(start_); + if (top - start_ > old_limit_ - old_top_) { + HandleFullness(); + } + ASSERT(may_move_store_buffer_entries_); + // Goes through the addresses in the store buffer attempting to remove + // duplicates. In the interest of speed this is a lossy operation. Some + // duplicates will remain. We have two hash tables with different hash + // functions to reduce the number of unnecessary clashes. + for (Address* current = start_; current < top; current++) { + ASSERT(!heap_->cell_space()->Contains(*current)); + ASSERT(!heap_->code_space()->Contains(*current)); + ASSERT(!heap_->old_data_space()->Contains(*current)); + uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); + // Shift out the last bits including any tags. + int_addr >>= kPointerSizeLog2; + int hash1 = + ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1)); + if (hash_map_1_[hash1] == int_addr) continue; + int hash2 = + ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1)); + hash2 ^= hash2 >> (kHashMapLengthLog2 * 2); + if (hash_map_2_[hash2] == int_addr) continue; + if (hash_map_1_[hash1] == 0) { + hash_map_1_[hash1] = int_addr; + } else if (hash_map_2_[hash2] == 0) { + hash_map_2_[hash2] = int_addr; + } else { + // Rather than slowing down we just throw away some entries. This will + // cause some duplicates to remain undetected. 
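The filter used here is lossy by design: two different hashes of the shifted address index two small direct-mapped tables, a hit in either table drops the entry, and on a double collision an older entry is simply evicted. A standalone sketch of the scheme (hypothetical helper, not V8's API):

#include <cstdint>

static const int kLog2 = 12;           // stands in for kHashMapLengthLog2
static const int kLen = 1 << kLog2;    // stands in for kHashMapLength
static uintptr_t table1[kLen];
static uintptr_t table2[kLen];

// Returns true if the address looks new and should be copied to the old buffer.
bool ProbablyNewEntry(uintptr_t addr) {
  uintptr_t key = addr >> 2;           // drop alignment bits, as Compact() does
  int h1 = static_cast<int>((key ^ (key >> kLog2)) & (kLen - 1));
  if (table1[h1] == key) return false; // recently seen via hash 1
  int h2 = static_cast<int>((key - (key >> kLog2)) & (kLen - 1));
  if (table2[h2] == key) return false; // recently seen via hash 2
  if (table1[h1] == 0) {
    table1[h1] = key;
  } else if (table2[h2] == 0) {
    table2[h2] = key;
  } else {
    table1[h1] = key;                  // evict; later duplicates may slip through
    table2[h2] = 0;
  }
  return true;
}

A missed duplicate only costs a redundant entry in the old buffer, so correctness never depends on the filter.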
+ hash_map_1_[hash1] = int_addr; + hash_map_2_[hash2] = 0; + } + old_buffer_is_sorted_ = false; + old_buffer_is_filtered_ = false; + *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); + ASSERT(old_top_ <= old_limit_); + } + heap_->isolate()->counters()->store_buffer_compactions()->Increment(); + CheckForFullBuffer(); +} + + +void StoreBuffer::CheckForFullBuffer() { + if (old_limit_ - old_top_ < kStoreBufferSize * 2) { + HandleFullness(); + } +} + +} } // namespace v8::internal diff --git a/deps/v8/src/store-buffer.h b/deps/v8/src/store-buffer.h new file mode 100644 index 000000000..61b97d9e6 --- /dev/null +++ b/deps/v8/src/store-buffer.h @@ -0,0 +1,248 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_STORE_BUFFER_H_ +#define V8_STORE_BUFFER_H_ + +#include "allocation.h" +#include "checks.h" +#include "globals.h" +#include "platform.h" +#include "v8globals.h" + +namespace v8 { +namespace internal { + +class StoreBuffer; + +typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to); + +typedef void (StoreBuffer::*RegionCallback)( + Address start, Address end, ObjectSlotCallback slot_callback); + +// Used to implement the write barrier by collecting addresses of pointers +// between spaces. +class StoreBuffer { + public: + explicit StoreBuffer(Heap* heap); + + static void StoreBufferOverflow(Isolate* isolate); + + inline Address TopAddress(); + + void Setup(); + void TearDown(); + + // This is used by the mutator to enter addresses into the store buffer. + inline void Mark(Address addr); + + // This is used by the heap traversal to enter the addresses into the store + // buffer that should still be in the store buffer after GC. It enters + // addresses directly into the old buffer because the GC starts by wiping the + // old buffer and thereafter only visits each cell once so there is no need + // to attempt to remove any dupes. 
During the first part of a GC we + // are using the store buffer to access the old spaces and at the same time + // we are rebuilding the store buffer using this function. There is, however + // no issue of overwriting the buffer we are iterating over, because this + // stage of the scavenge can only reduce the number of addresses in the store + // buffer (some objects are promoted so pointers to them do not need to be in + // the store buffer). The later parts of the GC scan the pages that are + // exempt from the store buffer and process the promotion queue. These steps + // can overflow this buffer. We check for this and on overflow we call the + // callback set up with the StoreBufferRebuildScope object. + inline void EnterDirectlyIntoStoreBuffer(Address addr); + + // Iterates over all pointers that go from old space to new space. It will + // delete the store buffer as it starts so the callback should reenter + // surviving old-to-new pointers into the store buffer to rebuild it. + void IteratePointersToNewSpace(ObjectSlotCallback callback); + + static const int kStoreBufferOverflowBit = 1 << 16; + static const int kStoreBufferSize = kStoreBufferOverflowBit; + static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address); + static const int kOldStoreBufferLength = kStoreBufferLength * 16; + static const int kHashMapLengthLog2 = 12; + static const int kHashMapLength = 1 << kHashMapLengthLog2; + + void Compact(); + + void GCPrologue(); + void GCEpilogue(); + + Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); } + Object*** Start() { return reinterpret_cast<Object***>(old_start_); } + Object*** Top() { return reinterpret_cast<Object***>(old_top_); } + void SetTop(Object*** top) { + ASSERT(top >= Start()); + ASSERT(top <= Limit()); + old_top_ = reinterpret_cast<Address*>(top); + } + + bool old_buffer_is_sorted() { return old_buffer_is_sorted_; } + bool old_buffer_is_filtered() { return old_buffer_is_filtered_; } + + // Goes through the store buffer removing pointers to things that have + // been promoted. Rebuilds the store buffer completely if it overflowed. + void SortUniq(); + + void HandleFullness(); + void Verify(); + + bool PrepareForIteration(); + +#ifdef DEBUG + void Clean(); + // Slow, for asserts only. + bool CellIsInStoreBuffer(Address cell); +#endif + + void Filter(int flag); + + private: + Heap* heap_; + + // The store buffer is divided up into a new buffer that is constantly being + // filled by mutator activity and an old buffer that is filled with the data + // from the new buffer after compression. + Address* start_; + Address* limit_; + + Address* old_start_; + Address* old_limit_; + Address* old_top_; + + bool old_buffer_is_sorted_; + bool old_buffer_is_filtered_; + bool during_gc_; + // The garbage collector iterates over many pointers to new space that are not + // handled by the store buffer. This flag indicates whether the pointers + // found by the callbacks should be added to the store buffer or not. 
+ bool store_buffer_rebuilding_enabled_; + StoreBufferCallback callback_; + bool may_move_store_buffer_entries_; + + VirtualMemory* virtual_memory_; + uintptr_t* hash_map_1_; + uintptr_t* hash_map_2_; + + void CheckForFullBuffer(); + void Uniq(); + void ZapHashTables(); + bool HashTablesAreZapped(); + void ExemptPopularPages(int prime_sample_step, int threshold); + + void FindPointersToNewSpaceInRegion(Address start, + Address end, + ObjectSlotCallback slot_callback); + + // For each region of pointers on a page in use from an old space call + // visit_pointer_region callback. + // If either visit_pointer_region or callback can cause an allocation + // in old space and changes in allocation watermark then + // can_preallocate_during_iteration should be set to true. + void IteratePointersOnPage( + PagedSpace* space, + Page* page, + RegionCallback region_callback, + ObjectSlotCallback slot_callback); + + void FindPointersToNewSpaceInMaps( + Address start, + Address end, + ObjectSlotCallback slot_callback); + + void FindPointersToNewSpaceInMapsRegion( + Address start, + Address end, + ObjectSlotCallback slot_callback); + + void FindPointersToNewSpaceOnPage( + PagedSpace* space, + Page* page, + RegionCallback region_callback, + ObjectSlotCallback slot_callback); + + void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback); + +#ifdef DEBUG + void VerifyPointers(PagedSpace* space, RegionCallback region_callback); + void VerifyPointers(LargeObjectSpace* space); +#endif + + friend class StoreBufferRebuildScope; + friend class DontMoveStoreBufferEntriesScope; +}; + + +class StoreBufferRebuildScope { + public: + explicit StoreBufferRebuildScope(Heap* heap, + StoreBuffer* store_buffer, + StoreBufferCallback callback) + : heap_(heap), + store_buffer_(store_buffer), + stored_state_(store_buffer->store_buffer_rebuilding_enabled_), + stored_callback_(store_buffer->callback_) { + store_buffer_->store_buffer_rebuilding_enabled_ = true; + store_buffer_->callback_ = callback; + (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent); + } + + ~StoreBufferRebuildScope() { + store_buffer_->callback_ = stored_callback_; + store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_; + store_buffer_->CheckForFullBuffer(); + } + + private: + Heap* heap_; + StoreBuffer* store_buffer_; + bool stored_state_; + StoreBufferCallback stored_callback_; +}; + + +class DontMoveStoreBufferEntriesScope { + public: + explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer) + : store_buffer_(store_buffer), + stored_state_(store_buffer->may_move_store_buffer_entries_) { + store_buffer_->may_move_store_buffer_entries_ = false; + } + + ~DontMoveStoreBufferEntriesScope() { + store_buffer_->may_move_store_buffer_entries_ = stored_state_; + } + + private: + StoreBuffer* store_buffer_; + bool stored_state_; +}; + +} } // namespace v8::internal + +#endif // V8_STORE_BUFFER_H_ diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js index 297105d04..be955c8c0 100644 --- a/deps/v8/src/string.js +++ b/deps/v8/src/string.js @@ -568,7 +568,6 @@ function StringSplit(separator, limit) { } var subject = TO_STRING_INLINE(this); limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit); - if (limit === 0) return []; // ECMA-262 says that if separator is undefined, the result should // be an array of size 1 containing the entire string. 
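StoreBufferRebuildScope and DontMoveStoreBufferEntriesScope above are both instances of the same save-set-restore RAII idiom: stash the current flag or callback, override it for the dynamic extent of the scope, and put it back in the destructor. A generic sketch of that idiom, with made-up names rather than V8's API:

// Generic save/set/restore scope; the pattern behind the two scope classes
// in store-buffer.h above.
class ScopedBoolOverride {
 public:
  ScopedBoolOverride(bool* flag, bool value)
      : flag_(flag), saved_(*flag) {
    *flag_ = value;
  }
  // The previous value is restored on every exit path, including early
  // returns.
  ~ScopedBoolOverride() { *flag_ = saved_; }

 private:
  bool* flag_;
  bool saved_;
};

// Usage sketch:
//   void Example(bool* may_move) {
//     ScopedBoolOverride dont_move(may_move, false);
//     ...  // store buffer entries are not moved in here
//   }      // previous value restored here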
SpiderMonkey @@ -582,6 +581,9 @@ function StringSplit(separator, limit) { var length = subject.length; if (!IS_REGEXP(separator)) { separator = TO_STRING_INLINE(separator); + + if (limit === 0) return []; + var separator_length = separator.length; // If the separator string is empty then return the elements in the subject. @@ -592,6 +594,8 @@ function StringSplit(separator, limit) { return result; } + if (limit === 0) return []; + %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]); if (length === 0) { diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc index c89c8f333..be79c8008 100644 --- a/deps/v8/src/strtod.cc +++ b/deps/v8/src/strtod.cc @@ -27,7 +27,6 @@ #include <stdarg.h> #include <math.h> -#include <limits> #include "globals.h" #include "utils.h" diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index 55963303c..67451f2b8 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -55,7 +55,15 @@ void StubCache::Initialize(bool create_heap_objects) { ASSERT(IsPowerOf2(kSecondaryTableSize)); if (create_heap_objects) { HandleScope scope; - Clear(); + Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal); + for (int i = 0; i < kPrimaryTableSize; i++) { + primary_[i].key = heap()->empty_string(); + primary_[i].value = empty; + } + for (int j = 0; j < kSecondaryTableSize; j++) { + secondary_[j].key = heap()->empty_string(); + secondary_[j].value = empty; + } } } @@ -489,38 +497,56 @@ MaybeObject* StubCache::ComputeStoreField(String* name, MaybeObject* StubCache::ComputeKeyedLoadOrStoreElement( JSObject* receiver, - bool is_store, + KeyedIC::StubKind stub_kind, StrictModeFlag strict_mode) { Code::Flags flags = Code::ComputeMonomorphicFlags( - is_store ? Code::KEYED_STORE_IC : - Code::KEYED_LOAD_IC, + stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC + : Code::KEYED_STORE_IC, NORMAL, strict_mode); - String* name = is_store - ? 
isolate()->heap()->KeyedStoreElementMonomorphic_symbol() - : isolate()->heap()->KeyedLoadElementMonomorphic_symbol(); + String* name = NULL; + switch (stub_kind) { + case KeyedIC::LOAD: + name = isolate()->heap()->KeyedLoadElementMonomorphic_symbol(); + break; + case KeyedIC::STORE_NO_TRANSITION: + name = isolate()->heap()->KeyedStoreElementMonomorphic_symbol(); + break; + default: + UNREACHABLE(); + break; + } Object* maybe_code = receiver->map()->FindInCodeCache(name, flags); if (!maybe_code->IsUndefined()) return Code::cast(maybe_code); - MaybeObject* maybe_new_code = NULL; Map* receiver_map = receiver->map(); - if (is_store) { - KeyedStoreStubCompiler compiler(strict_mode); - maybe_new_code = compiler.CompileStoreElement(receiver_map); - } else { - KeyedLoadStubCompiler compiler; - maybe_new_code = compiler.CompileLoadElement(receiver_map); + MaybeObject* maybe_new_code = NULL; + switch (stub_kind) { + case KeyedIC::LOAD: { + KeyedLoadStubCompiler compiler; + maybe_new_code = compiler.CompileLoadElement(receiver_map); + break; + } + case KeyedIC::STORE_NO_TRANSITION: { + KeyedStoreStubCompiler compiler(strict_mode); + maybe_new_code = compiler.CompileStoreElement(receiver_map); + break; + } + default: + UNREACHABLE(); + break; } - Code* code; + Code* code = NULL; if (!maybe_new_code->To(&code)) return maybe_new_code; - if (is_store) { + + if (stub_kind == KeyedIC::LOAD) { PROFILE(isolate_, - CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, + CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0)); } else { PROFILE(isolate_, - CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, + CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0)); } ASSERT(code->IsCode()); @@ -1099,15 +1125,14 @@ MaybeObject* StubCache::ComputeCallDebugPrepareStepIn( void StubCache::Clear() { + Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal); for (int i = 0; i < kPrimaryTableSize; i++) { primary_[i].key = heap()->empty_string(); - primary_[i].value = isolate_->builtins()->builtin( - Builtins::kIllegal); + primary_[i].value = empty; } for (int j = 0; j < kSecondaryTableSize; j++) { secondary_[j].key = heap()->empty_string(); - secondary_[j].value = isolate_->builtins()->builtin( - Builtins::kIllegal); + secondary_[j].value = empty; } } diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index 18c157b16..d9ec88f51 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -30,6 +30,7 @@ #include "allocation.h" #include "arguments.h" +#include "ic-inl.h" #include "macro-assembler.h" #include "objects.h" #include "zone-inl.h" @@ -187,7 +188,7 @@ class StubCache { MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreElement( JSObject* receiver, - bool is_store, + KeyedIC::StubKind stub_kind, StrictModeFlag strict_mode); // --- @@ -640,7 +641,7 @@ class KeyedLoadStubCompiler: public StubCompiler { MUST_USE_RESULT MaybeObject* CompileLoadElement(Map* receiver_map); - MUST_USE_RESULT MaybeObject* CompileLoadMegamorphic( + MUST_USE_RESULT MaybeObject* CompileLoadPolymorphic( MapList* receiver_maps, CodeList* handler_ics); @@ -699,12 +700,14 @@ class KeyedStoreStubCompiler: public StubCompiler { MUST_USE_RESULT MaybeObject* CompileStoreElement(Map* receiver_map); - MUST_USE_RESULT MaybeObject* CompileStoreMegamorphic( + MUST_USE_RESULT MaybeObject* CompileStorePolymorphic( MapList* receiver_maps, - CodeList* handler_ics); + CodeList* handler_stubs, + MapList* transitioned_maps); static void GenerateStoreFastElement(MacroAssembler* masm, - bool is_js_array); + bool is_js_array, + 
ElementsKind element_kind); static void GenerateStoreFastDoubleElement(MacroAssembler* masm, bool is_js_array); diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h index eb825c1a7..de4972dd7 100644 --- a/deps/v8/src/token.h +++ b/deps/v8/src/token.h @@ -216,6 +216,10 @@ class Token { return op == LT || op == LTE || op == GT || op == GTE; } + static bool IsEqualityOp(Value op) { + return op == EQ || op == EQ_STRICT; + } + static Value NegateCompareOp(Value op) { ASSERT(IsCompareOp(op)); switch (op) { diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc index c64368e59..a4b16f4f3 100644 --- a/deps/v8/src/type-info.cc +++ b/deps/v8/src/type-info.cc @@ -60,8 +60,10 @@ TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) { TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code, - Handle<Context> global_context) { + Handle<Context> global_context, + Isolate* isolate) { global_context_ = global_context; + isolate_ = isolate; BuildDictionary(code); ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue); } @@ -71,12 +73,12 @@ Handle<Object> TypeFeedbackOracle::GetInfo(unsigned ast_id) { int entry = dictionary_->FindEntry(ast_id); return entry != NumberDictionary::kNotFound ? Handle<Object>(dictionary_->ValueAt(entry)) - : Isolate::Current()->factory()->undefined_value(); + : Handle<Object>::cast(isolate_->factory()->undefined_value()); } bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) { - Handle<Object> map_or_code(GetInfo(expr->id())); + Handle<Object> map_or_code = GetInfo(expr->id()); if (map_or_code->IsMap()) return true; if (map_or_code->IsCode()) { Handle<Code> code = Handle<Code>::cast(map_or_code); @@ -90,10 +92,10 @@ bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) { bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) { - Handle<Object> map_or_code(GetInfo(expr->id())); + Handle<Object> map_or_code = GetInfo(expr->id()); if (map_or_code->IsCode()) { Handle<Code> code = Handle<Code>::cast(map_or_code); - Builtins* builtins = Isolate::Current()->builtins(); + Builtins* builtins = isolate_->builtins(); return code->is_keyed_load_stub() && *code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) && code->ic_state() == MEGAMORPHIC; @@ -103,7 +105,7 @@ bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) { bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) { - Handle<Object> map_or_code(GetInfo(expr->id())); + Handle<Object> map_or_code = GetInfo(expr->id()); if (map_or_code->IsMap()) return true; if (map_or_code->IsCode()) { Handle<Code> code = Handle<Code>::cast(map_or_code); @@ -116,10 +118,10 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) { bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) { - Handle<Object> map_or_code(GetInfo(expr->id())); + Handle<Object> map_or_code = GetInfo(expr->id()); if (map_or_code->IsCode()) { Handle<Code> code = Handle<Code>::cast(map_or_code); - Builtins* builtins = Isolate::Current()->builtins(); + Builtins* builtins = isolate_->builtins(); return code->is_keyed_store_stub() && *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) && *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) && @@ -131,13 +133,13 @@ bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) { bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) { Handle<Object> value = GetInfo(expr->id()); - return value->IsMap() || value->IsSmi(); + return value->IsMap() || 
value->IsSmi() || value->IsJSFunction(); } Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) { ASSERT(LoadIsMonomorphicNormal(expr)); - Handle<Object> map_or_code(GetInfo(expr->id())); + Handle<Object> map_or_code = GetInfo(expr->id()); if (map_or_code->IsCode()) { Handle<Code> code = Handle<Code>::cast(map_or_code); Map* first_map = code->FindFirstMap(); @@ -150,7 +152,7 @@ Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) { Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) { ASSERT(StoreIsMonomorphicNormal(expr)); - Handle<Object> map_or_code(GetInfo(expr->id())); + Handle<Object> map_or_code = GetInfo(expr->id()); if (map_or_code->IsCode()) { Handle<Code> code = Handle<Code>::cast(map_or_code); return Handle<Map>(code->FindFirstMap()); @@ -203,6 +205,7 @@ CheckType TypeFeedbackOracle::GetCallCheckType(Call* expr) { return check; } + Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck( CheckType check) { JSFunction* function = NULL; @@ -225,9 +228,14 @@ Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck( } +Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) { + return Handle<JSFunction>::cast(GetInfo(expr->id())); +} + + bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) { return *GetInfo(expr->id()) == - Isolate::Current()->builtins()->builtin(id); + isolate_->builtins()->builtin(id); } @@ -397,11 +405,11 @@ void TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id, Handle<String> name, Code::Flags flags, SmallMapList* types) { - Isolate* isolate = Isolate::Current(); Handle<Object> object = GetInfo(ast_id); if (object->IsUndefined() || object->IsSmi()) return; - if (*object == isolate->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) { + if (*object == + isolate_->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) { // TODO(fschneider): We could collect the maps and signal that // we need a generic store (or load) here. 
ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC); @@ -410,7 +418,7 @@ void TypeFeedbackOracle::CollectReceiverTypes(unsigned ast_id, } else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) { types->Reserve(4); ASSERT(object->IsCode()); - isolate->stub_cache()->CollectMatchingMaps(types, *name, flags); + isolate_->stub_cache()->CollectMatchingMaps(types, *name, flags); } } @@ -488,14 +496,16 @@ void TypeFeedbackOracle::RelocateRelocInfos(ZoneList<RelocInfo>* infos, void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) { for (int i = 0; i < infos->length(); i++) { + Address target_address = (*infos)[i].target_address(); unsigned ast_id = static_cast<unsigned>((*infos)[i].data()); - Code* target = Code::GetCodeFromTargetAddress((*infos)[i].target_address()); - ProcessTarget(ast_id, target); + ProcessTargetAt(target_address, ast_id); } } -void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) { +void TypeFeedbackOracle::ProcessTargetAt(Address target_address, + unsigned ast_id) { + Code* target = Code::GetCodeFromTargetAddress(target_address); switch (target->kind()) { case Code::LOAD_IC: case Code::STORE_IC: @@ -504,7 +514,7 @@ void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) { if (target->ic_state() == MONOMORPHIC) { if (target->kind() == Code::CALL_IC && target->check_type() != RECEIVER_MAP_CHECK) { - SetInfo(ast_id, Smi::FromInt(target->check_type())); + SetInfo(ast_id, Smi::FromInt(target->check_type())); } else { Object* map = target->FindFirstMap(); SetInfo(ast_id, map == NULL ? static_cast<Object*>(target) : map); @@ -529,6 +539,16 @@ void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) { SetInfo(ast_id, target); break; + case Code::STUB: + if (target->major_key() == CodeStub::CallFunction && + target->has_function_cache()) { + Object* value = CallFunctionStub::GetCachedValue(target_address); + if (value->IsJSFunction()) { + SetInfo(ast_id, value); + } + } + break; + default: break; } diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h index 448e4c94e..0ba10aaa5 100644 --- a/deps/v8/src/type-info.h +++ b/deps/v8/src/type-info.h @@ -216,7 +216,9 @@ class UnaryOperation; class TypeFeedbackOracle BASE_EMBEDDED { public: - TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context); + TypeFeedbackOracle(Handle<Code> code, + Handle<Context> global_context, + Isolate* isolate); bool LoadIsMonomorphicNormal(Property* expr); bool LoadIsMegamorphicWithTypeInfo(Property* expr); @@ -243,6 +245,8 @@ class TypeFeedbackOracle BASE_EMBEDDED { CheckType GetCallCheckType(Call* expr); Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check); + Handle<JSFunction> GetCallTarget(Call* expr); + bool LoadIsBuiltin(Property* expr, Builtins::Name id); // TODO(1571) We can't use ToBooleanStub::Types as the return value because @@ -273,13 +277,14 @@ class TypeFeedbackOracle BASE_EMBEDDED { byte* old_start, byte* new_start); void ProcessRelocInfos(ZoneList<RelocInfo>* infos); - void ProcessTarget(unsigned ast_id, Code* target); + void ProcessTargetAt(Address target_address, unsigned ast_id); // Returns an element from the backing store. Returns undefined if // there is no information. 
Handle<Object> GetInfo(unsigned ast_id); Handle<Context> global_context_; + Isolate* isolate_; Handle<NumberDictionary> dictionary_; DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle); diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js index c910d756b..1656664a3 100644 --- a/deps/v8/src/uri.js +++ b/deps/v8/src/uri.js @@ -111,47 +111,59 @@ function URIDecodeOctets(octets, result, index) { var o1 = octets[1]; if (o0 < 0xe0) { var a = o0 & 0x1f; - if ((o1 < 0x80) || (o1 > 0xbf)) + if ((o1 < 0x80) || (o1 > 0xbf)) { throw new $URIError("URI malformed"); + } var b = o1 & 0x3f; value = (a << 6) + b; - if (value < 0x80 || value > 0x7ff) + if (value < 0x80 || value > 0x7ff) { throw new $URIError("URI malformed"); + } } else { var o2 = octets[2]; if (o0 < 0xf0) { var a = o0 & 0x0f; - if ((o1 < 0x80) || (o1 > 0xbf)) + if ((o1 < 0x80) || (o1 > 0xbf)) { throw new $URIError("URI malformed"); + } var b = o1 & 0x3f; - if ((o2 < 0x80) || (o2 > 0xbf)) + if ((o2 < 0x80) || (o2 > 0xbf)) { throw new $URIError("URI malformed"); + } var c = o2 & 0x3f; value = (a << 12) + (b << 6) + c; - if ((value < 0x800) || (value > 0xffff)) + if ((value < 0x800) || (value > 0xffff)) { throw new $URIError("URI malformed"); + } } else { var o3 = octets[3]; if (o0 < 0xf8) { var a = (o0 & 0x07); - if ((o1 < 0x80) || (o1 > 0xbf)) + if ((o1 < 0x80) || (o1 > 0xbf)) { throw new $URIError("URI malformed"); + } var b = (o1 & 0x3f); - if ((o2 < 0x80) || (o2 > 0xbf)) + if ((o2 < 0x80) || (o2 > 0xbf)) { throw new $URIError("URI malformed"); + } var c = (o2 & 0x3f); - if ((o3 < 0x80) || (o3 > 0xbf)) + if ((o3 < 0x80) || (o3 > 0xbf)) { throw new $URIError("URI malformed"); + } var d = (o3 & 0x3f); value = (a << 18) + (b << 12) + (c << 6) + d; - if ((value < 0x10000) || (value > 0x10ffff)) + if ((value < 0x10000) || (value > 0x10ffff)) { throw new $URIError("URI malformed"); + } } else { throw new $URIError("URI malformed"); } } } } + if (0xD800 <= value && value <= 0xDFFF) { + throw new $URIError("URI malformed"); + } if (value < 0x10000) { result[index++] = value; return index; @@ -214,7 +226,8 @@ function Decode(uri, reserved) { if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed"); for (var i = 1; i < n; i++) { if (uri.charAt(++k) != '%') throw new $URIError("URI malformed"); - octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k)); + octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), + uri.charCodeAt(++k)); } index = URIDecodeOctets(octets, result, index); } else { @@ -366,7 +379,9 @@ function CharCodeToHex4Str(cc) { function IsValidHex(s) { for (var i = 0; i < s.length; ++i) { var cc = s.charCodeAt(i); - if ((48 <= cc && cc <= 57) || (65 <= cc && cc <= 70) || (97 <= cc && cc <= 102)) { + if ((48 <= cc && cc <= 57) || + (65 <= cc && cc <= 70) || + (97 <= cc && cc <= 102)) { // '0'..'9', 'A'..'F' and 'a' .. 'f'. } else { return false; diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h index 26c522b89..a523118a3 100644 --- a/deps/v8/src/utils.h +++ b/deps/v8/src/utils.h @@ -113,7 +113,7 @@ static inline T AddressFrom(intptr_t x) { // Return the largest multiple of m which is <= x. template <typename T> -static inline T RoundDown(T x, int m) { +static inline T RoundDown(T x, intptr_t m) { ASSERT(IsPowerOf2(m)); return AddressFrom<T>(OffsetFrom(x) & -m); } @@ -121,8 +121,8 @@ static inline T RoundDown(T x, int m) { // Return the smallest multiple of m which is >= x. 
template <typename T> -static inline T RoundUp(T x, int m) { - return RoundDown(x + m - 1, m); +static inline T RoundUp(T x, intptr_t m) { + return RoundDown<T>(static_cast<T>(x + m - 1), m); } @@ -159,9 +159,15 @@ static inline uint32_t RoundUpToPowerOf2(uint32_t x) { } +static inline uint32_t RoundDownToPowerOf2(uint32_t x) { + uint32_t rounded_up = RoundUpToPowerOf2(x); + if (rounded_up > x) return rounded_up >> 1; + return rounded_up; +} -template <typename T> -static inline bool IsAligned(T value, T alignment) { + +template <typename T, typename U> +static inline bool IsAligned(T value, U alignment) { ASSERT(IsPowerOf2(alignment)); return (value & (alignment - 1)) == 0; } @@ -170,7 +176,7 @@ static inline bool IsAligned(T value, T alignment) { // Returns true if (addr + offset) is aligned. static inline bool IsAddressAligned(Address addr, intptr_t alignment, - int offset) { + int offset = 0) { intptr_t offs = OffsetFrom(addr + offset); return IsAligned(offs, alignment); } diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h index 2de830300..47341e72c 100644 --- a/deps/v8/src/v8-counters.h +++ b/deps/v8/src/v8-counters.h @@ -107,7 +107,10 @@ namespace internal { SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \ /* Number of code objects found from pc. */ \ SC(pc_to_code, V8.PcToCode) \ - SC(pc_to_code_cached, V8.PcToCodeCached) + SC(pc_to_code_cached, V8.PcToCodeCached) \ + /* The store-buffer implementation of the write barrier. */ \ + SC(store_buffer_compactions, V8.StoreBufferCompactions) \ + SC(store_buffer_overflows, V8.StoreBufferOverflows) #define STATS_COUNTER_LIST_2(SC) \ @@ -126,10 +129,6 @@ namespace internal { V8.GCCompactorCausedByWeakHandles) \ SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \ SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \ - SC(map_to_fast_elements, V8.MapToFastElements) \ - SC(map_to_fast_double_elements, V8.MapToFastDoubleElements) \ - SC(map_to_slow_elements, V8.MapToSlowElements) \ - SC(map_to_external_array_elements, V8.MapToExternalArrayElements) \ /* How is the generic keyed-load stub used? */ \ SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \ SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \ diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc index 1e9b5dc14..a04114e70 100644 --- a/deps/v8/src/v8.cc +++ b/deps/v8/src/v8.cc @@ -38,6 +38,7 @@ #include "log.h" #include "runtime-profiler.h" #include "serialize.h" +#include "store-buffer.h" namespace v8 { namespace internal { @@ -56,6 +57,15 @@ static EntropySource entropy_source; bool V8::Initialize(Deserializer* des) { + // Setting --harmony implies all other harmony flags. + // TODO(rossberg): Is there a better place to put this? + if (FLAG_harmony) { + FLAG_harmony_typeof = true; + FLAG_harmony_scoping = true; + FLAG_harmony_proxies = true; + FLAG_harmony_weakmaps = true; + } + InitializeOncePerProcess(); // The current thread may not yet had entered an isolate to run. 
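The rounding and alignment helpers changed above are easiest to check against concrete values. The sketch below re-derives them on plain unsigned integers, so it omits V8's Address/OffsetFrom template machinery and is only an approximation of the real templates:

#include <assert.h>
#include <stdint.h>

// m (the alignment) must be a power of two, as the ASSERT in utils.h demands.
static inline uintptr_t RoundDown(uintptr_t x, uintptr_t m) {
  return x & ~(m - 1);
}
static inline uintptr_t RoundUp(uintptr_t x, uintptr_t m) {
  return RoundDown(x + m - 1, m);
}
static inline bool IsAligned(uintptr_t value, uintptr_t alignment) {
  return (value & (alignment - 1)) == 0;
}

int main() {
  assert(RoundDown(13, 8) == 8);   // largest multiple of 8 that is <= 13
  assert(RoundUp(13, 8) == 16);    // smallest multiple of 8 that is >= 13
  assert(RoundUp(16, 8) == 16);    // aligned values are left unchanged
  assert(IsAligned(64, 16));
  assert(!IsAligned(70, 16));
  // RoundDownToPowerOf2 (added above) yields the largest power of two <= x,
  // e.g. RoundDownToPowerOf2(17) == 16 and RoundDownToPowerOf2(32) == 32.
  return 0;
}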
@@ -215,6 +225,12 @@ void V8::InitializeOncePerProcess() { FLAG_peephole_optimization = !use_crankshaft_; ElementsAccessor::InitializeOncePerProcess(); + + if (FLAG_stress_compaction) { + FLAG_force_marking_deque_overflows = true; + FLAG_gc_global = true; + FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2; + } } } } // namespace v8::internal diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h index e565ca5ae..2e039d429 100644 --- a/deps/v8/src/v8.h +++ b/deps/v8/src/v8.h @@ -60,10 +60,11 @@ #include "objects-inl.h" #include "spaces-inl.h" #include "heap-inl.h" +#include "incremental-marking-inl.h" +#include "mark-compact-inl.h" #include "log-inl.h" #include "cpu-profiler-inl.h" #include "handles-inl.h" -#include "isolate-inl.h" namespace v8 { namespace internal { @@ -124,6 +125,15 @@ class V8 : public AllStatic { static bool use_crankshaft_; }; + +// JavaScript defines two kinds of 'nil'. +enum NilValue { kNullValue, kUndefinedValue }; + + +// JavaScript defines two kinds of equality. +enum EqualityKind { kStrictEquality, kNonStrictEquality }; + + } } // namespace v8::internal namespace i = v8::internal; diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h index eb5c49d75..09d26d2f1 100644 --- a/deps/v8/src/v8globals.h +++ b/deps/v8/src/v8globals.h @@ -79,18 +79,20 @@ const Address kFromSpaceZapValue = reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf)); const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb); const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef); +const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf; #else const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef); const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf); const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf); const uint32_t kSlotsZapValue = 0xbeefdeef; const uint32_t kDebugZapValue = 0xbadbaddb; +const uint32_t kFreeListZapValue = 0xfeed1eaf; #endif -// Number of bits to represent the page size for paged spaces. The value of 13 -// gives 8K bytes per page. -const int kPageSizeBits = 13; +// Number of bits to represent the page size for paged spaces. The value of 20 +// gives 1Mb bytes per page. +const int kPageSizeBits = 20; // On Intel architecture, cache line size is 64 bytes. // On ARM it may be less (32 bytes), but as far this constant is @@ -98,10 +100,6 @@ const int kPageSizeBits = 13; const int kProcessorCacheLineSize = 64; // Constants relevant to double precision floating point numbers. - -// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no -// other bits set. -const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51; // If looking only at the top 32 bits, the QNaN mask is bits 19 to 30. const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32); @@ -131,6 +129,7 @@ class FixedArray; class FunctionEntry; class FunctionLiteral; class FunctionTemplateInfo; +class MemoryChunk; class NumberDictionary; class StringDictionary; template <typename T> class Handle; @@ -254,12 +253,6 @@ struct CodeDesc { }; -// Callback function on object slots, used for iterating heap object slots in -// HeapObjects, global pointers to heap objects, etc. The callback allows the -// callback function to change the value of the slot. -typedef void (*ObjectSlotCallback)(HeapObject** pointer); - - // Callback function used for iterating objects in heap spaces, // for example, scanning heap objects. 
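The kPageSizeBits bump in v8globals.h above takes paged-space pages from 8 KB to 1 MB, and the --stress-compaction branch in v8.cc derives its new-space limit from the same constant. The arithmetic, restated as a tiny self-check (constants copied here purely for illustration):

#include <assert.h>

static const int kOldPageSizeBits = 13;  // previous value in v8globals.h
static const int kNewPageSizeBits = 20;  // value after this change

int main() {
  assert((1 << kOldPageSizeBits) == 8 * 1024);     // 8 KB pages before
  assert((1 << kNewPageSizeBits) == 1024 * 1024);  // 1 MB pages after
  // The stress-compaction setting above, (1 << (kPageSizeBits - 10)) * 2,
  // therefore evaluates to 2048.
  assert((1 << (kNewPageSizeBits - 10)) * 2 == 2048);
  return 0;
}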
typedef int (*HeapObjectCallback)(HeapObject* obj); @@ -306,7 +299,9 @@ enum CallFunctionFlags { NO_CALL_FUNCTION_FLAGS = 0, // Receiver might implicitly be the global objects. If it is, the // hole is passed to the call function stub. - RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0 + RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0, + // The call target is cached in the instruction stream. + RECORD_CALL_TARGET = 1 << 1 }; @@ -316,6 +311,19 @@ enum InlineCacheHolderFlag { }; +// The Store Buffer (GC). +typedef enum { + kStoreBufferFullEvent, + kStoreBufferStartScanningPagesEvent, + kStoreBufferScanningPageEvent +} StoreBufferEvent; + + +typedef void (*StoreBufferCallback)(Heap* heap, + MemoryChunk* page, + StoreBufferEvent event); + + // Type of properties. // Order of properties is significant. // Must fit in the BitField PropertyDetails::TypeField. @@ -488,7 +496,7 @@ enum StrictModeFlag { // Used to specify if a macro instruction must perform a smi check on tagged // values. enum SmiCheckType { - DONT_DO_SMI_CHECK = 0, + DONT_DO_SMI_CHECK, DO_SMI_CHECK }; @@ -496,7 +504,7 @@ enum SmiCheckType { // Used to specify whether a receiver is implicitly or explicitly // provided to a call. enum CallKind { - CALL_AS_METHOD = 0, + CALL_AS_METHOD, CALL_AS_FUNCTION }; @@ -510,6 +518,35 @@ const uint64_t kHoleNanInt64 = const uint64_t kLastNonNaNInt64 = (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32); + +enum VariableMode { + // User declared variables: + VAR, // declared via 'var', and 'function' declarations + + CONST, // declared via 'const' declarations + + LET, // declared via 'let' declarations + + // Variables introduced by the compiler: + DYNAMIC, // always require dynamic lookup (we don't know + // the declaration) + + DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the + // variable is global unless it has been shadowed + // by an eval-introduced variable + + DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the + // variable is local and where it is unless it + // has been shadowed by an eval-introduced + // variable + + INTERNAL, // like VAR, but not user-visible (may or may not + // be in a context) + + TEMPORARY // temporary variables (not user-visible), never + // in a context +}; + } } // namespace v8::internal #endif // V8_V8GLOBALS_H_ diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js index 588bdb21b..dee303237 100644 --- a/deps/v8/src/v8natives.js +++ b/deps/v8/src/v8natives.js @@ -193,13 +193,14 @@ function GlobalEval(x) { function SetUpGlobal() { %CheckIsBootstrapping(); // ECMA 262 - 15.1.1.1. - %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE); + %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY); // ECMA-262 - 15.1.1.2. - %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE); + %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY); // ECMA-262 - 15.1.1.3. - %SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE); + %SetProperty(global, "undefined", void 0, + DONT_ENUM | DONT_DELETE | READ_ONLY); // Set up non-enumerable function on the global object. InstallFunctions(global, DONT_ENUM, $Array( @@ -689,12 +690,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) { // ES5 8.12.9. 
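The StoreBufferEvent values and the StoreBufferCallback typedef added to v8globals.h above define the hook the store buffer fires while it scans pages. Below is a compile-only sketch of a callback with that shape; Heap and MemoryChunk are forward-declared stand-ins here, and the handler bodies are placeholders rather than V8 behaviour:

// Stand-in declarations so the sketch is self-contained.
class Heap;
class MemoryChunk;

enum StoreBufferEvent {
  kStoreBufferFullEvent,
  kStoreBufferStartScanningPagesEvent,
  kStoreBufferScanningPageEvent
};

typedef void (*StoreBufferCallback)(Heap* heap,
                                    MemoryChunk* page,
                                    StoreBufferEvent event);

static void OnStoreBufferEvent(Heap* heap, MemoryChunk* page,
                               StoreBufferEvent event) {
  (void)heap;
  (void)page;
  switch (event) {
    case kStoreBufferFullEvent:                /* buffer overflowed  */ break;
    case kStoreBufferStartScanningPagesEvent:  /* page scan starting */ break;
    case kStoreBufferScanningPageEvent:        /* scanning one page  */ break;
  }
}

// A callback of this type is installed via StoreBufferRebuildScope
// (see store-buffer.h earlier in this diff).
static StoreBufferCallback example_callback = &OnStoreBufferEvent;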
-function DefineOwnProperty(obj, p, desc, should_throw) { - if (%IsJSProxy(obj)) { - var attributes = FromGenericPropertyDescriptor(desc); - return DefineProxyProperty(obj, p, attributes, should_throw); - } - +function DefineObjectProperty(obj, p, desc, should_throw) { var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p)); // A false value here means that access checks failed. if (current_or_access === false) return void 0; @@ -858,6 +854,63 @@ function DefineOwnProperty(obj, p, desc, should_throw) { } +// ES5 section 15.4.5.1. +function DefineArrayProperty(obj, p, desc, should_throw) { + var length_desc = GetOwnProperty(obj, "length"); + var length = length_desc.getValue(); + + // Step 3 - Special handling for the length property. + if (p == "length") { + if (!desc.hasValue()) { + return DefineObjectProperty(obj, "length", desc, should_throw); + } + var new_length = ToUint32(desc.getValue()); + if (new_length != ToNumber(desc.getValue())) { + throw new $RangeError('defineProperty() array length out of range'); + } + // TODO(1756): There still are some uncovered corner cases left on how to + // handle changes to the length property of arrays. + return DefineObjectProperty(obj, "length", desc, should_throw); + } + + // Step 4 - Special handling for array index. + var index = ToUint32(p); + if (index == ToNumber(p) && index != 4294967295) { + if ((index >= length && !length_desc.isWritable()) || + !DefineObjectProperty(obj, p, desc, true)) { + if (should_throw) { + throw MakeTypeError("define_disallowed", [p]); + } else { + return; + } + } + if (index >= length) { + // TODO(mstarzinger): We should actually set the value of the property + // descriptor here and pass it to DefineObjectProperty(). Take a look at + // ES5 section 15.4.5.1, step 4.e.i and 4.e.ii for details. + obj.length = index + 1; + } + return true; + } + + // Step 5 - Fallback to default implementation. + return DefineObjectProperty(obj, p, desc, should_throw); +} + + +// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies. +function DefineOwnProperty(obj, p, desc, should_throw) { + if (%IsJSProxy(obj)) { + var attributes = FromGenericPropertyDescriptor(desc); + return DefineProxyProperty(obj, p, attributes, should_throw); + } else if (IS_ARRAY(obj)) { + return DefineArrayProperty(obj, p, desc, should_throw); + } else { + return DefineObjectProperty(obj, p, desc, should_throw); + } +} + + // ES5 section 15.2.3.2. function ObjectGetPrototypeOf(obj) { if (!IS_SPEC_OBJECT(obj)) @@ -1042,12 +1095,21 @@ function ProxyFix(obj) { throw MakeTypeError("handler_returned_undefined", [handler, "fix"]); } - if (IS_SPEC_FUNCTION(obj)) { + if (%IsJSFunctionProxy(obj)) { var callTrap = %GetCallTrap(obj); var constructTrap = %GetConstructTrap(obj); var code = DelegateCallAndConstruct(callTrap, constructTrap); %Fix(obj); // becomes a regular function %SetCode(obj, code); + // TODO(rossberg): What about length and other properties? Not specified. + // We just put in some half-reasonable defaults for now. 
+ var prototype = new $Object(); + $Object.defineProperty(prototype, "constructor", + {value: obj, writable: true, enumerable: false, configrable: true}); + $Object.defineProperty(obj, "prototype", + {value: prototype, writable: true, enumerable: false, configrable: false}) + $Object.defineProperty(obj, "length", + {value: 0, writable: true, enumerable: false, configrable: false}); } else { %Fix(obj); } diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h index aada521e4..c854f0412 100644 --- a/deps/v8/src/v8utils.h +++ b/deps/v8/src/v8utils.h @@ -142,8 +142,14 @@ inline void CopyWords(T* dst, T* src, int num_words) { } -template <typename T> -static inline void MemsetPointer(T** dest, T* value, int counter) { +template <typename T, typename U> +static inline void MemsetPointer(T** dest, U* value, int counter) { +#ifdef DEBUG + T* a = NULL; + U* b = NULL; + a = b; // Fake assignment to check assignability. + USE(a); +#endif // DEBUG #if defined(V8_HOST_ARCH_IA32) #define STOS "stosl" #elif defined(V8_HOST_ARCH_X64) diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc index 971061b05..076cdc0a4 100644 --- a/deps/v8/src/variables.cc +++ b/deps/v8/src/variables.cc @@ -37,7 +37,7 @@ namespace internal { // ---------------------------------------------------------------------------- // Implementation Variable. -const char* Variable::Mode2String(Mode mode) { +const char* Variable::Mode2String(VariableMode mode) { switch (mode) { case VAR: return "VAR"; case CONST: return "CONST"; @@ -55,7 +55,7 @@ const char* Variable::Mode2String(Mode mode) { Variable::Variable(Scope* scope, Handle<String> name, - Mode mode, + VariableMode mode, bool is_valid_LHS, Kind kind) : scope_(scope), diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h index 56c8dabd3..612d8d33c 100644 --- a/deps/v8/src/variables.h +++ b/deps/v8/src/variables.h @@ -40,34 +40,6 @@ namespace internal { class Variable: public ZoneObject { public: - enum Mode { - // User declared variables: - VAR, // declared via 'var', and 'function' declarations - - CONST, // declared via 'const' declarations - - LET, // declared via 'let' declarations - - // Variables introduced by the compiler: - DYNAMIC, // always require dynamic lookup (we don't know - // the declaration) - - DYNAMIC_GLOBAL, // requires dynamic lookup, but we know that the - // variable is global unless it has been shadowed - // by an eval-introduced variable - - DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the - // variable is local and where it is unless it - // has been shadowed by an eval-introduced - // variable - - INTERNAL, // like VAR, but not user-visible (may or may not - // be in a context) - - TEMPORARY // temporary variables (not user-visible), never - // in a context - }; - enum Kind { NORMAL, THIS, @@ -103,12 +75,12 @@ class Variable: public ZoneObject { Variable(Scope* scope, Handle<String> name, - Mode mode, + VariableMode mode, bool is_valid_lhs, Kind kind); // Printing support - static const char* Mode2String(Mode mode); + static const char* Mode2String(VariableMode mode); bool IsValidLeftHandSide() { return is_valid_LHS_; } @@ -119,7 +91,7 @@ class Variable: public ZoneObject { Scope* scope() const { return scope_; } Handle<String> name() const { return name_; } - Mode mode() const { return mode_; } + VariableMode mode() const { return mode_; } bool is_accessed_from_inner_scope() const { return is_accessed_from_inner_scope_; } @@ -177,7 +149,7 @@ class Variable: public ZoneObject { private: Scope* scope_; Handle<String> 
name_; - Mode mode_; + VariableMode mode_; Kind kind_; Location location_; int index_; diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index e16b63eb8..30402266a 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -33,8 +33,8 @@ // NOTE these macros are used by the SCons build script so their names // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 3 -#define MINOR_VERSION 6 -#define BUILD_NUMBER 4 +#define MINOR_VERSION 7 +#define BUILD_NUMBER 0 #define PATCH_LEVEL 0 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/src/win32-headers.h b/deps/v8/src/win32-headers.h index fca5c137e..0ee330668 100644 --- a/deps/v8/src/win32-headers.h +++ b/deps/v8/src/win32-headers.h @@ -75,6 +75,7 @@ // makes it impossible to have them elsewhere. #include <winsock2.h> #include <ws2tcpip.h> +#include <wspiapi.h> #include <process.h> // for _beginthreadex() #include <stdlib.h> #endif // V8_WIN32_HEADERS_FULL diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h index 8db54f075..10f0b886d 100644 --- a/deps/v8/src/x64/assembler-x64-inl.h +++ b/deps/v8/src/x64/assembler-x64-inl.h @@ -242,6 +242,11 @@ void RelocInfo::set_target_address(Address target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); if (IsCodeTarget(rmode_)) { Assembler::set_target_address_at(pc_, target); + Object* target_code = Code::GetCodeFromTargetAddress(target); + if (host() != NULL) { + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } else { Memory::Address_at(pc_) = target; CPU::FlushICache(pc_, sizeof(Address)); @@ -279,8 +284,12 @@ Address* RelocInfo::target_reference_address() { void RelocInfo::set_target_object(Object* target) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); - *reinterpret_cast<Object**>(pc_) = target; + Memory::Object_at(pc_) = target; CPU::FlushICache(pc_, sizeof(Address)); + if (host() != NULL && target->IsHeapObject()) { + host()->GetHeap()->incremental_marking()->RecordWrite( + host(), &Memory::Object_at(pc_), HeapObject::cast(target)); + } } @@ -306,6 +315,12 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) { Address address = cell->address() + JSGlobalPropertyCell::kValueOffset; Memory::Address_at(pc_) = address; CPU::FlushICache(pc_, sizeof(Address)); + if (host() != NULL) { + // TODO(1550) We are passing NULL as a slot because cell can never be on + // evacuation candidate. 
+ host()->GetHeap()->incremental_marking()->RecordWrite( + host(), NULL, cell); + } } @@ -344,6 +359,11 @@ void RelocInfo::set_call_address(Address target) { target; CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset, sizeof(Address)); + if (host() != NULL) { + Object* target_code = Code::GetCodeFromTargetAddress(target); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); + } } @@ -368,7 +388,7 @@ Object** RelocInfo::call_object_address() { void RelocInfo::Visit(ObjectVisitor* visitor) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - visitor->VisitPointer(target_object_address()); + visitor->VisitEmbeddedPointer(this); CPU::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { visitor->VisitCodeTarget(this); @@ -396,7 +416,7 @@ template<typename StaticVisitor> void RelocInfo::Visit(Heap* heap) { RelocInfo::Mode mode = rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { - StaticVisitor::VisitPointer(heap, target_object_address()); + StaticVisitor::VisitEmbeddedPointer(heap, this); CPU::FlushICache(pc_, sizeof(Address)); } else if (RelocInfo::IsCodeTarget(mode)) { StaticVisitor::VisitCodeTarget(heap, this); diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc index 745fdaeb8..1c4980ebc 100644 --- a/deps/v8/src/x64/assembler-x64.cc +++ b/deps/v8/src/x64/assembler-x64.cc @@ -47,7 +47,7 @@ uint64_t CpuFeatures::found_by_runtime_probing_ = 0; void CpuFeatures::Probe() { - ASSERT(!initialized_); + ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures); #ifdef DEBUG initialized_ = true; #endif @@ -2983,7 +2983,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { return; } } - RelocInfo rinfo(pc_, rmode, data); + RelocInfo rinfo(pc_, rmode, data, NULL); reloc_info_writer.Write(&rinfo); } diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h index 2e373faac..0d870537f 100644 --- a/deps/v8/src/x64/assembler-x64.h +++ b/deps/v8/src/x64/assembler-x64.h @@ -215,6 +215,12 @@ struct XMMRegister { return names[index]; } + static XMMRegister from_code(int code) { + ASSERT(code >= 0); + ASSERT(code < kNumRegisters); + XMMRegister r = { code }; + return r; + } bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } bool is(XMMRegister reg) const { return code_ == reg.code_; } int code() const { @@ -735,6 +741,10 @@ class Assembler : public AssemblerBase { immediate_arithmetic_op_32(0x0, dst, src); } + void addl(const Operand& dst, Register src) { + arithmetic_op_32(0x01, src, dst); + } + void addq(Register dst, Register src) { arithmetic_op(0x03, dst, src); } @@ -1394,13 +1404,14 @@ class Assembler : public AssemblerBase { static const int kMaximalBufferSize = 512*MB; static const int kMinimalBufferSize = 4*KB; + byte byte_at(int pos) { return buffer_[pos]; } + void set_byte_at(int pos, byte value) { buffer_[pos] = value; } + protected: bool emit_debug_code() const { return emit_debug_code_; } private: byte* addr_at(int pos) { return buffer_ + pos; } - byte byte_at(int pos) { return buffer_[pos]; } - void set_byte_at(int pos, byte value) { buffer_[pos] = value; } uint32_t long_at(int pos) { return *reinterpret_cast<uint32_t*>(addr_at(pos)); } diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc index db06909da..79ddb1393 100644 --- a/deps/v8/src/x64/builtins-x64.cc +++ b/deps/v8/src/x64/builtins-x64.cc @@ -79,12 +79,12 @@ void 
Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // -- rdi: constructor function // ----------------------------------- - Label non_function_call; + Label slow, non_function_call; // Check that function is not a smi. __ JumpIfSmi(rdi, &non_function_call); // Check that function is a JSFunction. __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); - __ j(not_equal, &non_function_call); + __ j(not_equal, &slow); // Jump to the function-specific construct stub. __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); @@ -94,10 +94,19 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // rdi: called object // rax: number of arguments + // rcx: object map + Label do_call; + __ bind(&slow); + __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE); + __ j(not_equal, &non_function_call); + __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); + __ jmp(&do_call); + __ bind(&non_function_call); + __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); + __ bind(&do_call); // Set expected number of arguments to zero (not changing rax). __ Set(rbx, 0); - __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); __ SetCallKind(rcx, CALL_AS_METHOD); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); @@ -110,272 +119,278 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // Should never count constructions for api objects. ASSERT(!is_api_function || !count_constructions); - // Enter a construct frame. - __ EnterConstructFrame(); + // Enter a construct frame. + { + FrameScope scope(masm, StackFrame::CONSTRUCT); - // Store a smi-tagged arguments count on the stack. - __ Integer32ToSmi(rax, rax); - __ push(rax); + // Store a smi-tagged arguments count on the stack. + __ Integer32ToSmi(rax, rax); + __ push(rax); - // Push the function to invoke on the stack. - __ push(rdi); + // Push the function to invoke on the stack. + __ push(rdi); - // Try to allocate the object without transitioning into C code. If any of the - // preconditions is not met, the code bails out to the runtime call. - Label rt_call, allocated; - if (FLAG_inline_new) { - Label undo_allocation; + // Try to allocate the object without transitioning into C code. If any of + // the preconditions is not met, the code bails out to the runtime call. + Label rt_call, allocated; + if (FLAG_inline_new) { + Label undo_allocation; #ifdef ENABLE_DEBUGGER_SUPPORT - ExternalReference debug_step_in_fp = - ExternalReference::debug_step_in_fp_address(masm->isolate()); - __ movq(kScratchRegister, debug_step_in_fp); - __ cmpq(Operand(kScratchRegister, 0), Immediate(0)); - __ j(not_equal, &rt_call); + ExternalReference debug_step_in_fp = + ExternalReference::debug_step_in_fp_address(masm->isolate()); + __ movq(kScratchRegister, debug_step_in_fp); + __ cmpq(Operand(kScratchRegister, 0), Immediate(0)); + __ j(not_equal, &rt_call); #endif - // Verified that the constructor is a JSFunction. - // Load the initial map and verify that it is in fact a map. - // rdi: constructor - __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); - // Will both indicate a NULL and a Smi - STATIC_ASSERT(kSmiTag == 0); - __ JumpIfSmi(rax, &rt_call); - // rdi: constructor - // rax: initial map (if proven valid below) - __ CmpObjectType(rax, MAP_TYPE, rbx); - __ j(not_equal, &rt_call); - - // Check that the constructor is not constructing a JSFunction (see comments - // in Runtime_NewObject in runtime.cc). 
In which case the initial map's - // instance type would be JS_FUNCTION_TYPE. - // rdi: constructor - // rax: initial map - __ CmpInstanceType(rax, JS_FUNCTION_TYPE); - __ j(equal, &rt_call); - - if (count_constructions) { - Label allocate; - // Decrease generous allocation count. - __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); - __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset)); - __ j(not_zero, &allocate); + // Verified that the constructor is a JSFunction. + // Load the initial map and verify that it is in fact a map. + // rdi: constructor + __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi + ASSERT(kSmiTag == 0); + __ JumpIfSmi(rax, &rt_call); + // rdi: constructor + // rax: initial map (if proven valid below) + __ CmpObjectType(rax, MAP_TYPE, rbx); + __ j(not_equal, &rt_call); + + // Check that the constructor is not constructing a JSFunction (see + // comments in Runtime_NewObject in runtime.cc). In which case the + // initial map's instance type would be JS_FUNCTION_TYPE. + // rdi: constructor + // rax: initial map + __ CmpInstanceType(rax, JS_FUNCTION_TYPE); + __ j(equal, &rt_call); - __ push(rax); - __ push(rdi); + if (count_constructions) { + Label allocate; + // Decrease generous allocation count. + __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ decb(FieldOperand(rcx, + SharedFunctionInfo::kConstructionCountOffset)); + __ j(not_zero, &allocate); - __ push(rdi); // constructor - // The call will replace the stub, so the countdown is only done once. - __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); + __ push(rax); + __ push(rdi); - __ pop(rdi); - __ pop(rax); + __ push(rdi); // constructor + // The call will replace the stub, so the countdown is only done once. + __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); - __ bind(&allocate); - } + __ pop(rdi); + __ pop(rax); - // Now allocate the JSObject on the heap. - __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset)); - __ shl(rdi, Immediate(kPointerSizeLog2)); - // rdi: size of new object - __ AllocateInNewSpace(rdi, - rbx, - rdi, - no_reg, - &rt_call, - NO_ALLOCATION_FLAGS); - // Allocated the JSObject, now initialize the fields. - // rax: initial map - // rbx: JSObject (not HeapObject tagged - the actual address). - // rdi: start of next object - __ movq(Operand(rbx, JSObject::kMapOffset), rax); - __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex); - __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx); - __ movq(Operand(rbx, JSObject::kElementsOffset), rcx); - // Set extra fields in the newly allocated object. - // rax: initial map - // rbx: JSObject - // rdi: start of next object - { Label loop, entry; - // To allow for truncation. + __ bind(&allocate); + } + + // Now allocate the JSObject on the heap. + __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset)); + __ shl(rdi, Immediate(kPointerSizeLog2)); + // rdi: size of new object + __ AllocateInNewSpace(rdi, + rbx, + rdi, + no_reg, + &rt_call, + NO_ALLOCATION_FLAGS); + // Allocated the JSObject, now initialize the fields. + // rax: initial map + // rbx: JSObject (not HeapObject tagged - the actual address). + // rdi: start of next object + __ movq(Operand(rbx, JSObject::kMapOffset), rax); + __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex); + __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx); + __ movq(Operand(rbx, JSObject::kElementsOffset), rcx); + // Set extra fields in the newly allocated object. 
+ // rax: initial map + // rbx: JSObject + // rdi: start of next object + __ lea(rcx, Operand(rbx, JSObject::kHeaderSize)); + __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); if (count_constructions) { + __ movzxbq(rsi, + FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); + __ lea(rsi, + Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize)); + // rsi: offset of first field after pre-allocated fields + if (FLAG_debug_code) { + __ cmpq(rsi, rdi); + __ Assert(less_equal, + "Unexpected number of pre-allocated property fields."); + } + __ InitializeFieldsWithFiller(rcx, rsi, rdx); __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex); - } else { + } + __ InitializeFieldsWithFiller(rcx, rdi, rdx); + + // Add the object tag to make the JSObject real, so that we can continue + // and jump into the continuation code at any time from now on. Any + // failures need to undo the allocation, so that the heap is in a + // consistent state and verifiable. + // rax: initial map + // rbx: JSObject + // rdi: start of next object + __ or_(rbx, Immediate(kHeapObjectTag)); + + // Check if a non-empty properties array is needed. + // Allocate and initialize a FixedArray if it is. + // rax: initial map + // rbx: JSObject + // rdi: start of next object + // Calculate total properties described map. + __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset)); + __ movzxbq(rcx, + FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); + __ addq(rdx, rcx); + // Calculate unused properties past the end of the in-object properties. + __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset)); + __ subq(rdx, rcx); + // Done if no extra properties are to be allocated. + __ j(zero, &allocated); + __ Assert(positive, "Property allocation count failed."); + + // Scale the number of elements by pointer size and add the header for + // FixedArrays to the start of the next object calculation from above. + // rbx: JSObject + // rdi: start of next object (will be start of FixedArray) + // rdx: number of elements in properties array + __ AllocateInNewSpace(FixedArray::kHeaderSize, + times_pointer_size, + rdx, + rdi, + rax, + no_reg, + &undo_allocation, + RESULT_CONTAINS_TOP); + + // Initialize the FixedArray. + // rbx: JSObject + // rdi: FixedArray + // rdx: number of elements + // rax: start of next object + __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex); + __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map + __ Integer32ToSmi(rdx, rdx); + __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length + + // Initialize the fields to undefined. + // rbx: JSObject + // rdi: FixedArray + // rax: start of next object + // rdx: number of elements + { Label loop, entry; __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); + __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize)); + __ jmp(&entry); + __ bind(&loop); + __ movq(Operand(rcx, 0), rdx); + __ addq(rcx, Immediate(kPointerSize)); + __ bind(&entry); + __ cmpq(rcx, rax); + __ j(below, &loop); } - __ lea(rcx, Operand(rbx, JSObject::kHeaderSize)); - __ jmp(&entry); - __ bind(&loop); - __ movq(Operand(rcx, 0), rdx); - __ addq(rcx, Immediate(kPointerSize)); - __ bind(&entry); - __ cmpq(rcx, rdi); - __ j(less, &loop); - } - // Add the object tag to make the JSObject real, so that we can continue and - // jump into the continuation code at any time from now on. Any failures - // need to undo the allocation, so that the heap is in a consistent state - // and verifiable. 
- // rax: initial map - // rbx: JSObject - // rdi: start of next object - __ or_(rbx, Immediate(kHeapObjectTag)); - - // Check if a non-empty properties array is needed. - // Allocate and initialize a FixedArray if it is. - // rax: initial map - // rbx: JSObject - // rdi: start of next object - // Calculate total properties described map. - __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset)); - __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); - __ addq(rdx, rcx); - // Calculate unused properties past the end of the in-object properties. - __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset)); - __ subq(rdx, rcx); - // Done if no extra properties are to be allocated. - __ j(zero, &allocated); - __ Assert(positive, "Property allocation count failed."); - - // Scale the number of elements by pointer size and add the header for - // FixedArrays to the start of the next object calculation from above. - // rbx: JSObject - // rdi: start of next object (will be start of FixedArray) - // rdx: number of elements in properties array - __ AllocateInNewSpace(FixedArray::kHeaderSize, - times_pointer_size, - rdx, - rdi, - rax, - no_reg, - &undo_allocation, - RESULT_CONTAINS_TOP); - - // Initialize the FixedArray. - // rbx: JSObject - // rdi: FixedArray - // rdx: number of elements - // rax: start of next object - __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex); - __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map - __ Integer32ToSmi(rdx, rdx); - __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length - - // Initialize the fields to undefined. - // rbx: JSObject - // rdi: FixedArray - // rax: start of next object - // rdx: number of elements - { Label loop, entry; - __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); - __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize)); - __ jmp(&entry); - __ bind(&loop); - __ movq(Operand(rcx, 0), rdx); - __ addq(rcx, Immediate(kPointerSize)); - __ bind(&entry); - __ cmpq(rcx, rax); - __ j(below, &loop); - } + // Store the initialized FixedArray into the properties field of + // the JSObject + // rbx: JSObject + // rdi: FixedArray + __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag + __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi); - // Store the initialized FixedArray into the properties field of - // the JSObject - // rbx: JSObject - // rdi: FixedArray - __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag - __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi); + // Continue with JSObject being successfully allocated + // rbx: JSObject + __ jmp(&allocated); - // Continue with JSObject being successfully allocated - // rbx: JSObject - __ jmp(&allocated); + // Undo the setting of the new top so that the heap is verifiable. For + // example, the map's unused properties potentially do not match the + // allocated objects unused properties. + // rbx: JSObject (previous new top) + __ bind(&undo_allocation); + __ UndoAllocationInNewSpace(rbx); + } - // Undo the setting of the new top so that the heap is verifiable. For - // example, the map's unused properties potentially do not match the - // allocated objects unused properties. - // rbx: JSObject (previous new top) - __ bind(&undo_allocation); - __ UndoAllocationInNewSpace(rbx); - } + // Allocate the new receiver object using the runtime call. + // rdi: function (constructor) + __ bind(&rt_call); + // Must restore rdi (constructor) before calling runtime. 
+ __ movq(rdi, Operand(rsp, 0)); + __ push(rdi); + __ CallRuntime(Runtime::kNewObject, 1); + __ movq(rbx, rax); // store result in rbx - // Allocate the new receiver object using the runtime call. - // rdi: function (constructor) - __ bind(&rt_call); - // Must restore rdi (constructor) before calling runtime. - __ movq(rdi, Operand(rsp, 0)); - __ push(rdi); - __ CallRuntime(Runtime::kNewObject, 1); - __ movq(rbx, rax); // store result in rbx + // New object allocated. + // rbx: newly allocated object + __ bind(&allocated); + // Retrieve the function from the stack. + __ pop(rdi); - // New object allocated. - // rbx: newly allocated object - __ bind(&allocated); - // Retrieve the function from the stack. - __ pop(rdi); - - // Retrieve smi-tagged arguments count from the stack. - __ movq(rax, Operand(rsp, 0)); - __ SmiToInteger32(rax, rax); + // Retrieve smi-tagged arguments count from the stack. + __ movq(rax, Operand(rsp, 0)); + __ SmiToInteger32(rax, rax); - // Push the allocated receiver to the stack. We need two copies - // because we may have to return the original one and the calling - // conventions dictate that the called function pops the receiver. - __ push(rbx); - __ push(rbx); + // Push the allocated receiver to the stack. We need two copies + // because we may have to return the original one and the calling + // conventions dictate that the called function pops the receiver. + __ push(rbx); + __ push(rbx); - // Setup pointer to last argument. - __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset)); + // Setup pointer to last argument. + __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset)); - // Copy arguments and receiver to the expression stack. - Label loop, entry; - __ movq(rcx, rax); - __ jmp(&entry); - __ bind(&loop); - __ push(Operand(rbx, rcx, times_pointer_size, 0)); - __ bind(&entry); - __ decq(rcx); - __ j(greater_equal, &loop); + // Copy arguments and receiver to the expression stack. + Label loop, entry; + __ movq(rcx, rax); + __ jmp(&entry); + __ bind(&loop); + __ push(Operand(rbx, rcx, times_pointer_size, 0)); + __ bind(&entry); + __ decq(rcx); + __ j(greater_equal, &loop); + + // Call the function. + if (is_api_function) { + __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); + Handle<Code> code = + masm->isolate()->builtins()->HandleApiCallConstruct(); + ParameterCount expected(0); + __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + } else { + ParameterCount actual(rax); + __ InvokeFunction(rdi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } - // Call the function. - if (is_api_function) { - __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); - Handle<Code> code = - masm->isolate()->builtins()->HandleApiCallConstruct(); - ParameterCount expected(0); - __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET, - CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); - } else { - ParameterCount actual(rax); - __ InvokeFunction(rdi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); - } + // Restore context from the frame. + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); - // Restore context from the frame. - __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + // If the result is an object (in the ECMA sense), we should get rid + // of the receiver and use the result; see ECMA-262 section 13.2.2-7 + // on page 74. 
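The checks that follow implement that rule: a smi or non-object result from the constructor body is discarded and the freshly allocated receiver is returned instead. A standalone model of the decision — the type constants are placeholders, not V8's real instance-type values:

enum TypeModel { kSmi = 0, kPrimitive = 1, kSpecObject = 2 };

struct ValueModel { TypeModel type; };

// Mirrors JumpIfSmi + CmpObjectType(FIRST_SPEC_OBJECT_TYPE) below.
inline const ValueModel* ConstructResult(const ValueModel* call_result,
                                         const ValueModel* receiver) {
  if (call_result->type == kSmi) return receiver;         // not an object
  if (call_result->type != kSpecObject) return receiver;  // below spec-object range
  return call_result;  // ECMA-262 (3rd ed.) section 13.2.2, step 7
}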
+ Label use_receiver, exit; + // If the result is a smi, it is *not* an object in the ECMA sense. + __ JumpIfSmi(rax, &use_receiver); - // If the result is an object (in the ECMA sense), we should get rid - // of the receiver and use the result; see ECMA-262 section 13.2.2-7 - // on page 74. - Label use_receiver, exit; - // If the result is a smi, it is *not* an object in the ECMA sense. - __ JumpIfSmi(rax, &use_receiver); + // If the type of the result (stored in its map) is less than + // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); + __ j(above_equal, &exit); - // If the type of the result (stored in its map) is less than - // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); - __ j(above_equal, &exit); + // Throw away the result of the constructor invocation and use the + // on-stack receiver as the result. + __ bind(&use_receiver); + __ movq(rax, Operand(rsp, 0)); - // Throw away the result of the constructor invocation and use the - // on-stack receiver as the result. - __ bind(&use_receiver); - __ movq(rax, Operand(rsp, 0)); + // Restore the arguments count and leave the construct frame. + __ bind(&exit); + __ movq(rbx, Operand(rsp, kPointerSize)); // Get arguments count. - // Restore the arguments count and leave the construct frame. - __ bind(&exit); - __ movq(rbx, Operand(rsp, kPointerSize)); // get arguments count - __ LeaveConstructFrame(); + // Leave construct frame. + } // Remove caller arguments from the stack and return. __ pop(rcx); @@ -413,104 +428,108 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // - Object*** argv // (see Handle::Invoke in execution.cc). - // Platform specific argument handling. After this, the stack contains - // an internal frame and the pushed function and receiver, and - // register rax and rbx holds the argument count and argument array, - // while rdi holds the function pointer and rsi the context. -#ifdef _WIN64 - // MSVC parameters in: - // rcx : entry (ignored) - // rdx : function - // r8 : receiver - // r9 : argc - // [rsp+0x20] : argv - - // Clear the context before we push it when entering the JS frame. - __ Set(rsi, 0); - __ EnterInternalFrame(); - - // Load the function context into rsi. - __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset)); - - // Push the function and the receiver onto the stack. - __ push(rdx); - __ push(r8); + // Open a C++ scope for the FrameScope. + { + // Platform specific argument handling. After this, the stack contains + // an internal frame and the pushed function and receiver, and + // register rax and rbx holds the argument count and argument array, + // while rdi holds the function pointer and rsi the context. - // Load the number of arguments and setup pointer to the arguments. - __ movq(rax, r9); - // Load the previous frame pointer to access C argument on stack - __ movq(kScratchRegister, Operand(rbp, 0)); - __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset)); - // Load the function pointer into rdi. - __ movq(rdi, rdx); +#ifdef _WIN64 + // MSVC parameters in: + // rcx : entry (ignored) + // rdx : function + // r8 : receiver + // r9 : argc + // [rsp+0x20] : argv + + // Clear the context before we push it when entering the internal frame. + __ Set(rsi, 0); + // Enter an internal frame. 
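FrameScope is the pattern used throughout this patch in place of the removed EnterInternalFrame/LeaveInternalFrame pairs: the frame setup is emitted when the scope object is constructed and the teardown when the enclosing C++ block ends, so the leave code cannot be forgotten on any path that falls out of the block. Roughly — a sketch, not the real v8::internal class:

struct MasmModel {
  void EnterFrame(int /* frame type */) { /* emit frame setup */ }
  void LeaveFrame()                     { /* emit frame teardown */ }
};

class ScopedFrameModel {
 public:
  ScopedFrameModel(MasmModel* masm, int type) : masm_(masm) {
    masm_->EnterFrame(type);   // code emitted where the scope opens
  }
  ~ScopedFrameModel() { masm_->LeaveFrame(); }  // emitted at the closing '}'
 private:
  MasmModel* masm_;
};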
+ FrameScope scope(masm, StackFrame::INTERNAL); + + // Load the function context into rsi. + __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset)); + + // Push the function and the receiver onto the stack. + __ push(rdx); + __ push(r8); + + // Load the number of arguments and setup pointer to the arguments. + __ movq(rax, r9); + // Load the previous frame pointer to access C argument on stack + __ movq(kScratchRegister, Operand(rbp, 0)); + __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset)); + // Load the function pointer into rdi. + __ movq(rdi, rdx); #else // _WIN64 - // GCC parameters in: - // rdi : entry (ignored) - // rsi : function - // rdx : receiver - // rcx : argc - // r8 : argv - - __ movq(rdi, rsi); - // rdi : function - - // Clear the context before we push it when entering the JS frame. - __ Set(rsi, 0); - // Enter an internal frame. - __ EnterInternalFrame(); - - // Push the function and receiver and setup the context. - __ push(rdi); - __ push(rdx); - __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); + // GCC parameters in: + // rdi : entry (ignored) + // rsi : function + // rdx : receiver + // rcx : argc + // r8 : argv + + __ movq(rdi, rsi); + // rdi : function + + // Clear the context before we push it when entering the internal frame. + __ Set(rsi, 0); + // Enter an internal frame. + FrameScope scope(masm, StackFrame::INTERNAL); + + // Push the function and receiver and setup the context. + __ push(rdi); + __ push(rdx); + __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); - // Load the number of arguments and setup pointer to the arguments. - __ movq(rax, rcx); - __ movq(rbx, r8); + // Load the number of arguments and setup pointer to the arguments. + __ movq(rax, rcx); + __ movq(rbx, r8); #endif // _WIN64 - // Current stack contents: - // [rsp + 2 * kPointerSize ... ]: Internal frame - // [rsp + kPointerSize] : function - // [rsp] : receiver - // Current register contents: - // rax : argc - // rbx : argv - // rsi : context - // rdi : function - - // Copy arguments to the stack in a loop. - // Register rbx points to array of pointers to handle locations. - // Push the values of these handles. - Label loop, entry; - __ Set(rcx, 0); // Set loop variable to 0. - __ jmp(&entry); - __ bind(&loop); - __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0)); - __ push(Operand(kScratchRegister, 0)); // dereference handle - __ addq(rcx, Immediate(1)); - __ bind(&entry); - __ cmpq(rcx, rax); - __ j(not_equal, &loop); - - // Invoke the code. - if (is_construct) { - // Expects rdi to hold function pointer. - __ Call(masm->isolate()->builtins()->JSConstructCall(), - RelocInfo::CODE_TARGET); - } else { - ParameterCount actual(rax); - // Function must be in rdi. - __ InvokeFunction(rdi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Current stack contents: + // [rsp + 2 * kPointerSize ... ]: Internal frame + // [rsp + kPointerSize] : function + // [rsp] : receiver + // Current register contents: + // rax : argc + // rbx : argv + // rsi : context + // rdi : function + + // Copy arguments to the stack in a loop. + // Register rbx points to array of pointers to handle locations. + // Push the values of these handles. + Label loop, entry; + __ Set(rcx, 0); // Set loop variable to 0. 
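The argv that reaches this trampoline is an array of handle locations (each element is the address of an Object* slot, per the Object*** parameter noted above), which is why the loop below loads twice: once for the handle, once for the value it points at. The same copy in plain C++, with stand-in types:

#include <vector>

typedef void* ObjectModel;          // stand-in for a tagged Object pointer
typedef ObjectModel* HandleModel;   // a handle: address of an Object slot

inline void PushArguments(const HandleModel argv[], int argc,
                          std::vector<ObjectModel>* js_stack) {
  for (int i = 0; i < argc; ++i) {
    js_stack->push_back(*argv[i]);  // "dereference handle" in the loop below
  }
}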
+ __ jmp(&entry); + __ bind(&loop); + __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0)); + __ push(Operand(kScratchRegister, 0)); // dereference handle + __ addq(rcx, Immediate(1)); + __ bind(&entry); + __ cmpq(rcx, rax); + __ j(not_equal, &loop); + + // Invoke the code. + if (is_construct) { + // Expects rdi to hold function pointer. + __ Call(masm->isolate()->builtins()->JSConstructCall(), + RelocInfo::CODE_TARGET); + } else { + ParameterCount actual(rax); + // Function must be in rdi. + __ InvokeFunction(rdi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + } + // Exit the internal frame. Notice that this also removes the empty + // context and the function left on the stack by the code + // invocation. } - // Exit the JS frame. Notice that this also removes the empty - // context and the function left on the stack by the code - // invocation. - __ LeaveInternalFrame(); // TODO(X64): Is argument correct? Is there a receiver to remove? - __ ret(1 * kPointerSize); // remove receiver + __ ret(1 * kPointerSize); // Remove receiver. } @@ -526,23 +545,24 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { void Builtins::Generate_LazyCompile(MacroAssembler* masm) { // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push a copy of the function onto the stack. - __ push(rdi); - // Push call kind information. - __ push(rcx); + // Push a copy of the function onto the stack. + __ push(rdi); + // Push call kind information. + __ push(rcx); - __ push(rdi); // Function is also the parameter to the runtime call. - __ CallRuntime(Runtime::kLazyCompile, 1); + __ push(rdi); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kLazyCompile, 1); - // Restore call kind information. - __ pop(rcx); - // Restore receiver. - __ pop(rdi); + // Restore call kind information. + __ pop(rcx); + // Restore receiver. + __ pop(rdi); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Do a tail-call of the compiled function. __ lea(rax, FieldOperand(rax, Code::kHeaderSize)); @@ -552,23 +572,24 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) { void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push a copy of the function onto the stack. - __ push(rdi); - // Push call kind information. - __ push(rcx); + // Push a copy of the function onto the stack. + __ push(rdi); + // Push call kind information. + __ push(rcx); - __ push(rdi); // Function is also the parameter to the runtime call. - __ CallRuntime(Runtime::kLazyRecompile, 1); + __ push(rdi); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kLazyRecompile, 1); - // Restore call kind information. - __ pop(rcx); - // Restore function. - __ pop(rdi); + // Restore call kind information. + __ pop(rcx); + // Restore function. + __ pop(rdi); - // Tear down temporary frame. - __ LeaveInternalFrame(); + // Tear down internal frame. + } // Do a tail-call of the compiled function. __ lea(rax, FieldOperand(rax, Code::kHeaderSize)); @@ -579,14 +600,15 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { // Enter an internal frame. 
- __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Pass the deoptimization type to the runtime system. - __ Push(Smi::FromInt(static_cast<int>(type))); + // Pass the deoptimization type to the runtime system. + __ Push(Smi::FromInt(static_cast<int>(type))); - __ CallRuntime(Runtime::kNotifyDeoptimized, 1); - // Tear down temporary frame. - __ LeaveInternalFrame(); + __ CallRuntime(Runtime::kNotifyDeoptimized, 1); + // Tear down internal frame. + } // Get the full codegen state from the stack and untag it. __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize)); @@ -623,9 +645,10 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { // the registers without worrying about which of them contain // pointers. This seems a bit fragile. __ Pushad(); - __ EnterInternalFrame(); - __ CallRuntime(Runtime::kNotifyOSR, 0); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kNotifyOSR, 0); + } __ Popad(); __ ret(0); } @@ -695,18 +718,21 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ j(above_equal, &shift_arguments); __ bind(&convert_to_object); - __ EnterInternalFrame(); // In order to preserve argument count. - __ Integer32ToSmi(rax, rax); - __ push(rax); + { + // Enter an internal frame in order to preserve argument count. + FrameScope scope(masm, StackFrame::INTERNAL); + __ Integer32ToSmi(rax, rax); + __ push(rax); - __ push(rbx); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ movq(rbx, rax); - __ Set(rdx, 0); // indicate regular JS_FUNCTION + __ push(rbx); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ movq(rbx, rax); + __ Set(rdx, 0); // indicate regular JS_FUNCTION + + __ pop(rax); + __ SmiToInteger32(rax, rax); + } - __ pop(rax); - __ SmiToInteger32(rax, rax); - __ LeaveInternalFrame(); // Restore the function to rdi. __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize)); __ jmp(&patch_receiver, Label::kNear); @@ -807,160 +833,162 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // rsp+8: arguments // rsp+16: receiver ("this") // rsp+24: function - __ EnterInternalFrame(); - // Stack frame: - // rbp: Old base pointer - // rbp[1]: return address - // rbp[2]: function arguments - // rbp[3]: receiver - // rbp[4]: function - static const int kArgumentsOffset = 2 * kPointerSize; - static const int kReceiverOffset = 3 * kPointerSize; - static const int kFunctionOffset = 4 * kPointerSize; - - __ push(Operand(rbp, kFunctionOffset)); - __ push(Operand(rbp, kArgumentsOffset)); - __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - - // Check the stack for overflow. We are not trying to catch - // interruptions (e.g. debug break and preemption) here, so the "real stack - // limit" is checked. - Label okay; - __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex); - __ movq(rcx, rsp); - // Make rcx the space we have left. The stack might already be overflowed - // here which will cause rcx to become negative. - __ subq(rcx, kScratchRegister); - // Make rdx the space we need for the array when it is unrolled onto the - // stack. - __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2); - // Check if the arguments will overflow the stack. - __ cmpq(rcx, rdx); - __ j(greater, &okay); // Signed comparison. - - // Out of stack space. - __ push(Operand(rbp, kFunctionOffset)); - __ push(rax); - __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); - __ bind(&okay); - // End of stack check. 
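The stack check being rewritten here (and re-emitted inside the FrameScope below) compares the space left against the space the unrolled argument array will need; the comparison is signed because the stack pointer may already be past the real limit. The arithmetic, as a sketch:

#include <cstdint>

// rcx = rsp - real_stack_limit (space left, possibly negative when read as
// signed), rdx = argc * kPointerSize (space needed for the unrolled array).
inline bool ApplyArgumentsFit(intptr_t rsp, intptr_t real_stack_limit,
                              intptr_t argc, intptr_t pointer_size) {
  intptr_t space_left = rsp - real_stack_limit;
  intptr_t space_needed = argc * pointer_size;
  return space_left > space_needed;  // j(greater): signed comparison
}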
- - // Push current index and limit. - const int kLimitOffset = - StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize; - const int kIndexOffset = kLimitOffset - 1 * kPointerSize; - __ push(rax); // limit - __ push(Immediate(0)); // index - - // Get the receiver. - __ movq(rbx, Operand(rbp, kReceiverOffset)); - - // Check that the function is a JS function (otherwise it must be a proxy). - Label push_receiver; - __ movq(rdi, Operand(rbp, kFunctionOffset)); - __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); - __ j(not_equal, &push_receiver); + { + FrameScope frame_scope(masm, StackFrame::INTERNAL); + // Stack frame: + // rbp: Old base pointer + // rbp[1]: return address + // rbp[2]: function arguments + // rbp[3]: receiver + // rbp[4]: function + static const int kArgumentsOffset = 2 * kPointerSize; + static const int kReceiverOffset = 3 * kPointerSize; + static const int kFunctionOffset = 4 * kPointerSize; + + __ push(Operand(rbp, kFunctionOffset)); + __ push(Operand(rbp, kArgumentsOffset)); + __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); + + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. + Label okay; + __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex); + __ movq(rcx, rsp); + // Make rcx the space we have left. The stack might already be overflowed + // here which will cause rcx to become negative. + __ subq(rcx, kScratchRegister); + // Make rdx the space we need for the array when it is unrolled onto the + // stack. + __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2); + // Check if the arguments will overflow the stack. + __ cmpq(rcx, rdx); + __ j(greater, &okay); // Signed comparison. + + // Out of stack space. + __ push(Operand(rbp, kFunctionOffset)); + __ push(rax); + __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); + __ bind(&okay); + // End of stack check. + + // Push current index and limit. + const int kLimitOffset = + StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize; + const int kIndexOffset = kLimitOffset - 1 * kPointerSize; + __ push(rax); // limit + __ push(Immediate(0)); // index + + // Get the receiver. + __ movq(rbx, Operand(rbp, kReceiverOffset)); + + // Check that the function is a JS function (otherwise it must be a proxy). + Label push_receiver; + __ movq(rdi, Operand(rbp, kFunctionOffset)); + __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); + __ j(not_equal, &push_receiver); + + // Change context eagerly to get the right global object if necessary. + __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); - // Change context eagerly to get the right global object if necessary. - __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); + // Do not transform the receiver for strict mode functions. + Label call_to_object, use_global_receiver; + __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset), + Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); + __ j(not_equal, &push_receiver); - // Do not transform the receiver for strict mode functions. - Label call_to_object, use_global_receiver; - __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); - __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset), - Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); - __ j(not_equal, &push_receiver); - - // Do not transform the receiver for natives. 
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset), - Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte)); - __ j(not_equal, &push_receiver); - - // Compute the receiver in non-strict mode. - __ JumpIfSmi(rbx, &call_to_object, Label::kNear); - __ CompareRoot(rbx, Heap::kNullValueRootIndex); - __ j(equal, &use_global_receiver); - __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); - __ j(equal, &use_global_receiver); - - // If given receiver is already a JavaScript object then there's no - // reason for converting it. - STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx); - __ j(above_equal, &push_receiver); - - // Convert the receiver to an object. - __ bind(&call_to_object); - __ push(rbx); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ movq(rbx, rax); - __ jmp(&push_receiver, Label::kNear); - - // Use the current global receiver object as the receiver. - __ bind(&use_global_receiver); - const int kGlobalOffset = - Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; - __ movq(rbx, FieldOperand(rsi, kGlobalOffset)); - __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset)); - __ movq(rbx, FieldOperand(rbx, kGlobalOffset)); - __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset)); - - // Push the receiver. - __ bind(&push_receiver); - __ push(rbx); - - // Copy all arguments from the array to the stack. - Label entry, loop; - __ movq(rax, Operand(rbp, kIndexOffset)); - __ jmp(&entry); - __ bind(&loop); - __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments - - // Use inline caching to speed up access to arguments. - Handle<Code> ic = - masm->isolate()->builtins()->KeyedLoadIC_Initialize(); - __ Call(ic, RelocInfo::CODE_TARGET); - // It is important that we do not have a test instruction after the - // call. A test instruction after the call is used to indicate that - // we have generated an inline version of the keyed load. In this - // case, we know that we are not generating a test instruction next. - - // Push the nth argument. - __ push(rax); + // Do not transform the receiver for natives. + __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset), + Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte)); + __ j(not_equal, &push_receiver); - // Update the index on the stack and in register rax. - __ movq(rax, Operand(rbp, kIndexOffset)); - __ SmiAddConstant(rax, rax, Smi::FromInt(1)); - __ movq(Operand(rbp, kIndexOffset), rax); + // Compute the receiver in non-strict mode. + __ JumpIfSmi(rbx, &call_to_object, Label::kNear); + __ CompareRoot(rbx, Heap::kNullValueRootIndex); + __ j(equal, &use_global_receiver); + __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); + __ j(equal, &use_global_receiver); - __ bind(&entry); - __ cmpq(rax, Operand(rbp, kLimitOffset)); - __ j(not_equal, &loop); + // If given receiver is already a JavaScript object then there's no + // reason for converting it. + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx); + __ j(above_equal, &push_receiver); - // Invoke the function. - Label call_proxy; - ParameterCount actual(rax); - __ SmiToInteger32(rax, rax); - __ movq(rdi, Operand(rbp, kFunctionOffset)); - __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); - __ j(not_equal, &call_proxy); - __ InvokeFunction(rdi, actual, CALL_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + // Convert the receiver to an object. 
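Taken together, the branches above and below implement the non-strict receiver rule for apply: strict-mode and native callees get the receiver untouched, null and undefined are replaced by the global receiver, objects pass through, and anything else is converted via TO_OBJECT. A compact model with stand-in types, not V8 API:

enum ReceiverKind { kNullOrUndefined, kOtherPrimitive, kJSObject };

struct ReceiverModel { ReceiverKind kind; };

inline ReceiverModel WrapReceiver(ReceiverModel r,
                                  ReceiverModel global_receiver,
                                  ReceiverModel (*to_object)(ReceiverModel),
                                  bool strict_or_native) {
  if (strict_or_native) return r;                        // push as-is
  if (r.kind == kNullOrUndefined) return global_receiver;
  if (r.kind == kJSObject) return r;                     // already an object
  return to_object(r);                                   // Builtins::TO_OBJECT
}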
+ __ bind(&call_to_object); + __ push(rbx); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ movq(rbx, rax); + __ jmp(&push_receiver, Label::kNear); - __ LeaveInternalFrame(); - __ ret(3 * kPointerSize); // remove this, receiver, and arguments + // Use the current global receiver object as the receiver. + __ bind(&use_global_receiver); + const int kGlobalOffset = + Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; + __ movq(rbx, FieldOperand(rsi, kGlobalOffset)); + __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset)); + __ movq(rbx, FieldOperand(rbx, kGlobalOffset)); + __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset)); - // Invoke the function proxy. - __ bind(&call_proxy); - __ push(rdi); // add function proxy as last argument - __ incq(rax); - __ Set(rbx, 0); - __ SetCallKind(rcx, CALL_AS_METHOD); - __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY); - __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), - RelocInfo::CODE_TARGET); + // Push the receiver. + __ bind(&push_receiver); + __ push(rbx); + + // Copy all arguments from the array to the stack. + Label entry, loop; + __ movq(rax, Operand(rbp, kIndexOffset)); + __ jmp(&entry); + __ bind(&loop); + __ movq(rdx, Operand(rbp, kArgumentsOffset)); // load arguments + + // Use inline caching to speed up access to arguments. + Handle<Code> ic = + masm->isolate()->builtins()->KeyedLoadIC_Initialize(); + __ Call(ic, RelocInfo::CODE_TARGET); + // It is important that we do not have a test instruction after the + // call. A test instruction after the call is used to indicate that + // we have generated an inline version of the keyed load. In this + // case, we know that we are not generating a test instruction next. + + // Push the nth argument. + __ push(rax); + + // Update the index on the stack and in register rax. + __ movq(rax, Operand(rbp, kIndexOffset)); + __ SmiAddConstant(rax, rax, Smi::FromInt(1)); + __ movq(Operand(rbp, kIndexOffset), rax); - __ LeaveInternalFrame(); + __ bind(&entry); + __ cmpq(rax, Operand(rbp, kLimitOffset)); + __ j(not_equal, &loop); + + // Invoke the function. + Label call_proxy; + ParameterCount actual(rax); + __ SmiToInteger32(rax, rax); + __ movq(rdi, Operand(rbp, kFunctionOffset)); + __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); + __ j(not_equal, &call_proxy); + __ InvokeFunction(rdi, actual, CALL_FUNCTION, + NullCallWrapper(), CALL_AS_METHOD); + + frame_scope.GenerateLeaveFrame(); + __ ret(3 * kPointerSize); // remove this, receiver, and arguments + + // Invoke the function proxy. + __ bind(&call_proxy); + __ push(rdi); // add function proxy as last argument + __ incq(rax); + __ Set(rbx, 0); + __ SetCallKind(rcx, CALL_AS_METHOD); + __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY); + __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), + RelocInfo::CODE_TARGET); + + // Leave internal frame. + } __ ret(3 * kPointerSize); // remove this, receiver, and arguments } @@ -1520,10 +1548,11 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Pass the function to optimize as the argument to the on-stack // replacement runtime function. - __ EnterInternalFrame(); - __ push(rax); - __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(rax); + __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); + } // If the result was -1 it means that we couldn't optimize the // function. 
Just return and continue in the unoptimized version. @@ -1541,7 +1570,9 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { StackCheckStub stub; __ TailCallStub(&stub); - __ Abort("Unreachable code: returned from tail call."); + if (FLAG_debug_code) { + __ Abort("Unreachable code: returned from tail call."); + } __ bind(&ok); __ ret(0); diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc index df4438b73..7d41ffe53 100644 --- a/deps/v8/src/x64/code-stubs-x64.cc +++ b/deps/v8/src/x64/code-stubs-x64.cc @@ -155,6 +155,70 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { } +void FastNewBlockContextStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [rsp + (1 * kPointerSize)]: function + // [rsp + (2 * kPointerSize)]: serialized scope info + + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + __ AllocateInNewSpace(FixedArray::SizeFor(length), + rax, rbx, rcx, &gc, TAG_OBJECT); + + // Get the function from the stack. + __ movq(rcx, Operand(rsp, 1 * kPointerSize)); + + // Get the serialized scope info from the stack. + __ movq(rbx, Operand(rsp, 2 * kPointerSize)); + + // Setup the object header. + __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex); + __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); + __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); + + // If this block context is nested in the global context we get a smi + // sentinel instead of a function. The block context should get the + // canonical empty function of the global context as its closure which + // we still have to look up. + Label after_sentinel; + __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear); + if (FLAG_debug_code) { + const char* message = "Expected 0 as a Smi sentinel"; + __ cmpq(rcx, Immediate(0)); + __ Assert(equal, message); + } + __ movq(rcx, GlobalObjectOperand()); + __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset)); + __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX)); + __ bind(&after_sentinel); + + // Setup the fixed slots. + __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx); + __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi); + __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx); + + // Copy the global object from the previous context. + __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX)); + __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx); + + // Initialize the rest of the slots to the hole value. + __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex); + for (int i = 0; i < slots_; i++) { + __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx); + } + + // Return and remove the on-stack parameter. + __ movq(rsi, rax); + __ ret(2 * kPointerSize); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); +} + + void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // Stack layout on entry: // @@ -233,6 +297,8 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { // The stub expects its argument on the stack and returns its result in tos_: // zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. 
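For reference, the value classes this stub tests inline follow the ECMAScript ToBoolean table: undefined, null, false, the empty string, and numbers that are ±0 or NaN are falsy; every other value, including all heap objects, is truthy. The scalar cases in plain C++, as a sketch:

#include <cmath>
#include <string>

inline bool ToBooleanFromNumber(double value) {
  return value != 0.0 && !std::isnan(value);   // ±0 and NaN are falsy
}

inline bool ToBooleanFromString(const std::string& value) {
  return !value.empty();                       // "" is falsy
}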
Label patch; const Register argument = rax; const Register map = rdx; @@ -328,6 +394,25 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { } +void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { + __ PushCallerSaved(save_doubles_); + const int argument_count = 1; + __ PrepareCallCFunction(argument_count); +#ifdef _WIN64 + __ LoadAddress(rcx, ExternalReference::isolate_address()); +#else + __ LoadAddress(rdi, ExternalReference::isolate_address()); +#endif + + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::store_buffer_overflow_function(masm->isolate()), + argument_count); + __ PopCallerSaved(save_doubles_); + __ ret(0); +} + + void ToBooleanStub::CheckOddball(MacroAssembler* masm, Type type, Heap::RootListIndex value, @@ -622,12 +707,13 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - __ EnterInternalFrame(); - __ push(rax); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ movq(rcx, rax); - __ pop(rax); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(rax); + __ CallRuntime(Runtime::kNumberAlloc, 0); + __ movq(rcx, rax); + __ pop(rax); + } __ bind(&heapnumber_allocated); // rcx: allocated 'empty' number @@ -751,6 +837,10 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. It is safe here because + // generation code does not use any raw pointers. + AllowStubCallsScope allow_stub_calls(masm, true); + switch (operands_type_) { case BinaryOpIC::UNINITIALIZED: GenerateTypeTransition(masm); @@ -1453,11 +1543,12 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ addq(rsp, Immediate(kDoubleSize)); // We return the value in xmm1 without adding it to the cache, but // we cause a scavenging GC so that future allocations will succeed. - __ EnterInternalFrame(); - // Allocate an unused object bigger than a HeapNumber. - __ Push(Smi::FromInt(2 * kDoubleSize)); - __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + // Allocate an unused object bigger than a HeapNumber. + __ Push(Smi::FromInt(2 * kDoubleSize)); + __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); + } __ Ret(); } @@ -1473,10 +1564,11 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ bind(&runtime_call); __ AllocateHeapNumber(rax, rdi, &skip_cache); __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1); - __ EnterInternalFrame(); - __ push(rax); - __ CallRuntime(RuntimeFunction(), 1); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(rax); + __ CallRuntime(RuntimeFunction(), 1); + } __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); __ Ret(); } @@ -2346,10 +2438,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { #ifdef V8_INTERPRETED_REGEXP __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); #else // V8_INTERPRETED_REGEXP - if (!FLAG_regexp_entry_native) { - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); - return; - } // Stack frame on entry. // rsp[0]: return address @@ -2670,12 +2758,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Store last subject and last input. 
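The stores of the last subject and last input that follow are now paired with RecordWriteField, the incremental-marking-aware write barrier this patch introduces. Conceptually, the barrier records a slot when an old-space object is made to point at a new-space value (and, while incremental marking is active, also informs the marker). A simplified form of that condition, with illustrative page bookkeeping:

#include <cstdint>

struct PageModel {
  bool in_new_space;  // set for pages in the young generation
};

// page_of is an illustrative lookup from an address to its page metadata.
inline bool NeedsRememberedSetEntry(uintptr_t host_object, uintptr_t value,
                                    PageModel* (*page_of)(uintptr_t)) {
  return !page_of(host_object)->in_new_space &&  // host lives in old space
          page_of(value)->in_new_space;          // value lives in new space
}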
__ movq(rax, Operand(rsp, kSubjectOffset)); __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax); - __ movq(rcx, rbx); - __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi); + __ RecordWriteField(rbx, + RegExpImpl::kLastSubjectOffset, + rax, + rdi, + kDontSaveFPRegs); __ movq(rax, Operand(rsp, kSubjectOffset)); __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax); - __ movq(rcx, rbx); - __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi); + __ RecordWriteField(rbx, + RegExpImpl::kLastInputOffset, + rax, + rdi, + kDontSaveFPRegs); // Get the static offsets vector filled by the native regexp code. __ LoadAddress(rcx, @@ -3231,6 +3325,22 @@ void StackCheckStub::Generate(MacroAssembler* masm) { } +void CallFunctionStub::FinishCode(Code* code) { + code->set_has_function_cache(false); +} + + +void CallFunctionStub::Clear(Heap* heap, Address address) { + UNREACHABLE(); +} + + +Object* CallFunctionStub::GetCachedValue(Address address) { + UNREACHABLE(); + return NULL; +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { Label slow, non_function; @@ -3319,6 +3429,35 @@ bool CEntryStub::NeedsImmovableCode() { } +bool CEntryStub::IsPregenerated() { +#ifdef _WIN64 + return result_size_ == 1; +#else + return true; +#endif +} + + +void CodeStub::GenerateStubsAheadOfTime() { + CEntryStub::GenerateAheadOfTime(); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); + // It is important that the store buffer overflow stubs are generated first. + RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); +} + + +void CodeStub::GenerateFPStubs() { +} + + +void CEntryStub::GenerateAheadOfTime() { + CEntryStub stub(1, kDontSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + CEntryStub save_doubles(1, kSaveFPRegs); + save_doubles.GetCode()->set_is_pregenerated(true); +} + + void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { // Throw exception in eax. __ Throw(rax); @@ -3757,6 +3896,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex); } else { + // Get return address and delta to inlined map check. __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax); @@ -3791,9 +3931,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); } else { // Store offset of true in the root array at the inline check site. - ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias - == 0xB0 - 0x100); - __ movl(rax, Immediate(0xB0)); // TrueValue is at -10 * kPointerSize. + int true_offset = 0x100 + + (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias; + // Assert it is a 1-byte signed value. + ASSERT(true_offset >= 0 && true_offset < 0x100); + __ movl(rax, Immediate(true_offset)); __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax); @@ -3812,9 +3954,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex); } else { // Store offset of false in the root array at the inline check site. 
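The change above (and its mirror for the false value just below) stops hard-coding the patched byte (0xB0/0xB8) and instead derives it from the root-array index, asserting that the biased result still fits in the single byte written by movb at the inline site. The derivation spelled out — the bias constant here is a placeholder, while kPointerSizeLog2 is 3 on x64:

const int kPointerSizeLog2Model = 3;
const int kRootRegisterBiasModel = 128;  // placeholder for the real bias

inline int InlineSiteRootByte(int root_index) {
  int offset = 0x100 +
      (root_index << kPointerSizeLog2Model) - kRootRegisterBiasModel;
  // ASSERT(offset >= 0 && offset < 0x100): must encode as one byte.
  return offset;
}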
- ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias - == 0xB8 - 0x100); - __ movl(rax, Immediate(0xB8)); // FalseValue is at -9 * kPointerSize. + int false_offset = 0x100 + + (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias; + // Assert it is a 1-byte signed value. + ASSERT(false_offset >= 0 && false_offset < 0x100); + __ movl(rax, Immediate(false_offset)); __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax); @@ -3936,22 +4080,23 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Heap::kEmptyStringRootIndex); __ j(not_equal, &call_runtime_); // Get the first of the two strings and load its instance type. - __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset)); + ASSERT(!kScratchRegister.is(scratch_)); + __ movq(kScratchRegister, FieldOperand(object_, ConsString::kFirstOffset)); __ jmp(&assure_seq_string, Label::kNear); // SlicedString, unpack and add offset. __ bind(&sliced_string); __ addq(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset)); - __ movq(object_, FieldOperand(object_, SlicedString::kParentOffset)); + __ movq(kScratchRegister, FieldOperand(object_, SlicedString::kParentOffset)); __ bind(&assure_seq_string); - __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ movq(result_, FieldOperand(kScratchRegister, HeapObject::kMapOffset)); __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); // If the first cons component is also non-flat, then go to runtime. STATIC_ASSERT(kSeqStringTag == 0); __ testb(result_, Immediate(kStringRepresentationMask)); __ j(not_zero, &call_runtime_); - __ jmp(&flat_string); + __ movq(object_, kScratchRegister); // Check for 1-byte or 2-byte string. __ bind(&flat_string); @@ -5271,12 +5416,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { // Call the runtime system in a fresh internal frame. ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); - __ EnterInternalFrame(); - __ push(rdx); - __ push(rax); - __ Push(Smi::FromInt(op_)); - __ CallExternalReference(miss, 3); - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(rdx); + __ push(rax); + __ Push(Smi::FromInt(op_)); + __ CallExternalReference(miss, 3); + } // Compute the entry point of the rewritten stub. __ lea(rdi, FieldOperand(rax, Code::kHeaderSize)); @@ -5407,6 +5553,8 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { + // This stub overrides SometimesSetsUpAFrame() to return false. That means + // we cannot call anything that could cause a GC from this stub. // Stack frame on entry: // esp[0 * kPointerSize]: return address. // esp[1 * kPointerSize]: key's hash. @@ -5492,6 +5640,279 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { } +struct AheadOfTimeWriteBarrierStubList { + Register object, value, address; + RememberedSetAction action; +}; + + +struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { + // Used in RegExpExecStub. + { rbx, rax, rdi, EMIT_REMEMBERED_SET }, + // Used in CompileArrayPushCall. + { rbx, rcx, rdx, EMIT_REMEMBERED_SET }, + // Used in CompileStoreGlobal. + { rbx, rcx, rdx, OMIT_REMEMBERED_SET }, + // Used in StoreStubCompiler::CompileStoreField and + // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. 
+ { rdx, rcx, rbx, EMIT_REMEMBERED_SET }, + // GenerateStoreField calls the stub with two different permutations of + // registers. This is the second. + { rbx, rcx, rdx, EMIT_REMEMBERED_SET }, + // StoreIC::GenerateNormal via GenerateDictionaryStore. + { rbx, r8, r9, EMIT_REMEMBERED_SET }, + // KeyedStoreIC::GenerateGeneric. + { rbx, rdx, rcx, EMIT_REMEMBERED_SET}, + // KeyedStoreStubCompiler::GenerateStoreFastElement. + { rdi, rdx, rcx, EMIT_REMEMBERED_SET}, + // Null termination. + { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET} +}; + + +bool RecordWriteStub::IsPregenerated() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + if (object_.is(entry->object) && + value_.is(entry->value) && + address_.is(entry->address) && + remembered_set_action_ == entry->action && + save_fp_regs_mode_ == kDontSaveFPRegs) { + return true; + } + } + return false; +} + + +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { + StoreBufferOverflowStub stub1(kDontSaveFPRegs); + stub1.GetCode()->set_is_pregenerated(true); + StoreBufferOverflowStub stub2(kSaveFPRegs); + stub2.GetCode()->set_is_pregenerated(true); +} + + +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { + for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; + !entry->object.is(no_reg); + entry++) { + RecordWriteStub stub(entry->object, + entry->value, + entry->address, + entry->action, + kDontSaveFPRegs); + stub.GetCode()->set_is_pregenerated(true); + } +} + + +// Takes the input in 3 registers: address_ value_ and object_. A pointer to +// the value has just been written into the object, now this stub makes sure +// we keep the GC informed. The word in the object where the value has been +// written is in the address register. +void RecordWriteStub::Generate(MacroAssembler* masm) { + Label skip_to_incremental_noncompacting; + Label skip_to_incremental_compacting; + + // The first two instructions are generated with labels so as to get the + // offset fixed up correctly by the bind(Label*) call. We patch it back and + // forth between a compare instructions (a nop in this position) and the + // real branch when we start and stop incremental heap marking. + // See RecordWriteStub::Patch for details. + __ jmp(&skip_to_incremental_noncompacting, Label::kNear); + __ jmp(&skip_to_incremental_compacting, Label::kFar); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&skip_to_incremental_noncompacting); + GenerateIncremental(masm, INCREMENTAL); + + __ bind(&skip_to_incremental_compacting); + GenerateIncremental(masm, INCREMENTAL_COMPACTION); + + // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. + // Will be checked in IncrementalMarking::ActivateGeneratedStub. 
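The two set_byte_at calls that follow rewrite the first bytes emitted above into the STORE_BUFFER_ONLY form: a two-byte cmpb-style nop followed by a five-byte cmpl-style nop, which GetMode/Patch in code-stubs-x64.h later flip to short and near jumps when incremental (or compacting incremental) marking starts. The byte layout, using the opcode constants from the header below:

#include <cstdint>

// 0x3c = cmpb al, imm8 (two-byte nop), 0x3d = cmpl eax, imm32 (five-byte nop);
// patching replaces them with 0xeb (jmp rel8) and 0xe9 (jmp rel32).
inline void SetStoreBufferOnlyMode(uint8_t* stub_start) {
  stub_start[0] = 0x3c;  // kTwoByteNopInstruction
  stub_start[2] = 0x3d;  // kFiveByteNopInstruction
  // Bytes 1 and 3..6 keep the jump displacements laid down by the labels.
}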
+ masm->set_byte_at(0, kTwoByteNopInstruction); + masm->set_byte_at(2, kFiveByteNopInstruction); +} + + +void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { + regs_.Save(masm); + + if (remembered_set_action_ == EMIT_REMEMBERED_SET) { + Label dont_need_remembered_set; + + __ movq(regs_.scratch0(), Operand(regs_.address(), 0)); + __ JumpIfNotInNewSpace(regs_.scratch0(), + regs_.scratch0(), + &dont_need_remembered_set); + + __ CheckPageFlag(regs_.object(), + regs_.scratch0(), + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + not_zero, + &dont_need_remembered_set); + + // First notify the incremental marker if necessary, then update the + // remembered set. + CheckNeedsToInformIncrementalMarker( + masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + + __ bind(&dont_need_remembered_set); + } + + CheckNeedsToInformIncrementalMarker( + masm, kReturnOnNoNeedToInformIncrementalMarker, mode); + InformIncrementalMarker(masm, mode); + regs_.Restore(masm); + __ ret(0); +} + + +void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { + regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); +#ifdef _WIN64 + Register arg3 = r8; + Register arg2 = rdx; + Register arg1 = rcx; +#else + Register arg3 = rdx; + Register arg2 = rsi; + Register arg1 = rdi; +#endif + Register address = + arg1.is(regs_.address()) ? kScratchRegister : regs_.address(); + ASSERT(!address.is(regs_.object())); + ASSERT(!address.is(arg1)); + __ Move(address, regs_.address()); + __ Move(arg1, regs_.object()); + if (mode == INCREMENTAL_COMPACTION) { + // TODO(gc) Can we just set address arg2 in the beginning? + __ Move(arg2, address); + } else { + ASSERT(mode == INCREMENTAL); + __ movq(arg2, Operand(address, 0)); + } + __ LoadAddress(arg3, ExternalReference::isolate_address()); + int argument_count = 3; + + AllowExternalCallThatCantCauseGC scope(masm); + __ PrepareCallCFunction(argument_count); + if (mode == INCREMENTAL_COMPACTION) { + __ CallCFunction( + ExternalReference::incremental_evacuation_record_write_function( + masm->isolate()), + argument_count); + } else { + ASSERT(mode == INCREMENTAL); + __ CallCFunction( + ExternalReference::incremental_marking_record_write_function( + masm->isolate()), + argument_count); + } + regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); +} + + +void RecordWriteStub::CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode) { + Label on_black; + Label need_incremental; + Label need_incremental_pop_object; + + // Let's look at the color of the object: If it is not black we don't have + // to inform the incremental marker. + __ JumpIfBlack(regs_.object(), + regs_.scratch0(), + regs_.scratch1(), + &on_black, + Label::kNear); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&on_black); + + // Get the value from the slot. + __ movq(regs_.scratch0(), Operand(regs_.address(), 0)); + + if (mode == INCREMENTAL_COMPACTION) { + Label ensure_not_white; + + __ CheckPageFlag(regs_.scratch0(), // Contains value. + regs_.scratch1(), // Scratch. 
+ MemoryChunk::kEvacuationCandidateMask, + zero, + &ensure_not_white, + Label::kNear); + + __ CheckPageFlag(regs_.object(), + regs_.scratch1(), // Scratch. + MemoryChunk::kSkipEvacuationSlotsRecordingMask, + zero, + &need_incremental); + + __ bind(&ensure_not_white); + } + + // We need an extra register for this, so we push the object register + // temporarily. + __ push(regs_.object()); + __ EnsureNotWhite(regs_.scratch0(), // The value. + regs_.scratch1(), // Scratch. + regs_.object(), // Scratch. + &need_incremental_pop_object, + Label::kNear); + __ pop(regs_.object()); + + regs_.Restore(masm); + if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { + __ RememberedSetHelper(object_, + address_, + value_, + save_fp_regs_mode_, + MacroAssembler::kReturnAtEnd); + } else { + __ ret(0); + } + + __ bind(&need_incremental_pop_object); + __ pop(regs_.object()); + + __ bind(&need_incremental); + + // Fall through when we need to inform the incremental marker. +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h index 4058118ee..698ba403c 100644 --- a/deps/v8/src/x64/code-stubs-x64.h +++ b/deps/v8/src/x64/code-stubs-x64.h @@ -59,6 +59,32 @@ class TranscendentalCacheStub: public CodeStub { }; +class StoreBufferOverflowStub: public CodeStub { + public: + explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) + : save_doubles_(save_fp) { } + + void Generate(MacroAssembler* masm); + + virtual bool IsPregenerated() { return true; } + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + private: + SaveFPRegsMode save_doubles_; + + Major MajorKey() { return StoreBufferOverflow; } + int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; } +}; + + +// Flag that indicates how to generate code for the stub GenericBinaryOpStub. +enum GenericBinaryFlags { + NO_GENERIC_BINARY_FLAGS = 0, + NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub. +}; + + class UnaryOpStub: public CodeStub { public: UnaryOpStub(Token::Value op, @@ -413,6 +439,8 @@ class StringDictionaryLookupStub: public CodeStub { Register r0, Register r1); + virtual bool SometimesSetsUpAFrame() { return false; } + private: static const int kInlinedProbes = 4; static const int kTotalProbes = 20; @@ -425,7 +453,7 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryNegativeLookup; } + Major MajorKey() { return StringDictionaryLookup; } int MinorKey() { return DictionaryBits::encode(dictionary_.code()) | @@ -446,6 +474,253 @@ class StringDictionaryLookupStub: public CodeStub { }; +class RecordWriteStub: public CodeStub { + public: + RecordWriteStub(Register object, + Register value, + Register address, + RememberedSetAction remembered_set_action, + SaveFPRegsMode fp_mode) + : object_(object), + value_(value), + address_(address), + remembered_set_action_(remembered_set_action), + save_fp_regs_mode_(fp_mode), + regs_(object, // An input reg. + address, // An input reg. + value) { // One scratch reg. + } + + enum Mode { + STORE_BUFFER_ONLY, + INCREMENTAL, + INCREMENTAL_COMPACTION + }; + + virtual bool IsPregenerated(); + static void GenerateFixedRegStubsAheadOfTime(); + virtual bool SometimesSetsUpAFrame() { return false; } + + static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8. + static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8. 
+ + static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32. + static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32. + + static Mode GetMode(Code* stub) { + byte first_instruction = stub->instruction_start()[0]; + byte second_instruction = stub->instruction_start()[2]; + + if (first_instruction == kTwoByteJumpInstruction) { + return INCREMENTAL; + } + + ASSERT(first_instruction == kTwoByteNopInstruction); + + if (second_instruction == kFiveByteJumpInstruction) { + return INCREMENTAL_COMPACTION; + } + + ASSERT(second_instruction == kFiveByteNopInstruction); + + return STORE_BUFFER_ONLY; + } + + static void Patch(Code* stub, Mode mode) { + switch (mode) { + case STORE_BUFFER_ONLY: + ASSERT(GetMode(stub) == INCREMENTAL || + GetMode(stub) == INCREMENTAL_COMPACTION); + stub->instruction_start()[0] = kTwoByteNopInstruction; + stub->instruction_start()[2] = kFiveByteNopInstruction; + break; + case INCREMENTAL: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + stub->instruction_start()[0] = kTwoByteJumpInstruction; + break; + case INCREMENTAL_COMPACTION: + ASSERT(GetMode(stub) == STORE_BUFFER_ONLY); + stub->instruction_start()[0] = kTwoByteNopInstruction; + stub->instruction_start()[2] = kFiveByteJumpInstruction; + break; + } + ASSERT(GetMode(stub) == mode); + CPU::FlushICache(stub->instruction_start(), 7); + } + + private: + // This is a helper class for freeing up 3 scratch registers, where the third + // is always rcx (needed for shift operations). The input is two registers + // that must be preserved and one scratch register provided by the caller. + class RegisterAllocation { + public: + RegisterAllocation(Register object, + Register address, + Register scratch0) + : object_orig_(object), + address_orig_(address), + scratch0_orig_(scratch0), + object_(object), + address_(address), + scratch0_(scratch0) { + ASSERT(!AreAliased(scratch0, object, address, no_reg)); + scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_); + if (scratch0.is(rcx)) { + scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_); + } + if (object.is(rcx)) { + object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_); + } + if (address.is(rcx)) { + address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_); + } + ASSERT(!AreAliased(scratch0_, object_, address_, rcx)); + } + + void Save(MacroAssembler* masm) { + ASSERT(!address_orig_.is(object_)); + ASSERT(object_.is(object_orig_) || address_.is(address_orig_)); + ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_)); + ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_)); + ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_)); + // We don't have to save scratch0_orig_ because it was given to us as + // a scratch register. But if we had to switch to a different reg then + // we should save the new scratch0_. + if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_); + if (!rcx.is(scratch0_orig_) && + !rcx.is(object_orig_) && + !rcx.is(address_orig_)) { + masm->push(rcx); + } + masm->push(scratch1_); + if (!address_.is(address_orig_)) { + masm->push(address_); + masm->movq(address_, address_orig_); + } + if (!object_.is(object_orig_)) { + masm->push(object_); + masm->movq(object_, object_orig_); + } + } + + void Restore(MacroAssembler* masm) { + // These will have been preserved the entire time, so we just need to move + // them back. Only in one case is the orig_ reg different from the plain + // one, since only one of them can alias with rcx. 
+ if (!object_.is(object_orig_)) { + masm->movq(object_orig_, object_); + masm->pop(object_); + } + if (!address_.is(address_orig_)) { + masm->movq(address_orig_, address_); + masm->pop(address_); + } + masm->pop(scratch1_); + if (!rcx.is(scratch0_orig_) && + !rcx.is(object_orig_) && + !rcx.is(address_orig_)) { + masm->pop(rcx); + } + if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_); + } + + // If we have to call into C then we need to save and restore all caller- + // saved registers that were not already preserved. + + // The three scratch registers (incl. rcx) will be restored by other means + // so we don't bother pushing them here. Rbx, rbp and r12-15 are callee + // save and don't need to be preserved. + void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { + masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx); + } + + inline void RestoreCallerSaveRegisters(MacroAssembler*masm, + SaveFPRegsMode mode) { + masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx); + } + + inline Register object() { return object_; } + inline Register address() { return address_; } + inline Register scratch0() { return scratch0_; } + inline Register scratch1() { return scratch1_; } + + private: + Register object_orig_; + Register address_orig_; + Register scratch0_orig_; + Register object_; + Register address_; + Register scratch0_; + Register scratch1_; + // Third scratch register is always rcx. + + Register GetRegThatIsNotRcxOr(Register r1, + Register r2, + Register r3) { + for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + Register candidate = Register::FromAllocationIndex(i); + if (candidate.is(rcx)) continue; + if (candidate.is(r1)) continue; + if (candidate.is(r2)) continue; + if (candidate.is(r3)) continue; + return candidate; + } + UNREACHABLE(); + return no_reg; + } + friend class RecordWriteStub; + }; + + enum OnNoNeedToInformIncrementalMarker { + kReturnOnNoNeedToInformIncrementalMarker, + kUpdateRememberedSetOnNoNeedToInformIncrementalMarker + }; + + void Generate(MacroAssembler* masm); + void GenerateIncremental(MacroAssembler* masm, Mode mode); + void CheckNeedsToInformIncrementalMarker( + MacroAssembler* masm, + OnNoNeedToInformIncrementalMarker on_no_need, + Mode mode); + void InformIncrementalMarker(MacroAssembler* masm, Mode mode); + + Major MajorKey() { return RecordWrite; } + + int MinorKey() { + return ObjectBits::encode(object_.code()) | + ValueBits::encode(value_.code()) | + AddressBits::encode(address_.code()) | + RememberedSetActionBits::encode(remembered_set_action_) | + SaveFPRegsModeBits::encode(save_fp_regs_mode_); + } + + bool MustBeInStubCache() { + // All stubs must be registered in the stub cache + // otherwise IncrementalMarker would not be able to find + // and patch it. 
+ return true; + } + + void Activate(Code* code) { + code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); + } + + class ObjectBits: public BitField<int, 0, 4> {}; + class ValueBits: public BitField<int, 4, 4> {}; + class AddressBits: public BitField<int, 8, 4> {}; + class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {}; + class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {}; + + Register object_; + Register value_; + Register address_; + RememberedSetAction remembered_set_action_; + SaveFPRegsMode save_fp_regs_mode_; + Label slow_; + RegisterAllocation regs_; +}; + + } } // namespace v8::internal #endif // V8_X64_CODE_STUBS_X64_H_ diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index 507bbd44c..f6102c7c7 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -38,12 +38,16 @@ namespace internal { // Platform-specific RuntimeCallHelper functions. void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { - masm->EnterInternalFrame(); + masm->EnterFrame(StackFrame::INTERNAL); + ASSERT(!masm->has_frame()); + masm->set_has_frame(true); } void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { - masm->LeaveInternalFrame(); + masm->LeaveFrame(StackFrame::INTERNAL); + ASSERT(masm->has_frame()); + masm->set_has_frame(false); } diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc index 423e6f244..2149fc2d1 100644 --- a/deps/v8/src/x64/debug-x64.cc +++ b/deps/v8/src/x64/debug-x64.cc @@ -100,64 +100,65 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, RegList non_object_regs, bool convert_call_to_jmp) { // Enter an internal frame. - __ EnterInternalFrame(); - - // Store the registers containing live values on the expression stack to - // make sure that these are correctly updated during GC. Non object values - // are stored as as two smis causing it to be untouched by GC. - ASSERT((object_regs & ~kJSCallerSaved) == 0); - ASSERT((non_object_regs & ~kJSCallerSaved) == 0); - ASSERT((object_regs & non_object_regs) == 0); - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - ASSERT(!reg.is(kScratchRegister)); - if ((object_regs & (1 << r)) != 0) { - __ push(reg); + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as as two smis causing it to be untouched by GC. + ASSERT((object_regs & ~kJSCallerSaved) == 0); + ASSERT((non_object_regs & ~kJSCallerSaved) == 0); + ASSERT((object_regs & non_object_regs) == 0); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + ASSERT(!reg.is(kScratchRegister)); + if ((object_regs & (1 << r)) != 0) { + __ push(reg); + } + // Store the 64-bit value as two smis. + if ((non_object_regs & (1 << r)) != 0) { + __ movq(kScratchRegister, reg); + __ Integer32ToSmi(reg, reg); + __ push(reg); + __ sar(kScratchRegister, Immediate(32)); + __ Integer32ToSmi(kScratchRegister, kScratchRegister); + __ push(kScratchRegister); + } } - // Store the 64-bit value as two smis. 
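[Editor's sketch] The debug-break helper above keeps raw register contents safe across a GC by splitting each 64-bit value into two 32-bit halves and pushing each half as a smi, so the collector only ever sees tagged immediates on the expression stack. A standalone C++ sketch of the same round-trip (the smi tagging is reduced to a toy one-bit shift purely for illustration; real V8 tagging differs):

    #include <cassert>
    #include <cstdint>

    // Toy smi tagging: the split/recombine idea is what matters here.
    static uint64_t TagHalf(uint32_t half) { return uint64_t{half} << 1; }
    static uint32_t UntagHalf(uint64_t smi) { return static_cast<uint32_t>(smi >> 1); }

    int main() {
      uint64_t raw = 0x12345678deadbeefULL;                     // e.g. a saved register
      uint64_t lo = TagHalf(static_cast<uint32_t>(raw));        // low 32 bits as a smi
      uint64_t hi = TagHalf(static_cast<uint32_t>(raw >> 32));  // high 32 bits as a smi
      // ... a GC may run here; both pushed words look like ordinary smis ...
      uint64_t restored = (uint64_t{UntagHalf(hi)} << 32) | UntagHalf(lo);
      assert(restored == raw);
      return 0;
    }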
- if ((non_object_regs & (1 << r)) != 0) { - __ movq(kScratchRegister, reg); - __ Integer32ToSmi(reg, reg); - __ push(reg); - __ sar(kScratchRegister, Immediate(32)); - __ Integer32ToSmi(kScratchRegister, kScratchRegister); - __ push(kScratchRegister); - } - } #ifdef DEBUG - __ RecordComment("// Calling from debug break to runtime - come in - over"); + __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif - __ Set(rax, 0); // No arguments (argc == 0). - __ movq(rbx, ExternalReference::debug_break(masm->isolate())); - - CEntryStub ceb(1); - __ CallStub(&ceb); - - // Restore the register values from the expression stack. - for (int i = kNumJSCallerSaved - 1; i >= 0; i--) { - int r = JSCallerSavedCode(i); - Register reg = { r }; - if (FLAG_debug_code) { - __ Set(reg, kDebugZapValue); - } - if ((object_regs & (1 << r)) != 0) { - __ pop(reg); + __ Set(rax, 0); // No arguments (argc == 0). + __ movq(rbx, ExternalReference::debug_break(masm->isolate())); + + CEntryStub ceb(1); + __ CallStub(&ceb); + + // Restore the register values from the expression stack. + for (int i = kNumJSCallerSaved - 1; i >= 0; i--) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if (FLAG_debug_code) { + __ Set(reg, kDebugZapValue); + } + if ((object_regs & (1 << r)) != 0) { + __ pop(reg); + } + // Reconstruct the 64-bit value from two smis. + if ((non_object_regs & (1 << r)) != 0) { + __ pop(kScratchRegister); + __ SmiToInteger32(kScratchRegister, kScratchRegister); + __ shl(kScratchRegister, Immediate(32)); + __ pop(reg); + __ SmiToInteger32(reg, reg); + __ or_(reg, kScratchRegister); + } } - // Reconstruct the 64-bit value from two smis. - if ((non_object_regs & (1 << r)) != 0) { - __ pop(kScratchRegister); - __ SmiToInteger32(kScratchRegister, kScratchRegister); - __ shl(kScratchRegister, Immediate(32)); - __ pop(reg); - __ SmiToInteger32(reg, reg); - __ or_(reg, kScratchRegister); - } - } - // Get rid of the internal frame. - __ LeaveInternalFrame(); + // Get rid of the internal frame. + } // If this call did not replace a call but patched other code then there will // be an unwanted return address left on the stack. Here we get rid of that. diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc index b52e65932..b7e334ee7 100644 --- a/deps/v8/src/x64/deoptimizer-x64.cc +++ b/deps/v8/src/x64/deoptimizer-x64.cc @@ -197,13 +197,19 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { // Destroy the code which is not supposed to run again. ZapCodeRange(previous_pc, jump_table_address); #endif + Isolate* isolate = code->GetIsolate(); // Add the deoptimizing code to the list. DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); - DeoptimizerData* data = code->GetIsolate()->deoptimizer_data(); + DeoptimizerData* data = isolate->deoptimizer_data(); node->set_next(data->deoptimizing_code_list_); data->deoptimizing_code_list_ = node; + // We might be in the middle of incremental marking with compaction. + // Tell collector to treat this code object in a special way and + // ignore all slots that might have been recorded on it. + isolate->heap()->mark_compact_collector()->InvalidateCode(code); + // Set the code for the function to non-optimized version. 
function->ReplaceCode(function->shared()->code()); @@ -220,7 +226,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } -void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, +void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, + Address pc_after, Code* check_code, Code* replacement_code) { Address call_target_address = pc_after - kIntSize; @@ -250,6 +257,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after, *(call_target_address - 2) = 0x90; // nop Assembler::set_target_address_at(call_target_address, replacement_code->entry()); + + RelocInfo rinfo(call_target_address, + RelocInfo::CODE_TARGET, + 0, + unoptimized_code); + unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode( + unoptimized_code, &rinfo, replacement_code); } @@ -268,6 +282,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after, *(call_target_address - 2) = 0x07; // offset Assembler::set_target_address_at(call_target_address, check_code->entry()); + check_code->GetHeap()->incremental_marking()-> + RecordCodeTargetPatch(call_target_address, check_code); } @@ -713,7 +729,10 @@ void Deoptimizer::EntryGenerator::Generate() { Isolate* isolate = masm()->isolate(); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + } // Preserve deoptimizer object in register rax and get the input // frame descriptor pointer. __ movq(rbx, Operand(rax, Deoptimizer::input_offset())); @@ -759,8 +778,11 @@ void Deoptimizer::EntryGenerator::Generate() { __ PrepareCallCFunction(2); __ movq(arg1, rax); __ LoadAddress(arg2, ExternalReference::isolate_address()); - __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 2); + { + AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction( + ExternalReference::compute_output_frames_function(isolate), 2); + } __ pop(rax); // Replace the current frame with the output frames. diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index bd3e76902..b5c5fc5e7 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -147,6 +147,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { __ bind(&ok); } + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done below). + FrameScope frame_scope(masm_, StackFrame::MANUAL); + __ push(rbp); // Caller's frame pointer. __ movq(rbp, rsp); __ push(rsi); // Callee's context. @@ -195,11 +200,9 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // Store it in the context. int context_offset = Context::SlotOffset(var->index()); __ movq(Operand(rsi, context_offset), rax); - // Update the write barrier. This clobbers all involved - // registers, so we have use a third register to avoid - // clobbering rsi. - __ movq(rcx, rsi); - __ RecordWrite(rcx, context_offset, rax, rbx); + // Update the write barrier. This clobbers rax and rbx. + __ RecordWriteContextSlot( + rsi, context_offset, rax, rbx, kDontSaveFPRegs); } } } @@ -251,7 +254,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // constant. 
if (scope()->is_function_scope() && scope()->function() != NULL) { int ignored = 0; - EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored); + EmitDeclaration(scope()->function(), CONST, NULL, &ignored); } VisitDeclarations(scope()->declarations()); } @@ -638,10 +641,11 @@ void FullCodeGenerator::SetVar(Variable* var, ASSERT(!scratch1.is(src)); MemOperand location = VarOperand(var, scratch0); __ movq(location, src); + // Emit the write barrier code if the location is in the heap. if (var->IsContextSlot()) { int offset = Context::SlotOffset(var->index()); - __ RecordWrite(scratch0, offset, src, scratch1); + __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs); } } @@ -673,7 +677,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state, void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, - Variable::Mode mode, + VariableMode mode, FunctionLiteral* function, int* global_count) { // If it was not possible to allocate the variable at compile time, we @@ -691,7 +695,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, Comment cmnt(masm_, "[ Declaration"); VisitForAccumulatorValue(function); __ movq(StackOperand(variable), result_register()); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { Comment cmnt(masm_, "[ Declaration"); __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex); __ movq(StackOperand(variable), kScratchRegister); @@ -715,10 +719,16 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, VisitForAccumulatorValue(function); __ movq(ContextOperand(rsi, variable->index()), result_register()); int offset = Context::SlotOffset(variable->index()); - __ movq(rbx, rsi); - __ RecordWrite(rbx, offset, result_register(), rcx); + // We know that we have written a function, which is not a smi. + __ RecordWriteContextSlot(rsi, + offset, + result_register(), + rcx, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); PrepareForBailoutForId(proxy->id(), NO_REGISTERS); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { Comment cmnt(masm_, "[ Declaration"); __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex); __ movq(ContextOperand(rsi, variable->index()), kScratchRegister); @@ -732,10 +742,8 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, __ push(rsi); __ Push(variable->name()); // Declaration nodes are always introduced in one of three modes. - ASSERT(mode == Variable::VAR || - mode == Variable::CONST || - mode == Variable::LET); - PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE; + ASSERT(mode == VAR || mode == CONST || mode == LET); + PropertyAttributes attr = (mode == CONST) ? READ_ONLY : NONE; __ Push(Smi::FromInt(attr)); // Push initial value, if any. // Note: For variables we must not push an initial value (such as @@ -743,7 +751,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy, // must not destroy the current value. if (function != NULL) { VisitForStackValue(function); - } else if (mode == Variable::CONST || mode == Variable::LET) { + } else if (mode == CONST || mode == LET) { __ PushRoot(Heap::kTheHoleValueRootIndex); } else { __ Push(Smi::FromInt(0)); // Indicates no initial value. @@ -1168,16 +1176,21 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, // introducing variables. In those cases, we do not want to // perform a runtime call for all variables in the scope // containing the eval. 
- if (var->mode() == Variable::DYNAMIC_GLOBAL) { + if (var->mode() == DYNAMIC_GLOBAL) { EmitLoadGlobalCheckExtensions(var, typeof_state, slow); __ jmp(done); - } else if (var->mode() == Variable::DYNAMIC_LOCAL) { + } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ movq(rax, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == Variable::CONST) { + if (local->mode() == CONST || local->mode() == LET) { __ CompareRoot(rax, Heap::kTheHoleValueRootIndex); __ j(not_equal, done); - __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); + if (local->mode() == CONST) { + __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); + } else { // LET + __ Push(var->name()); + __ CallRuntime(Runtime::kThrowReferenceError, 1); + } } __ jmp(done); } @@ -1208,7 +1221,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { case Variable::LOCAL: case Variable::CONTEXT: { Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot"); - if (var->mode() != Variable::LET && var->mode() != Variable::CONST) { + if (var->mode() != LET && var->mode() != CONST) { context()->Plug(var); } else { // Let and const need a read barrier. @@ -1216,10 +1229,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { GetVar(rax, var); __ CompareRoot(rax, Heap::kTheHoleValueRootIndex); __ j(not_equal, &done, Label::kNear); - if (var->mode() == Variable::LET) { + if (var->mode() == LET) { __ Push(var->name()); __ CallRuntime(Runtime::kThrowReferenceError, 1); - } else { // Variable::CONST + } else { // CONST __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); } __ bind(&done); @@ -1445,13 +1458,23 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { VisitForAccumulatorValue(subexpr); // Store the subexpression value in the array's elements. - __ movq(rbx, Operand(rsp, 0)); // Copy of array literal. - __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset)); + __ movq(r8, Operand(rsp, 0)); // Copy of array literal. + __ movq(rbx, FieldOperand(r8, JSObject::kElementsOffset)); int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ movq(FieldOperand(rbx, offset), result_register()); + Label no_map_change; + __ JumpIfSmi(result_register(), &no_map_change); // Update the write barrier for the array store. - __ RecordWrite(rbx, offset, result_register(), rcx); + __ RecordWriteField(rbx, offset, result_register(), rcx, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset)); + __ CheckFastSmiOnlyElements(rdi, &no_map_change, Label::kNear); + __ push(r8); + __ CallRuntime(Runtime::kNonSmiElementStored, 1); + __ bind(&no_map_change); PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS); } @@ -1756,7 +1779,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); } - } else if (var->mode() == Variable::LET && op != Token::INIT_LET) { + } else if (var->mode() == LET && op != Token::INIT_LET) { // Non-initializing assignment to let variable needs a write barrier. if (var->IsLookupSlot()) { __ push(rax); // Value. 
@@ -1777,11 +1800,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ movq(location, rax); if (var->IsContextSlot()) { __ movq(rdx, rax); - __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx); + __ RecordWriteContextSlot( + rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs); } } - } else if (var->mode() != Variable::CONST) { + } else if (var->mode() != CONST) { // Assignment to var or initializing assignment to let. if (var->IsStackAllocated() || var->IsContextSlot()) { MemOperand location = VarOperand(var, rcx); @@ -1795,7 +1819,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, __ movq(location, rax); if (var->IsContextSlot()) { __ movq(rdx, rax); - __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx); + __ RecordWriteContextSlot( + rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs); } } else { ASSERT(var->IsLookupSlot()); @@ -2004,10 +2029,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, // Push the strict mode flag. In harmony mode every eval call // is a strict mode eval call. - StrictModeFlag strict_mode = strict_mode_flag(); - if (FLAG_harmony_block_scoping) { - strict_mode = kStrictMode; - } + StrictModeFlag strict_mode = + FLAG_harmony_scoping ? kStrictMode : strict_mode_flag(); __ Push(Smi::FromInt(strict_mode)); __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP @@ -2049,7 +2072,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { // context lookup in the runtime system. Label done; Variable* var = proxy->var(); - if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) { + if (!var->IsUnallocated() && var->mode() == DYNAMIC_GLOBAL) { Label slow; EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow); // Push the function and resolve eval. @@ -2545,20 +2568,24 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) { // Check that the object is a JS object but take special care of JS // functions to make sure they have 'Function' as their class. + // Assume that there are only two callable types, and one of them is at + // either end of the type range for JS object types. Saves extra comparisons. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax); // Map is now in rax. __ j(below, &null); - - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE); - __ j(above_equal, &function); - - // Check if the constructor in the map is a function. + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + __ j(equal, &function); + + __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + __ j(equal, &function); + // Assume that there is no larger type. + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); + + // Check if the constructor in the map is a JS function. 
__ movq(rax, FieldOperand(rax, Map::kConstructorOffset)); __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx); __ j(not_equal, &non_function_constructor); @@ -2726,7 +2753,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) { // Update the write barrier. Save the value as it will be // overwritten by the write barrier code and is needed afterward. __ movq(rdx, rax); - __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx); + __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs); __ bind(&done); context()->Plug(rax); @@ -3010,14 +3037,33 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) { __ movq(Operand(index_2, 0), object); __ movq(Operand(index_1, 0), temp); - Label new_space; - __ InNewSpace(elements, temp, equal, &new_space); - - __ movq(object, elements); - __ RecordWriteHelper(object, index_1, temp); - __ RecordWriteHelper(elements, index_2, temp); + Label no_remembered_set; + __ CheckPageFlag(elements, + temp, + 1 << MemoryChunk::SCAN_ON_SCAVENGE, + not_zero, + &no_remembered_set, + Label::kNear); + // Possible optimization: do a check that both values are Smis + // (or them and test against Smi mask.) + + // We are swapping two objects in an array and the incremental marker never + // pauses in the middle of scanning a single object. Therefore the + // incremental marker is not disturbed, so we don't need to call the + // RecordWrite stub that notifies the incremental marker. + __ RememberedSetHelper(elements, + index_1, + temp, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); + __ RememberedSetHelper(elements, + index_2, + temp, + kDontSaveFPRegs, + MacroAssembler::kFallThroughAtEnd); + + __ bind(&no_remembered_set); - __ bind(&new_space); // We are done. Drop elements from the stack, and return undefined. 
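[Editor's sketch] Because the incremental marker never pauses in the middle of scanning a single object, the element swap above only needs the two written slots recorded for the next scavenge, which is what RememberedSetHelper does; the full RecordWrite stub and its marker notification are unnecessary there. A standalone conceptual model of that bookkeeping (plain C++, not the real store-buffer API):

    #include <cstdint>
    #include <vector>

    // Conceptual store buffer: remember every written slot address and hand
    // the batch to an overflow handler once the buffer fills up.
    struct StoreBufferModel {
      static const size_t kLimit = 1024;   // arbitrary capacity for the sketch
      std::vector<uintptr_t> slots;

      void RememberSlot(uintptr_t slot_address) {
        slots.push_back(slot_address);     // record where a pointer was written
        if (slots.size() == kLimit) HandleOverflow();
      }

      void HandleOverflow() {
        // The real overflow path runs a dedicated stub/runtime function; the
        // model just pretends the entries were processed.
        slots.clear();
      }
    };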
__ addq(rsp, Immediate(3 * kPointerSize)); __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); @@ -3833,10 +3879,14 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, - Handle<String> check, - Label* if_true, - Label* if_false, - Label* fall_through) { + Handle<String> check) { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + { AccumulatorValueContext context(this); VisitForTypeofValue(expr); } @@ -3875,9 +3925,11 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, Split(not_zero, if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->function_symbol())) { __ JumpIfSmi(rax, if_false); - STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE); - __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx); - Split(above_equal, if_true, if_false, fall_through); + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx); + __ j(equal, if_true); + __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE); + Split(equal, if_true, if_false, fall_through); } else if (check->Equals(isolate()->heap()->object_symbol())) { __ JumpIfSmi(rax, if_false); if (!FLAG_harmony_typeof) { @@ -3895,18 +3947,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } else { if (if_false != fall_through) __ jmp(if_false); } -} - - -void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr, - Label* if_true, - Label* if_false, - Label* fall_through) { - VisitForAccumulatorValue(expr); - PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - - __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); - Split(equal, if_true, if_false, fall_through); + context()->Plug(if_true, if_false); } @@ -3914,6 +3955,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); SetSourcePosition(expr->position()); + // First we try a fast inlined version of the compare when one of + // the operands is a literal. + if (TryLiteralCompare(expr)) return; + // Always perform the comparison for its control flow. Pack the result // into the expression's context after the comparison is performed. Label materialize_true, materialize_false; @@ -3923,13 +3968,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - // First we try a fast inlined version of the compare when one of - // the operands is a literal. - if (TryLiteralCompare(expr, if_true, if_false, fall_through)) { - context()->Plug(if_true, if_false); - return; - } - Token::Value op = expr->op(); VisitForStackValue(expr->left()); switch (op) { @@ -3957,7 +3995,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Condition cc = no_condition; switch (op) { case Token::EQ_STRICT: - // Fall through. 
case Token::EQ: cc = equal; __ pop(rdx); @@ -4018,8 +4055,9 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } -void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { - Comment cmnt(masm_, "[ CompareToNull"); +void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, + Expression* sub_expr, + NilValue nil) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; @@ -4027,14 +4065,20 @@ void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { context()->PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false, &fall_through); - VisitForAccumulatorValue(expr->expression()); + VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - __ CompareRoot(rax, Heap::kNullValueRootIndex); - if (expr->is_strict()) { + Heap::RootListIndex nil_value = nil == kNullValue ? + Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; + __ CompareRoot(rax, nil_value); + if (expr->op() == Token::EQ_STRICT) { Split(equal, if_true, if_false, fall_through); } else { + Heap::RootListIndex other_nil_value = nil == kNullValue ? + Heap::kUndefinedValueRootIndex : + Heap::kNullValueRootIndex; __ j(equal, if_true); - __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); + __ CompareRoot(rax, other_nil_value); __ j(equal, if_true); __ JumpIfSmi(rax, if_false); // It can be an undetectable object. diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index 9d55594dc..27a96674c 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -221,7 +221,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Update write barrier. Make sure not to clobber the value. __ movq(scratch0, value); - __ RecordWrite(elements, scratch1, scratch0); + __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs); } @@ -606,45 +606,40 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - Label slow, slow_with_tagged_index, fast, array, extra; + Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double; + Label fast_object_with_map_check, fast_object_without_map_check; + Label fast_double_with_map_check, fast_double_without_map_check; // Check that the object isn't a smi. __ JumpIfSmi(rdx, &slow_with_tagged_index); // Get the map from the receiver. - __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); + __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); // Check that the receiver does not require access checks. We need // to do this because this generic stub does not perform map checks. - __ testb(FieldOperand(rbx, Map::kBitFieldOffset), + __ testb(FieldOperand(r9, Map::kBitFieldOffset), Immediate(1 << Map::kIsAccessCheckNeeded)); __ j(not_zero, &slow_with_tagged_index); // Check that the key is a smi. __ JumpIfNotSmi(rcx, &slow_with_tagged_index); __ SmiToInteger32(rcx, rcx); - __ CmpInstanceType(rbx, JS_ARRAY_TYPE); + __ CmpInstanceType(r9, JS_ARRAY_TYPE); __ j(equal, &array); // Check that the object is some kind of JSObject. - __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE); + __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE); __ j(below, &slow); - __ CmpInstanceType(rbx, JS_PROXY_TYPE); - __ j(equal, &slow); - __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE); - __ j(equal, &slow); // Object case: Check key against length in the elements array. 
// rax: value // rdx: JSObject // rcx: index __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); - // Check that the object is in fast mode and writable. - __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), - Heap::kFixedArrayMapRootIndex); - __ j(not_equal, &slow); + // Check array bounds. __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx); // rax: value // rbx: FixedArray // rcx: index - __ j(above, &fast); + __ j(above, &fast_object_with_map_check); // Slow case: call runtime. __ bind(&slow); @@ -666,9 +661,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx); __ j(below_equal, &slow); // Increment index to get new length. + __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset)); + __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex); + __ j(not_equal, &check_extra_double); __ leal(rdi, Operand(rcx, 1)); __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi); - __ jmp(&fast); + __ jmp(&fast_object_without_map_check); + + __ bind(&check_extra_double); + // rdi: elements array's map + __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex); + __ j(not_equal, &slow); + __ leal(rdi, Operand(rcx, 1)); + __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi); + __ jmp(&fast_double_without_map_check); // Array case: Get the length and the elements array from the JS // array. Check that the array is in fast mode (and writable); if it @@ -678,9 +684,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // rdx: receiver (a JSArray) // rcx: index __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); - __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), - Heap::kFixedArrayMapRootIndex); - __ j(not_equal, &slow); // Check the key against the length in the array, compute the // address to store into and fall through to fast case. @@ -688,20 +691,45 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ j(below_equal, &extra); // Fast case: Do the store. - __ bind(&fast); + __ bind(&fast_object_with_map_check); // rax: value // rbx: receiver's elements array (a FixedArray) // rcx: index + // rdx: receiver (a JSArray) + __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset)); + __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex); + __ j(not_equal, &fast_double_with_map_check); + __ bind(&fast_object_without_map_check); + // Smi stores don't require further checks. Label non_smi_value; + __ JumpIfNotSmi(rax, &non_smi_value); + // It's irrelevant whether array is smi-only or not when writing a smi. __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize), rax); - __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear); __ ret(0); + __ bind(&non_smi_value); - // Slow case that needs to retain rcx for use by RecordWrite. - // Update write barrier for the elements array address. + // Writing a non-smi, check whether array allows non-smi elements. + // r9: receiver's map + __ CheckFastObjectElements(r9, &slow, Label::kNear); + __ lea(rcx, + FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize)); + __ movq(Operand(rcx, 0), rax); __ movq(rdx, rax); - __ RecordWriteNonSmi(rbx, 0, rdx, rcx); + __ RecordWrite( + rbx, rcx, rdx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ ret(0); + + __ bind(&fast_double_with_map_check); + // Check for fast double array case. If this fails, call through to the + // runtime. 
+ // rdi: elements array's map + __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex); + __ j(not_equal, &slow); + __ bind(&fast_double_without_map_check); + // If the value is a number, store it as a double in the FastDoubleElements + // array. + __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0, &slow); __ ret(0); } @@ -846,21 +874,22 @@ static void GenerateCallMiss(MacroAssembler* masm, __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // Enter an internal frame. - __ EnterInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); - // Push the receiver and the name of the function. - __ push(rdx); - __ push(rcx); + // Push the receiver and the name of the function. + __ push(rdx); + __ push(rcx); - // Call the entry. - CEntryStub stub(1); - __ Set(rax, 2); - __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate())); - __ CallStub(&stub); + // Call the entry. + CEntryStub stub(1); + __ Set(rax, 2); + __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate())); + __ CallStub(&stub); - // Move result to rdi and exit the internal frame. - __ movq(rdi, rax); - __ LeaveInternalFrame(); + // Move result to rdi and exit the internal frame. + __ movq(rdi, rax); + } // Check if the receiver is a global object of some sort. // This can happen only for regular CallIC but not KeyedCallIC. @@ -1002,13 +1031,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // This branch is taken when calling KeyedCallIC_Miss is neither required // nor beneficial. __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1); - __ EnterInternalFrame(); - __ push(rcx); // save the key - __ push(rdx); // pass the receiver - __ push(rcx); // pass the key - __ CallRuntime(Runtime::kKeyedGetProperty, 2); - __ pop(rcx); // restore the key - __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(rcx); // save the key + __ push(rdx); // pass the receiver + __ push(rcx); // pass the key + __ CallRuntime(Runtime::kKeyedGetProperty, 2); + __ pop(rcx); // restore the key + } __ movq(rdi, rax); __ jmp(&do_call); @@ -1212,7 +1242,12 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ movq(mapped_location, rax); __ lea(r9, mapped_location); __ movq(r8, rax); - __ RecordWrite(rbx, r9, r8); + __ RecordWrite(rbx, + r9, + r8, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + INLINE_SMI_CHECK); __ Ret(); __ bind(¬in); // The unmapped lookup expects that the parameter map is in rbx. @@ -1221,7 +1256,12 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ movq(unmapped_location, rax); __ lea(r9, unmapped_location); __ movq(r8, rax); - __ RecordWrite(rbx, r9, r8); + __ RecordWrite(rbx, + r9, + r8, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + INLINE_SMI_CHECK); __ Ret(); __ bind(&slow); GenerateMiss(masm, false); diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc index 9064a266e..45aaad754 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.cc +++ b/deps/v8/src/x64/lithium-codegen-x64.cc @@ -81,6 +81,12 @@ bool LCodeGen::GenerateCode() { HPhase phase("Code generation", chunk()); ASSERT(is_unused()); status_ = GENERATING; + + // Open a frame scope to indicate that there is a frame on the stack. The + // MANUAL indicates that the scope shouldn't actually generate code to set up + // the frame (that is done in GeneratePrologue). 
+ FrameScope frame_scope(masm_, StackFrame::MANUAL); + return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && @@ -217,11 +223,8 @@ bool LCodeGen::GeneratePrologue() { // Store it in the context. int context_offset = Context::SlotOffset(var->index()); __ movq(Operand(rsi, context_offset), rax); - // Update the write barrier. This clobbers all involved - // registers, so we have use a third register to avoid - // clobbering rsi. - __ movq(rcx, rsi); - __ RecordWrite(rcx, context_offset, rax, rbx); + // Update the write barrier. This clobbers rax and rbx. + __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs); } } Comment(";;; End allocate local context"); @@ -280,6 +283,9 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); + Comment(";;; Deferred code @%d: %s.", + code->instruction_index(), + code->instr()->Mnemonic()); code->Generate(); __ jmp(code->exit()); } @@ -667,7 +673,7 @@ void LCodeGen::RecordSafepoint( int deoptimization_index) { ASSERT(kind == expected_safepoint_kind_); - const ZoneList<LOperand*>* operands = pointers->operands(); + const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); Safepoint safepoint = safepoints_.DefineSafepoint(masm(), kind, arguments, deoptimization_index); @@ -1577,30 +1583,33 @@ void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) { } -void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) { +void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) { Register reg = ToRegister(instr->InputAt(0)); - int false_block = chunk_->LookupDestination(instr->false_block_id()); + // If the expression is known to be untagged or a smi, then it's definitely + // not null, and it can't be a an undetectable object. if (instr->hydrogen()->representation().IsSpecialization() || instr->hydrogen()->type().IsSmi()) { - // If the expression is known to untagged or smi, then it's definitely - // not null, and it can't be a an undetectable object. - // Jump directly to the false block. EmitGoto(false_block); return; } int true_block = chunk_->LookupDestination(instr->true_block_id()); - - __ CompareRoot(reg, Heap::kNullValueRootIndex); - if (instr->is_strict()) { + Heap::RootListIndex nil_value = instr->nil() == kNullValue ? + Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; + __ CompareRoot(reg, nil_value); + if (instr->kind() == kStrictEquality) { EmitBranch(true_block, false_block, equal); } else { + Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ? + Heap::kUndefinedValueRootIndex : + Heap::kNullValueRootIndex; Label* true_label = chunk_->GetAssemblyLabel(true_block); Label* false_label = chunk_->GetAssemblyLabel(false_block); __ j(equal, true_label); - __ CompareRoot(reg, Heap::kUndefinedValueRootIndex); + __ CompareRoot(reg, other_nil_value); __ j(equal, true_label); __ JumpIfSmi(reg, false_label); // Check for undetectable objects by looking in the bit field in @@ -1752,30 +1761,40 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, Handle<String> class_name, Register input, - Register temp) { + Register temp, + Register scratch) { __ JumpIfSmi(input, is_false); - __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); - __ j(below, is_false); - // Map is now in temp. - // Functions have class 'Function'. 
- __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE); if (class_name->IsEqualTo(CStrVector("Function"))) { - __ j(above_equal, is_true); + // Assuming the following assertions, we can use the same compares to test + // for both being a function type and being in the object type range. + STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); + STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == + FIRST_SPEC_OBJECT_TYPE + 1); + STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == + LAST_SPEC_OBJECT_TYPE - 1); + STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); + __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp); + __ j(below, is_false); + __ j(equal, is_true); + __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE); + __ j(equal, is_true); } else { - __ j(above_equal, is_false); + // Faster code path to avoid two compares: subtract lower bound from the + // actual type and do a signed compare with the width of the type range. + __ movq(temp, FieldOperand(input, HeapObject::kMapOffset)); + __ movq(scratch, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ subb(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ cmpb(scratch, + Immediate(static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE))); + __ j(above, is_false); } + // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. // Check if the constructor in the map is a function. __ movq(temp, FieldOperand(temp, Map::kConstructorOffset)); - // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and - // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after - // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter. - STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); - STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE == - LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1); - // Objects with a non-function constructor have class 'Object'. 
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister); if (class_name->IsEqualTo(CStrVector("Object"))) { @@ -1804,6 +1823,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { Register input = ToRegister(instr->InputAt(0)); Register temp = ToRegister(instr->TempAt(0)); + Register temp2 = ToRegister(instr->TempAt(1)); Handle<String> class_name = instr->hydrogen()->class_name(); int true_block = chunk_->LookupDestination(instr->true_block_id()); @@ -1812,7 +1832,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { Label* true_label = chunk_->GetAssemblyLabel(true_block); Label* false_label = chunk_->GetAssemblyLabel(false_block); - EmitClassOfTest(true_label, false_label, class_name, input, temp); + EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2); EmitBranch(true_block, false_block, equal); } @@ -1853,9 +1873,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { virtual void Generate() { codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_); } - + virtual LInstruction* instr() { return instr_; } Label* map_check() { return &map_check_; } - private: LInstanceOfKnownGlobal* instr_; Label map_check_; @@ -1996,7 +2015,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL); __ movq(result, Operand(result, 0)); } - if (instr->hydrogen()->check_hole_value()) { + if (instr->hydrogen()->RequiresHoleCheck()) { __ CompareRoot(result, Heap::kTheHoleValueRootIndex); DeoptimizeIf(equal, instr->environment()); } @@ -2016,25 +2035,39 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { + Register object = ToRegister(instr->TempAt(0)); + Register address = ToRegister(instr->TempAt(1)); Register value = ToRegister(instr->InputAt(0)); - Register temp = ToRegister(instr->TempAt(0)); - ASSERT(!value.is(temp)); - bool check_hole = instr->hydrogen()->check_hole_value(); - if (!check_hole && value.is(rax)) { - __ store_rax(instr->hydrogen()->cell().location(), - RelocInfo::GLOBAL_PROPERTY_CELL); - return; - } + ASSERT(!value.is(object)); + Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell()); + + __ movq(address, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL); + // If the cell we are storing to contains the hole it could have // been deleted from the property dictionary. In that case, we need // to update the property details in the property dictionary to mark // it as no longer deleted. We deoptimize in that case. - __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL); - if (check_hole) { - __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ CompareRoot(Operand(address, 0), Heap::kTheHoleValueRootIndex); DeoptimizeIf(equal, instr->environment()); } - __ movq(Operand(temp, 0), value); + + // Store the value. + __ movq(Operand(address, 0), value); + + Label smi_store; + __ JumpIfSmi(value, &smi_store, Label::kNear); + + int offset = JSGlobalPropertyCell::kValueOffset - kHeapObjectTag; + __ lea(object, Operand(address, -offset)); + // Cells are always in the remembered set. 
+ __ RecordWrite(object, + address, + value, + kSaveFPRegs, + OMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + __ bind(&smi_store); } @@ -2064,7 +2097,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { if (instr->needs_write_barrier()) { int offset = Context::SlotOffset(instr->slot_index()); Register scratch = ToRegister(instr->TempAt(0)); - __ RecordWrite(context, offset, value, scratch); + __ RecordWriteContextSlot(context, offset, value, scratch, kSaveFPRegs); } } @@ -2283,17 +2316,15 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( LLoadKeyedFastDoubleElement* instr) { XMMRegister result(ToDoubleRegister(instr->result())); - if (instr->hydrogen()->RequiresHoleCheck()) { - int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + - sizeof(kHoleNanLower32); - Operand hole_check_operand = BuildFastArrayOperand( - instr->elements(), - instr->key(), - FAST_DOUBLE_ELEMENTS, - offset); - __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); - DeoptimizeIf(equal, instr->environment()); - } + int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + + sizeof(kHoleNanLower32); + Operand hole_check_operand = BuildFastArrayOperand( + instr->elements(), + instr->key(), + FAST_DOUBLE_ELEMENTS, + offset); + __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); + DeoptimizeIf(equal, instr->environment()); Operand double_load_operand = BuildFastArrayOperand( instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS, @@ -2365,6 +2396,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -2681,6 +2713,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { virtual void Generate() { codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); } + virtual LInstruction* instr() { return instr_; } private: LUnaryMathOperation* instr_; }; @@ -2977,7 +3010,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { ASSERT(ToRegister(instr->result()).is(rax)); int arity = instr->arity(); - CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT); + CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); __ Drop(1); @@ -3033,7 +3066,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { Register temp = ToRegister(instr->TempAt(0)); // Update the write barrier for the object for in-object properties. - __ RecordWrite(object, offset, value, temp); + __ RecordWriteField(object, offset, value, temp, kSaveFPRegs); } } else { Register temp = ToRegister(instr->TempAt(0)); @@ -3042,7 +3075,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->needs_write_barrier()) { // Update the write barrier for the properties array. // object is used as a scratch register. 
- __ RecordWrite(temp, offset, value, object); + __ RecordWriteField(temp, offset, value, object, kSaveFPRegs); } } } @@ -3090,6 +3123,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: + case FAST_SMI_ONLY_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: @@ -3125,6 +3159,13 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register elements = ToRegister(instr->object()); Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; + // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS + // conversion, so it deopts in that case. + if (instr->hydrogen()->ValueNeedsSmiCheck()) { + Condition cc = masm()->CheckSmi(value); + DeoptimizeIf(NegateCondition(cc), instr->environment()); + } + // Do the store. if (instr->key()->IsConstantOperand()) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); @@ -3146,7 +3187,7 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { key, times_pointer_size, FixedArray::kHeaderSize)); - __ RecordWrite(elements, key, value); + __ RecordWrite(elements, key, value, kSaveFPRegs); } } @@ -3196,6 +3237,7 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStringCharCodeAt* instr_; }; @@ -3316,6 +3358,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStringCharFromCode* instr_; }; @@ -3392,6 +3435,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } + virtual LInstruction* instr() { return instr_; } private: LNumberTagD* instr_; }; @@ -3487,16 +3531,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, } -class DeferredTaggedToI: public LDeferredCode { - public: - DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } - private: - LTaggedToI* instr_; -}; - - void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { Label done, heap_number; Register input_reg = ToRegister(instr->InputAt(0)); @@ -3545,6 +3579,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { void LCodeGen::DoTaggedToI(LTaggedToI* instr) { + class DeferredTaggedToI: public LDeferredCode { + public: + DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); } + virtual LInstruction* instr() { return instr_; } + private: + LTaggedToI* instr_; + }; + LOperand* input = instr->InputAt(0); ASSERT(input->IsRegister()); ASSERT(input->Equals(instr->result())); @@ -3981,9 +4025,12 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, final_branch_condition = not_zero; } else if (type_name->Equals(heap()->function_symbol())) { + 
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); - __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input); - final_branch_condition = above_equal; + __ CmpObjectType(input, JS_FUNCTION_TYPE, input); + __ j(equal, true_label); + __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE); + final_branch_condition = equal; } else if (type_name->Equals(heap()->object_symbol())) { __ JumpIfSmi(input, false_label); @@ -4109,6 +4156,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); } + virtual LInstruction* instr() { return instr_; } private: LStackCheck* instr_; }; diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h index 8cb4cece9..106d7bb2e 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.h +++ b/deps/v8/src/x64/lithium-codegen-x64.h @@ -140,7 +140,8 @@ class LCodeGen BASE_EMBEDDED { Label* if_false, Handle<String> class_name, Register input, - Register temporary); + Register temporary, + Register scratch); int GetStackSlotCount() const { return chunk()->spill_slot_count(); } int GetParameterCount() const { return scope()->num_parameters(); } @@ -345,16 +346,20 @@ class LCodeGen BASE_EMBEDDED { class LDeferredCode: public ZoneObject { public: explicit LDeferredCode(LCodeGen* codegen) - : codegen_(codegen), external_exit_(NULL) { + : codegen_(codegen), + external_exit_(NULL), + instruction_index_(codegen->current_instruction_) { codegen->AddDeferredCode(this); } virtual ~LDeferredCode() { } virtual void Generate() = 0; + virtual LInstruction* instr() = 0; void SetExit(Label *exit) { external_exit_ = exit; } Label* entry() { return &entry_; } Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; } + int instruction_index() const { return instruction_index_; } protected: LCodeGen* codegen() const { return codegen_; } @@ -365,6 +370,7 @@ class LDeferredCode: public ZoneObject { Label entry_; Label exit_; Label* external_exit_; + int instruction_index_; }; } } // namespace v8::internal diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc index 5fc56462b..a67a59320 100644 --- a/deps/v8/src/x64/lithium-x64.cc +++ b/deps/v8/src/x64/lithium-x64.cc @@ -214,10 +214,11 @@ void LCmpIDAndBranch::PrintDataTo(StringStream* stream) { } -void LIsNullAndBranch::PrintDataTo(StringStream* stream) { +void LIsNilAndBranch::PrintDataTo(StringStream* stream) { stream->Add("if "); InputAt(0)->PrintTo(stream); - stream->Add(is_strict() ? " === null" : " == null"); + stream->Add(kind() == kStrictEquality ? " === " : " == "); + stream->Add(nil() == kNullValue ? 
"null" : "undefined"); stream->Add(" then B%d else B%d", true_block_id(), false_block_id()); } @@ -706,7 +707,9 @@ LInstruction* LChunkBuilder::DefineFixedDouble( LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { HEnvironment* hydrogen_env = current_block_->last_environment(); - instr->set_environment(CreateEnvironment(hydrogen_env)); + int argument_index_accumulator = 0; + instr->set_environment(CreateEnvironment(hydrogen_env, + &argument_index_accumulator)); return instr; } @@ -989,10 +992,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { } -LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { +LEnvironment* LChunkBuilder::CreateEnvironment( + HEnvironment* hydrogen_env, + int* argument_index_accumulator) { if (hydrogen_env == NULL) return NULL; - LEnvironment* outer = CreateEnvironment(hydrogen_env->outer()); + LEnvironment* outer = + CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator); int ast_id = hydrogen_env->ast_id(); ASSERT(ast_id != AstNode::kNoNumber); int value_count = hydrogen_env->length(); @@ -1002,7 +1008,6 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { argument_count_, value_count, outer); - int argument_index = 0; for (int i = 0; i < value_count; ++i) { if (hydrogen_env->is_special_index(i)) continue; @@ -1011,7 +1016,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) { if (value->IsArgumentsObject()) { op = NULL; } else if (value->IsPushArgument()) { - op = new LArgument(argument_index++); + op = new LArgument((*argument_index_accumulator)++); } else { op = UseAny(value); } @@ -1436,10 +1441,10 @@ LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch( } -LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) { +LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) { ASSERT(instr->value()->representation().IsTagged()); - LOperand* temp = instr->is_strict() ? NULL : TempRegister(); - return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp); + LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister(); + return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp); } @@ -1489,6 +1494,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch( LInstruction* LChunkBuilder::DoClassOfTestAndBranch( HClassOfTestAndBranch* instr) { return new LClassOfTestAndBranch(UseTempRegister(instr->value()), + TempRegister(), TempRegister()); } @@ -1716,7 +1722,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { LLoadGlobalCell* result = new LLoadGlobalCell; - return instr->check_hole_value() + return instr->RequiresHoleCheck() ? AssignEnvironment(DefineAsRegister(result)) : DefineAsRegister(result); } @@ -1731,8 +1737,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { LStoreGlobalCell* result = - new LStoreGlobalCell(UseRegister(instr->value()), TempRegister()); - return instr->check_hole_value() ? AssignEnvironment(result) : result; + new LStoreGlobalCell(UseTempRegister(instr->value()), + TempRegister(), + TempRegister()); + return instr->RequiresHoleCheck() ? 
AssignEnvironment(result) : result; } diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h index d169bf6df..d43a86a9a 100644 --- a/deps/v8/src/x64/lithium-x64.h +++ b/deps/v8/src/x64/lithium-x64.h @@ -107,7 +107,7 @@ class LCodeGen; V(Integer32ToDouble) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ - V(IsNullAndBranch) \ + V(IsNilAndBranch) \ V(IsObjectAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ @@ -609,17 +609,18 @@ class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> { }; -class LIsNullAndBranch: public LControlInstruction<1, 1> { +class LIsNilAndBranch: public LControlInstruction<1, 1> { public: - LIsNullAndBranch(LOperand* value, LOperand* temp) { + LIsNilAndBranch(LOperand* value, LOperand* temp) { inputs_[0] = value; temps_[0] = temp; } - DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch) + DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch") + DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch) - bool is_strict() const { return hydrogen()->is_strict(); } + EqualityKind kind() const { return hydrogen()->kind(); } + NilValue nil() const { return hydrogen()->nil(); } virtual void PrintDataTo(StringStream* stream); }; @@ -705,11 +706,12 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> { }; -class LClassOfTestAndBranch: public LControlInstruction<1, 1> { +class LClassOfTestAndBranch: public LControlInstruction<1, 2> { public: - LClassOfTestAndBranch(LOperand* value, LOperand* temp) { + LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) { inputs_[0] = value; temps_[0] = temp; + temps_[1] = temp2; } DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, @@ -1197,11 +1199,12 @@ class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> { }; -class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> { +class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> { public: - explicit LStoreGlobalCell(LOperand* value, LOperand* temp) { + explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) { inputs_[0] = value; - temps_[0] = temp; + temps_[0] = temp1; + temps_[1] = temp2; } DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell") @@ -2146,7 +2149,8 @@ class LChunkBuilder BASE_EMBEDDED { LInstruction* instr, int ast_id); void ClearInstructionPendingDeoptimizationEnvironment(); - LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env); + LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env, + int* argument_index_accumulator); void VisitInstruction(HInstruction* current); diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index 9cfc9b658..7fe6d5821 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -44,6 +44,7 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) : Assembler(arg_isolate, buffer, size), generating_stub_(false), allow_stub_calls_(true), + has_frame_(false), root_array_available_(true) { if (isolate() != NULL) { code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), @@ -196,28 +197,47 @@ void MacroAssembler::CompareRoot(const Operand& with, } -void MacroAssembler::RecordWriteHelper(Register object, - Register addr, - Register scratch) { - if (emit_debug_code()) { - // Check that the object is not in new space. 
- Label not_in_new_space; - InNewSpace(object, scratch, not_equal, ¬_in_new_space, Label::kNear); - Abort("new-space object passed to RecordWriteHelper"); - bind(¬_in_new_space); +void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. + Register addr, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetFinalAction and_then) { + if (FLAG_debug_code) { + Label ok; + JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear); + int3(); + bind(&ok); + } + // Load store buffer top. + LoadRoot(scratch, Heap::kStoreBufferTopRootIndex); + // Store pointer to buffer. + movq(Operand(scratch, 0), addr); + // Increment buffer top. + addq(scratch, Immediate(kPointerSize)); + // Write back new top of buffer. + StoreRoot(scratch, Heap::kStoreBufferTopRootIndex); + // Call stub on end of buffer. + Label done; + // Check for end of buffer. + testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); + if (and_then == kReturnAtEnd) { + Label buffer_overflowed; + j(not_equal, &buffer_overflowed, Label::kNear); + ret(0); + bind(&buffer_overflowed); + } else { + ASSERT(and_then == kFallThroughAtEnd); + j(equal, &done, Label::kNear); + } + StoreBufferOverflowStub store_buffer_overflow = + StoreBufferOverflowStub(save_fp); + CallStub(&store_buffer_overflow); + if (and_then == kReturnAtEnd) { + ret(0); + } else { + ASSERT(and_then == kFallThroughAtEnd); + bind(&done); } - - // Compute the page start address from the heap object pointer, and reuse - // the 'object' register for it. - and_(object, Immediate(~Page::kPageAlignmentMask)); - - // Compute number of region covering addr. See Page::GetRegionNumberForAddress - // method for more details. - shrl(addr, Immediate(Page::kRegionSizeLog2)); - andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2)); - - // Set dirty mark for region. - bts(Operand(object, Page::kDirtyFlagOffset), addr); } @@ -225,7 +245,7 @@ void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc, Label* branch, - Label::Distance near_jump) { + Label::Distance distance) { if (Serializer::enabled()) { // Can't do arithmetic on external references if it might get serialized. // The mask isn't really an address. We load it as an external reference in @@ -240,7 +260,7 @@ void MacroAssembler::InNewSpace(Register object, } movq(kScratchRegister, ExternalReference::new_space_start(isolate())); cmpq(scratch, kScratchRegister); - j(cc, branch, near_jump); + j(cc, branch, distance); } else { ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask()))); intptr_t new_space_start = @@ -252,127 +272,128 @@ void MacroAssembler::InNewSpace(Register object, lea(scratch, Operand(object, kScratchRegister, times_1, 0)); } and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask()))); - j(cc, branch, near_jump); + j(cc, branch, distance); } } -void MacroAssembler::RecordWrite(Register object, - int offset, - Register value, - Register index) { +void MacroAssembler::RecordWriteField( + Register object, + int offset, + Register value, + Register dst, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { // The compiled code assumes that record write doesn't change the // context register, so we check that none of the clobbered // registers are rsi. - ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi)); + ASSERT(!value.is(rsi) && !dst.is(rsi)); // First, check if a write barrier is even needed. The tests below - // catch stores of smis and stores into the young generation. 
+ // catch stores of Smis. Label done; - JumpIfSmi(value, &done); - RecordWriteNonSmi(object, offset, value, index); + // Skip barrier if writing a smi. + if (smi_check == INLINE_SMI_CHECK) { + JumpIfSmi(value, &done); + } + + // Although the object register is tagged, the offset is relative to the start + // of the object, so so offset must be a multiple of kPointerSize. + ASSERT(IsAligned(offset, kPointerSize)); + + lea(dst, FieldOperand(object, offset)); + if (emit_debug_code()) { + Label ok; + testb(dst, Immediate((1 << kPointerSizeLog2) - 1)); + j(zero, &ok, Label::kNear); + int3(); + bind(&ok); + } + + RecordWrite( + object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); + bind(&done); - // Clobber all input registers when running with the debug-code flag - // turned on to provoke errors. This clobbering repeats the - // clobbering done inside RecordWriteNonSmi but it's necessary to - // avoid having the fast case for smis leave the registers - // unchanged. + // Clobber clobbered input registers when running with the debug-code flag + // turned on to provoke errors. if (emit_debug_code()) { - movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); - movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE); } } void MacroAssembler::RecordWrite(Register object, Register address, - Register value) { + Register value, + SaveFPRegsMode fp_mode, + RememberedSetAction remembered_set_action, + SmiCheck smi_check) { // The compiled code assumes that record write doesn't change the // context register, so we check that none of the clobbered // registers are rsi. - ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi)); - - // First, check if a write barrier is even needed. The tests below - // catch stores of smis and stores into the young generation. - Label done; - JumpIfSmi(value, &done); + ASSERT(!value.is(rsi) && !address.is(rsi)); - InNewSpace(object, value, equal, &done); - - RecordWriteHelper(object, address, value); - - bind(&done); - - // Clobber all input registers when running with the debug-code flag - // turned on to provoke errors. + ASSERT(!object.is(value)); + ASSERT(!object.is(address)); + ASSERT(!value.is(address)); if (emit_debug_code()) { - movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); - movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE); - movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + AbortIfSmi(object); + } + + if (remembered_set_action == OMIT_REMEMBERED_SET && + !FLAG_incremental_marking) { + return; } -} + if (FLAG_debug_code) { + Label ok; + cmpq(value, Operand(address, 0)); + j(equal, &ok, Label::kNear); + int3(); + bind(&ok); + } -void MacroAssembler::RecordWriteNonSmi(Register object, - int offset, - Register scratch, - Register index) { + // First, check if a write barrier is even needed. The tests below + // catch stores of smis and stores into the young generation. Label done; - if (emit_debug_code()) { - Label okay; - JumpIfNotSmi(object, &okay, Label::kNear); - Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); - bind(&okay); - - if (offset == 0) { - // index must be int32. - Register tmp = index.is(rax) ? rbx : rax; - push(tmp); - movl(tmp, index); - cmpq(tmp, index); - Check(equal, "Index register for RecordWrite must be untagged int32."); - pop(tmp); - } + if (smi_check == INLINE_SMI_CHECK) { + // Skip barrier if writing a smi. 
+ JumpIfSmi(value, &done); } - // Test that the object address is not in the new space. We cannot - // update page dirty marks for new space pages. - InNewSpace(object, scratch, equal, &done); + CheckPageFlag(value, + value, // Used as scratch. + MemoryChunk::kPointersToHereAreInterestingMask, + zero, + &done, + Label::kNear); - // The offset is relative to a tagged or untagged HeapObject pointer, - // so either offset or offset + kHeapObjectTag must be a - // multiple of kPointerSize. - ASSERT(IsAligned(offset, kPointerSize) || - IsAligned(offset + kHeapObjectTag, kPointerSize)); + CheckPageFlag(object, + value, // Used as scratch. + MemoryChunk::kPointersFromHereAreInterestingMask, + zero, + &done, + Label::kNear); - Register dst = index; - if (offset != 0) { - lea(dst, Operand(object, offset)); - } else { - // array access: calculate the destination address in the same manner as - // KeyedStoreIC::GenerateGeneric. - lea(dst, FieldOperand(object, - index, - times_pointer_size, - FixedArray::kHeaderSize)); - } - RecordWriteHelper(object, dst, scratch); + RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); + CallStub(&stub); bind(&done); - // Clobber all input registers when running with the debug-code flag + // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); - movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); - movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); } } + void MacroAssembler::Assert(Condition cc, const char* msg) { if (emit_debug_code()) Check(cc, msg); } @@ -400,7 +421,7 @@ void MacroAssembler::Check(Condition cc, const char* msg) { Label L; j(cc, &L, Label::kNear); Abort(msg); - // will not return here + // Control will not return here. bind(&L); } @@ -448,9 +469,6 @@ void MacroAssembler::Abort(const char* msg) { RecordComment(msg); } #endif - // Disable stub call restrictions to always allow calls to abort. - AllowStubCallsScope allow_scope(this, true); - push(rax); movq(kScratchRegister, p0, RelocInfo::NONE); push(kScratchRegister); @@ -458,20 +476,28 @@ void MacroAssembler::Abort(const char* msg) { reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))), RelocInfo::NONE); push(kScratchRegister); - CallRuntime(Runtime::kAbort, 2); - // will not return here + + if (!has_frame_) { + // We don't actually want to generate a pile of code for this, so just + // claim there is a stack frame, without generating one. + FrameScope scope(this, StackFrame::NONE); + CallRuntime(Runtime::kAbort, 2); + } else { + CallRuntime(Runtime::kAbort, 2); + } + // Control will not return here. int3(); } void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) { - ASSERT(allow_stub_calls()); // calls are not allowed in some stubs + ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); } MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. + ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. 
MaybeObject* result = stub->TryGetCode(); if (!result->IsFailure()) { call(Handle<Code>(Code::cast(result->ToObjectUnchecked())), @@ -482,13 +508,12 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) { void MacroAssembler::TailCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. + ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); Jump(stub->GetCode(), RelocInfo::CODE_TARGET); } MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls()); // Calls are not allowed in some stubs. MaybeObject* result = stub->TryGetCode(); if (!result->IsFailure()) { jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())), @@ -504,6 +529,12 @@ void MacroAssembler::StubReturn(int argc) { } +bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { + if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; + return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); +} + + void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { addq(rsp, Immediate(num_arguments * kPointerSize)); @@ -540,8 +571,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); Set(rax, function->nargs); LoadAddress(rbx, ExternalReference(function, isolate())); - CEntryStub ces(1); - ces.SaveDoubles(); + CEntryStub ces(1, kSaveFPRegs); CallStub(&ces); } @@ -795,8 +825,8 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference( void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, const CallWrapper& call_wrapper) { - // Calls are not allowed in some stubs. - ASSERT(flag == JUMP_FUNCTION || allow_stub_calls()); + // You can't call a builtin without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); // Rely on the assertion to check that the number of provided // arguments match the expected number of arguments. Fake a @@ -825,6 +855,57 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { } +static const Register saved_regs[] = + { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 }; +static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register); + + +void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, + Register exclusion1, + Register exclusion2, + Register exclusion3) { + // We don't allow a GC during a store buffer overflow so there is no need to + // store the registers in any particular way, but we do have to store and + // restore them. + for (int i = 0; i < kNumberOfSavedRegs; i++) { + Register reg = saved_regs[i]; + if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { + push(reg); + } + } + // R12 to r15 are callee save on all platforms. 
+ if (fp_mode == kSaveFPRegs) { + CpuFeatures::Scope scope(SSE2); + subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); + for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + XMMRegister reg = XMMRegister::from_code(i); + movsd(Operand(rsp, i * kDoubleSize), reg); + } + } +} + + +void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, + Register exclusion1, + Register exclusion2, + Register exclusion3) { + if (fp_mode == kSaveFPRegs) { + CpuFeatures::Scope scope(SSE2); + for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + XMMRegister reg = XMMRegister::from_code(i); + movsd(reg, Operand(rsp, i * kDoubleSize)); + } + addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); + } + for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) { + Register reg = saved_regs[i]; + if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { + pop(reg); + } + } +} + + void MacroAssembler::Set(Register dst, int64_t x) { if (x == 0) { xorl(dst, dst); @@ -2567,13 +2648,91 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { void MacroAssembler::CheckFastElements(Register map, Label* fail, Label::Distance distance) { - STATIC_ASSERT(FAST_ELEMENTS == 0); + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Immediate(Map::kMaximumBitField2FastElementValue)); + j(above, fail, distance); +} + + +void MacroAssembler::CheckFastObjectElements(Register map, + Label* fail, + Label::Distance distance) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + STATIC_ASSERT(FAST_ELEMENTS == 1); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue)); + j(below_equal, fail, distance); cmpb(FieldOperand(map, Map::kBitField2Offset), Immediate(Map::kMaximumBitField2FastElementValue)); j(above, fail, distance); } +void MacroAssembler::CheckFastSmiOnlyElements(Register map, + Label* fail, + Label::Distance distance) { + STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); + cmpb(FieldOperand(map, Map::kBitField2Offset), + Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue)); + j(above, fail, distance); +} + + +void MacroAssembler::StoreNumberToDoubleElements( + Register maybe_number, + Register elements, + Register key, + XMMRegister xmm_scratch, + Label* fail) { + Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done; + + JumpIfSmi(maybe_number, &smi_value, Label::kNear); + + CheckMap(maybe_number, + isolate()->factory()->heap_number_map(), + fail, + DONT_DO_SMI_CHECK); + + // Double value, canonicalize NaN. + uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32); + cmpl(FieldOperand(maybe_number, offset), + Immediate(kNaNOrInfinityLowerBoundUpper32)); + j(greater_equal, &maybe_nan, Label::kNear); + + bind(¬_nan); + movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset)); + bind(&have_double_value); + movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize), + xmm_scratch); + jmp(&done); + + bind(&maybe_nan); + // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise + // it's an Infinity, and the non-NaN code path applies. + j(greater, &is_nan, Label::kNear); + cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0)); + j(zero, ¬_nan); + bind(&is_nan); + // Convert all NaNs to the same canonical NaN value when they are stored in + // the double array. 
+ Set(kScratchRegister, BitCast<uint64_t>( + FixedDoubleArray::canonical_not_the_hole_nan_as_double())); + movq(xmm_scratch, kScratchRegister); + jmp(&have_double_value, Label::kNear); + + bind(&smi_value); + // Value is a smi. convert to a double and store. + // Preserve original value. + SmiToInteger32(kScratchRegister, maybe_number); + cvtlsi2sd(xmm_scratch, kScratchRegister); + movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize), + xmm_scratch); + bind(&done); +} + + void MacroAssembler::CheckMap(Register obj, Handle<Map> map, Label* fail, @@ -2787,10 +2946,10 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) { #ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { - ASSERT(allow_stub_calls()); Set(rax, 0); // No arguments. LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate())); CEntryStub ces(1); + ASSERT(AllowThisStubCall(&ces)); Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); } #endif // ENABLE_DEBUGGER_SUPPORT @@ -2816,6 +2975,9 @@ void MacroAssembler::InvokeCode(Register code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; InvokePrologue(expected, actual, @@ -2847,6 +3009,9 @@ void MacroAssembler::InvokeCode(Handle<Code> code, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + Label done; Register dummy = rax; InvokePrologue(expected, @@ -2877,6 +3042,9 @@ void MacroAssembler::InvokeFunction(Register function, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + ASSERT(function.is(rdi)); movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); movq(rsi, FieldOperand(function, JSFunction::kContextOffset)); @@ -2896,6 +3064,9 @@ void MacroAssembler::InvokeFunction(JSFunction* function, InvokeFlag flag, const CallWrapper& call_wrapper, CallKind call_kind) { + // You can't call a function without a valid frame. + ASSERT(flag == JUMP_FUNCTION || has_frame()); + ASSERT(function->is_compiled()); // Get the function and setup the context. Move(rdi, Handle<JSFunction>(function)); @@ -3759,6 +3930,20 @@ void MacroAssembler::CopyBytes(Register destination, } +void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler) { + Label loop, entry; + jmp(&entry); + bind(&loop); + movq(Operand(start_offset, 0), filler); + addq(start_offset, Immediate(kPointerSize)); + bind(&entry); + cmpq(start_offset, end_offset); + j(less, &loop); +} + + void MacroAssembler::LoadContext(Register dst, int context_chain_length) { if (context_chain_length > 0) { // Move up the chain of contexts to the context containing the slot. @@ -3858,6 +4043,7 @@ void MacroAssembler::CallCFunction(ExternalReference function, void MacroAssembler::CallCFunction(Register function, int num_arguments) { + ASSERT(has_frame()); // Check stack alignment. 
if (emit_debug_code()) { CheckStackAlignment(); @@ -3872,6 +4058,17 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) { } +bool AreAliased(Register r1, Register r2, Register r3, Register r4) { + if (r1.is(r2)) return true; + if (r1.is(r3)) return true; + if (r1.is(r4)) return true; + if (r2.is(r3)) return true; + if (r2.is(r4)) return true; + if (r3.is(r4)) return true; + return false; +} + + CodePatcher::CodePatcher(byte* address, int size) : address_(address), size_(size), @@ -3892,6 +4089,195 @@ CodePatcher::~CodePatcher() { ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); } + +void MacroAssembler::CheckPageFlag( + Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance) { + ASSERT(cc == zero || cc == not_zero); + if (scratch.is(object)) { + and_(scratch, Immediate(~Page::kPageAlignmentMask)); + } else { + movq(scratch, Immediate(~Page::kPageAlignmentMask)); + and_(scratch, object); + } + if (mask < (1 << kBitsPerByte)) { + testb(Operand(scratch, MemoryChunk::kFlagsOffset), + Immediate(static_cast<uint8_t>(mask))); + } else { + testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask)); + } + j(cc, condition_met, condition_met_distance); +} + + +void MacroAssembler::JumpIfBlack(Register object, + Register bitmap_scratch, + Register mask_scratch, + Label* on_black, + Label::Distance on_black_distance) { + ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx)); + GetMarkBits(object, bitmap_scratch, mask_scratch); + + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + // The mask_scratch register contains a 1 at the position of the first bit + // and a 0 at all other positions, including the position of the second bit. + movq(rcx, mask_scratch); + // Make rcx into a mask that covers both marking bits using the operation + // rcx = mask | (mask << 1). + lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0)); + // Note that we are using a 4-byte aligned 8-byte load. + and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); + cmpq(mask_scratch, rcx); + j(equal, on_black, on_black_distance); +} + + +// Detect some, but not all, common pointer-free objects. This is used by the +// incremental write barrier which doesn't care about oddballs (they are always +// marked black immediately so this code is not hit). +void MacroAssembler::JumpIfDataObject( + Register value, + Register scratch, + Label* not_data_object, + Label::Distance not_data_object_distance) { + Label is_data_object; + movq(scratch, FieldOperand(value, HeapObject::kMapOffset)); + CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); + j(equal, &is_data_object, Label::kNear); + ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + // If it's a string and it's not a cons string then it's an object containing + // no GC pointers. + testb(FieldOperand(scratch, Map::kInstanceTypeOffset), + Immediate(kIsIndirectStringMask | kIsNotStringMask)); + j(not_zero, not_data_object, not_data_object_distance); + bind(&is_data_object); +} + + +void MacroAssembler::GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg) { + ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx)); + movq(bitmap_reg, addr_reg); + // Sign extended 32 bit immediate. 
+ and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); + movq(rcx, addr_reg); + int shift = + Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; + shrl(rcx, Immediate(shift)); + and_(rcx, + Immediate((Page::kPageAlignmentMask >> shift) & + ~(Bitmap::kBytesPerCell - 1))); + + addq(bitmap_reg, rcx); + movq(rcx, addr_reg); + shrl(rcx, Immediate(kPointerSizeLog2)); + and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); + movl(mask_reg, Immediate(1)); + shl_cl(mask_reg); +} + + +void MacroAssembler::EnsureNotWhite( + Register value, + Register bitmap_scratch, + Register mask_scratch, + Label* value_is_white_and_not_data, + Label::Distance distance) { + ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx)); + GetMarkBits(value, bitmap_scratch, mask_scratch); + + // If the value is black or grey we don't need to do anything. + ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); + ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); + ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); + ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); + + Label done; + + // Since both black and grey have a 1 in the first position and white does + // not have a 1 there we only need to check one bit. + testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); + j(not_zero, &done, Label::kNear); + + if (FLAG_debug_code) { + // Check for impossible bit pattern. + Label ok; + push(mask_scratch); + // shl. May overflow making the check conservative. + addq(mask_scratch, mask_scratch); + testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); + j(zero, &ok, Label::kNear); + int3(); + bind(&ok); + pop(mask_scratch); + } + + // Value is white. We check whether it is data that doesn't need scanning. + // Currently only checks for HeapNumber and non-cons strings. + Register map = rcx; // Holds map while checking type. + Register length = rcx; // Holds length of object after checking type. + Label not_heap_number; + Label is_data_object; + + // Check for heap-number + movq(map, FieldOperand(value, HeapObject::kMapOffset)); + CompareRoot(map, Heap::kHeapNumberMapRootIndex); + j(not_equal, ¬_heap_number, Label::kNear); + movq(length, Immediate(HeapNumber::kSize)); + jmp(&is_data_object, Label::kNear); + + bind(¬_heap_number); + // Check for strings. + ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); + ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); + // If it's a string and it's not a cons string then it's an object containing + // no GC pointers. + Register instance_type = rcx; + movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); + testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask)); + j(not_zero, value_is_white_and_not_data); + // It's a non-indirect (non-cons and non-slice) string. + // If it's external, the length is just ExternalString::kSize. + // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). + Label not_external; + // External strings are the only ones with the kExternalStringTag bit + // set. + ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); + ASSERT_EQ(0, kConsStringTag & kExternalStringTag); + testb(instance_type, Immediate(kExternalStringTag)); + j(zero, ¬_external, Label::kNear); + movq(length, Immediate(ExternalString::kSize)); + jmp(&is_data_object, Label::kNear); + + bind(¬_external); + // Sequential string, either ASCII or UC16. 
+ ASSERT(kAsciiStringTag == 0x04); + and_(length, Immediate(kStringEncodingMask)); + xor_(length, Immediate(kStringEncodingMask)); + addq(length, Immediate(0x04)); + // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2. + imul(length, FieldOperand(value, String::kLengthOffset)); + shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize)); + addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); + and_(length, Immediate(~kObjectAlignmentMask)); + + bind(&is_data_object); + // Value is a data object, and it is white. Mark it black. Since we know + // that the object is white we can make it black by flipping one bit. + or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); + + and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask)); + addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length); + + bind(&done); +} + } } // namespace v8::internal #endif // V8_TARGET_ARCH_X64 diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index e7eb104c0..7e0ba0054 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -29,6 +29,7 @@ #define V8_X64_MACRO_ASSEMBLER_X64_H_ #include "assembler.h" +#include "frames.h" #include "v8globals.h" namespace v8 { @@ -61,6 +62,11 @@ static const int kRootRegisterBias = 128; // Convenience for platform-independent signatures. typedef Operand MemOperand; +enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; +enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; + +bool AreAliased(Register r1, Register r2, Register r3, Register r4); + // Forward declaration. class JumpTarget; @@ -72,6 +78,7 @@ struct SmiIndex { ScaleFactor scale; }; + // MacroAssembler implements a collection of frequently used macros. class MacroAssembler: public Assembler { public: @@ -134,56 +141,145 @@ class MacroAssembler: public Assembler { void CompareRoot(const Operand& with, Heap::RootListIndex index); void PushRoot(Heap::RootListIndex index); - // --------------------------------------------------------------------------- - // GC Support - - // For page containing |object| mark region covering |addr| dirty. - // RecordWriteHelper only works if the object is not in new - // space. - void RecordWriteHelper(Register object, - Register addr, - Register scratch); - - // Check if object is in new space. The condition cc can be equal or - // not_equal. If it is equal a jump will be done if the object is on new - // space. The register scratch can be object itself, but it will be clobbered. - void InNewSpace(Register object, - Register scratch, - Condition cc, - Label* branch, - Label::Distance near_jump = Label::kFar); + // These functions do not arrange the registers in any particular order so + // they are not useful for calls that can cause a GC. The caller can + // exclude up to 3 registers that do not need to be saved and restored. + void PushCallerSaved(SaveFPRegsMode fp_mode, + Register exclusion1 = no_reg, + Register exclusion2 = no_reg, + Register exclusion3 = no_reg); + void PopCallerSaved(SaveFPRegsMode fp_mode, + Register exclusion1 = no_reg, + Register exclusion2 = no_reg, + Register exclusion3 = no_reg); + +// --------------------------------------------------------------------------- +// GC Support + + + enum RememberedSetFinalAction { + kReturnAtEnd, + kFallThroughAtEnd + }; - // For page containing |object| mark region covering [object+offset] - // dirty. 
|object| is the object being stored into, |value| is the - // object being stored. If |offset| is zero, then the |scratch| - // register contains the array index into the elements array - // represented as an untagged 32-bit integer. All registers are - // clobbered by the operation. RecordWrite filters out smis so it - // does not update the write barrier if the value is a smi. - void RecordWrite(Register object, - int offset, - Register value, - Register scratch); - - // For page containing |object| mark region covering [address] + // Record in the remembered set the fact that we have a pointer to new space + // at the address pointed to by the addr register. Only works if addr is not + // in new space. + void RememberedSetHelper(Register object, // Used for debug code. + Register addr, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetFinalAction and_then); + + void CheckPageFlag(Register object, + Register scratch, + int mask, + Condition cc, + Label* condition_met, + Label::Distance condition_met_distance = Label::kFar); + + // Check if object is in new space. Jumps if the object is not in new space. + // The register scratch can be object itself, but scratch will be clobbered. + void JumpIfNotInNewSpace(Register object, + Register scratch, + Label* branch, + Label::Distance distance = Label::kFar) { + InNewSpace(object, scratch, not_equal, branch, distance); + } + + // Check if object is in new space. Jumps if the object is in new space. + // The register scratch can be object itself, but it will be clobbered. + void JumpIfInNewSpace(Register object, + Register scratch, + Label* branch, + Label::Distance distance = Label::kFar) { + InNewSpace(object, scratch, equal, branch, distance); + } + + // Check if an object has the black incremental marking color. Also uses rcx! + void JumpIfBlack(Register object, + Register scratch0, + Register scratch1, + Label* on_black, + Label::Distance on_black_distance = Label::kFar); + + // Detects conservatively whether an object is data-only, ie it does need to + // be scanned by the garbage collector. + void JumpIfDataObject(Register value, + Register scratch, + Label* not_data_object, + Label::Distance not_data_object_distance); + + // Checks the color of an object. If the object is already grey or black + // then we just fall through, since it is already live. If it is white and + // we can determine that it doesn't need to be scanned, then we just mark it + // black and fall through. For the rest we jump to the label so the + // incremental marker can fix its assumptions. + void EnsureNotWhite(Register object, + Register scratch1, + Register scratch2, + Label* object_is_white_and_not_data, + Label::Distance distance); + + // Notify the garbage collector that we wrote a pointer into an object. + // |object| is the object being stored into, |value| is the object being + // stored. value and scratch registers are clobbered by the operation. + // The offset is the offset from the start of the object, not the offset from + // the tagged HeapObject pointer. For use with FieldOperand(reg, off). + void RecordWriteField( + Register object, + int offset, + Register value, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); + + // As above, but the offset has the tag presubtracted. For use with + // Operand(reg, off). 
+ void RecordWriteContextSlot( + Register context, + int offset, + Register value, + Register scratch, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK) { + RecordWriteField(context, + offset + kHeapObjectTag, + value, + scratch, + save_fp, + remembered_set_action, + smi_check); + } + + // Notify the garbage collector that we wrote a pointer into a fixed array. + // |array| is the array being stored into, |value| is the + // object being stored. |index| is the array index represented as a + // Smi. All registers are clobbered by the operation RecordWriteArray + // filters out smis so it does not update the write barrier if the + // value is a smi. + void RecordWriteArray( + Register array, + Register value, + Register index, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); + + // For page containing |object| mark region covering |address| // dirty. |object| is the object being stored into, |value| is the - // object being stored. All registers are clobbered by the + // object being stored. The address and value registers are clobbered by the // operation. RecordWrite filters out smis so it does not update // the write barrier if the value is a smi. - void RecordWrite(Register object, - Register address, - Register value); - - // For page containing |object| mark region covering [object+offset] dirty. - // The value is known to not be a smi. - // object is the object being stored into, value is the object being stored. - // If offset is zero, then the scratch register contains the array index into - // the elements array represented as an untagged 32-bit integer. - // All registers are clobbered by the operation. - void RecordWriteNonSmi(Register object, - int offset, - Register value, - Register scratch); + void RecordWrite( + Register object, + Register address, + Register value, + SaveFPRegsMode save_fp, + RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, + SmiCheck smi_check = INLINE_SMI_CHECK); #ifdef ENABLE_DEBUGGER_SUPPORT // --------------------------------------------------------------------------- @@ -192,15 +288,6 @@ class MacroAssembler: public Assembler { void DebugBreak(); #endif - // --------------------------------------------------------------------------- - // Activation frames - - void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } - void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } - - void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } - void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } - // Enter specific kind of exit frame; either in normal or // debug mode. Expects the number of arguments in register rax and // sets up the number of arguments in register rdi and the pointer @@ -760,6 +847,28 @@ class MacroAssembler: public Assembler { Label* fail, Label::Distance distance = Label::kFar); + // Check if a map for a JSObject indicates that the object can have both smi + // and HeapObject elements. Jump to the specified label if it does not. + void CheckFastObjectElements(Register map, + Label* fail, + Label::Distance distance = Label::kFar); + + // Check if a map for a JSObject indicates that the object has fast smi only + // elements. Jump to the specified label if it does not. 
+ void CheckFastSmiOnlyElements(Register map, + Label* fail, + Label::Distance distance = Label::kFar); + + // Check to see if maybe_number can be stored as a double in + // FastDoubleElements. If it can, store it at the index specified by key in + // the FastDoubleElements array elements, otherwise jump to fail. + // Note that key must not be smi-tagged. + void StoreNumberToDoubleElements(Register maybe_number, + Register elements, + Register key, + XMMRegister xmm_scratch, + Label* fail); + // Check if the map of an object is equal to a specified map and // branch to label if not. Skip the smi check if not required // (object is known to be a heap object) @@ -1119,6 +1228,13 @@ class MacroAssembler: public Assembler { int min_length = 0, Register scratch = kScratchRegister); + // Initialize fields with filler values. Fields starting at |start_offset| + // not including end_offset are overwritten with the value in |filler|. At + // the end the loop, |start_offset| takes the value of |end_offset|. + void InitializeFieldsWithFiller(Register start_offset, + Register end_offset, + Register filler); + // --------------------------------------------------------------------------- // StatsCounter support @@ -1151,11 +1267,18 @@ class MacroAssembler: public Assembler { bool generating_stub() { return generating_stub_; } void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; } bool allow_stub_calls() { return allow_stub_calls_; } + void set_has_frame(bool value) { has_frame_ = value; } + bool has_frame() { return has_frame_; } + inline bool AllowThisStubCall(CodeStub* stub); static int SafepointRegisterStackIndex(Register reg) { return SafepointRegisterStackIndex(reg.code()); } + // Activation support. + void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); + private: // Order general registers are pushed by Pushad. // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15. @@ -1165,6 +1288,7 @@ class MacroAssembler: public Assembler { bool generating_stub_; bool allow_stub_calls_; + bool has_frame_; bool root_array_available_; // Returns a register holding the smi value. The register MUST NOT be @@ -1188,10 +1312,6 @@ class MacroAssembler: public Assembler { const CallWrapper& call_wrapper = NullCallWrapper(), CallKind call_kind = CALL_AS_METHOD); - // Activation support. - void EnterFrame(StackFrame::Type type); - void LeaveFrame(StackFrame::Type type); - void EnterExitFramePrologue(bool save_rax); // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack @@ -1218,6 +1338,20 @@ class MacroAssembler: public Assembler { Register scratch, bool gc_allowed); + // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace. + void InNewSpace(Register object, + Register scratch, + Condition cc, + Label* branch, + Label::Distance distance = Label::kFar); + + // Helper for finding the mark bits for an address. Afterwards, the + // bitmap register points at the word with the mark bits and the mask + // the position of the first bit. Uses rcx as scratch and leaves addr_reg + // unchanged. + inline void GetMarkBits(Register addr_reg, + Register bitmap_reg, + Register mask_reg); // Compute memory operands for safepoint stack slots. 
Operand SafepointRegisterSlot(Register reg); diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc index a782bd705..55fabc003 100644 --- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc +++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc @@ -193,7 +193,7 @@ void RegExpMacroAssemblerX64::CheckCharacterGT(uc16 limit, Label* on_greater) { void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) { Label not_at_start; // Did we start the match at the start of the string at all? - __ cmpb(Operand(rbp, kStartIndex), Immediate(0)); + __ cmpl(Operand(rbp, kStartIndex), Immediate(0)); BranchOrBacktrack(not_equal, ¬_at_start); // If we did, are we still at the start of the input? __ lea(rax, Operand(rsi, rdi, times_1, 0)); @@ -205,7 +205,7 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) { void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) { // Did we start the match at the start of the string at all? - __ cmpb(Operand(rbp, kStartIndex), Immediate(0)); + __ cmpl(Operand(rbp, kStartIndex), Immediate(0)); BranchOrBacktrack(not_equal, on_not_at_start); // If we did, are we still at the start of the input? __ lea(rax, Operand(rsi, rdi, times_1, 0)); @@ -431,9 +431,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( // Isolate. __ LoadAddress(rcx, ExternalReference::isolate_address()); #endif - ExternalReference compare = - ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate()); - __ CallCFunction(compare, num_arguments); + + { // NOLINT: Can't find a way to open this scope without confusing the + // linter. + AllowExternalCallThatCantCauseGC scope(&masm_); + ExternalReference compare = + ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate()); + __ CallCFunction(compare, num_arguments); + } // Restore original values before reacting on result value. __ Move(code_object_pointer(), masm_.CodeObject()); @@ -706,7 +711,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { // registers we need. // Entry code: __ bind(&entry_label_); - // Start new stack frame. + + // Tell the system that we have a stack frame. Because the type is MANUAL, no + // is generated. + FrameScope scope(&masm_, StackFrame::MANUAL); + + // Actually emit code to start a new stack frame. __ push(rbp); __ movq(rbp, rsp); // Save parameters and callee-save registers. Order here should correspond diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index 76d255579..c4b2672f6 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -645,7 +645,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { scratch1, scratch2, scratch3, name, miss_label); - __ EnterInternalFrame(); + FrameScope scope(masm, StackFrame::INTERNAL); // Save the name_ register across the call. __ push(name_); @@ -662,7 +662,8 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Restore the name_ register. __ pop(name_); - __ LeaveInternalFrame(); + + // Leave the internal frame. } void LoadWithInterceptor(MacroAssembler* masm, @@ -670,19 +671,21 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register holder, JSObject* holder_obj, Label* interceptor_succeeded) { - __ EnterInternalFrame(); - __ push(holder); // Save the holder. - __ push(name_); // Save the name. - - CompileCallLoadPropertyWithInterceptor(masm, - receiver, - holder, - name_, - holder_obj); - - __ pop(name_); // Restore the name. - __ pop(receiver); // Restore the holder. 
- __ LeaveInternalFrame(); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ push(holder); // Save the holder. + __ push(name_); // Save the name. + + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); + + __ pop(name_); // Restore the name. + __ pop(receiver); // Restore the holder. + // Leave the internal frame. + } __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex); __ j(not_equal, interceptor_succeeded); @@ -781,7 +784,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. __ movq(name_reg, rax); - __ RecordWrite(receiver_reg, offset, name_reg, scratch); + __ RecordWriteField( + receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs); } else { // Write to the properties array. int offset = index * kPointerSize + FixedArray::kHeaderSize; @@ -792,7 +796,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. __ movq(name_reg, rax); - __ RecordWrite(scratch, offset, name_reg, receiver_reg); + __ RecordWriteField( + scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs); } // Return the value (register rax). @@ -1139,40 +1144,42 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Save necessary data before invoking an interceptor. // Requires a frame to make GC aware of pushed pointers. - __ EnterInternalFrame(); + { + FrameScope frame_scope(masm(), StackFrame::INTERNAL); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - // CALLBACKS case needs a receiver to be passed into C++ callback. - __ push(receiver); - } - __ push(holder_reg); - __ push(name_reg); - - // Invoke an interceptor. Note: map checks from receiver to - // interceptor's holder has been compiled before (see a caller - // of this method.) - CompileCallLoadPropertyWithInterceptor(masm(), - receiver, - holder_reg, - name_reg, - interceptor_holder); - - // Check if interceptor provided a value for property. If it's - // the case, return immediately. - Label interceptor_failed; - __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex); - __ j(equal, &interceptor_failed); - __ LeaveInternalFrame(); - __ ret(0); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + // CALLBACKS case needs a receiver to be passed into C++ callback. + __ push(receiver); + } + __ push(holder_reg); + __ push(name_reg); - __ bind(&interceptor_failed); - __ pop(name_reg); - __ pop(holder_reg); - if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { - __ pop(receiver); - } + // Invoke an interceptor. Note: map checks from receiver to + // interceptor's holder has been compiled before (see a caller + // of this method.) + CompileCallLoadPropertyWithInterceptor(masm(), + receiver, + holder_reg, + name_reg, + interceptor_holder); + + // Check if interceptor provided a value for property. If it's + // the case, return immediately. + Label interceptor_failed; + __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex); + __ j(equal, &interceptor_failed); + frame_scope.GenerateLeaveFrame(); + __ ret(0); - __ LeaveInternalFrame(); + __ bind(&interceptor_failed); + __ pop(name_reg); + __ pop(holder_reg); + if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) { + __ pop(receiver); + } + + // Leave the internal frame. 
+ } // Check that the maps from interceptor's holder to lookup's holder // haven't changed. And load lookup's holder into |holder| register. @@ -1421,7 +1428,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ j(not_equal, &call_builtin); if (argc == 1) { // Otherwise fall through to call builtin. - Label exit, with_write_barrier, attempt_to_grow_elements; + Label attempt_to_grow_elements, with_write_barrier; // Get the array's length into rax and calculate new length. __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset)); @@ -1435,30 +1442,40 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ cmpl(rax, rcx); __ j(greater, &attempt_to_grow_elements); + // Check if value is a smi. + __ movq(rcx, Operand(rsp, argc * kPointerSize)); + __ JumpIfNotSmi(rcx, &with_write_barrier); + // Save new length. __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax); // Push the element. - __ movq(rcx, Operand(rsp, argc * kPointerSize)); __ lea(rdx, FieldOperand(rbx, rax, times_pointer_size, FixedArray::kHeaderSize - argc * kPointerSize)); __ movq(Operand(rdx, 0), rcx); - // Check if value is a smi. __ Integer32ToSmi(rax, rax); // Return new length as smi. - - __ JumpIfNotSmi(rcx, &with_write_barrier); - - __ bind(&exit); __ ret((argc + 1) * kPointerSize); __ bind(&with_write_barrier); - __ InNewSpace(rbx, rcx, equal, &exit); + __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset)); + __ CheckFastObjectElements(rdi, &call_builtin); + + // Save new length. + __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax); + + // Push the element. + __ lea(rdx, FieldOperand(rbx, + rax, times_pointer_size, + FixedArray::kHeaderSize - argc * kPointerSize)); + __ movq(Operand(rdx, 0), rcx); - __ RecordWriteHelper(rbx, rdx, rcx); + __ RecordWrite( + rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + __ Integer32ToSmi(rax, rax); // Return new length as smi. __ ret((argc + 1) * kPointerSize); __ bind(&attempt_to_grow_elements); @@ -1466,6 +1483,15 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ jmp(&call_builtin); } + __ movq(rdi, Operand(rsp, argc * kPointerSize)); + // Growing elements that are SMI-only requires special handling in case + // the new element is non-Smi. For now, delegate to the builtin. + Label no_fast_elements_check; + __ JumpIfSmi(rdi, &no_fast_elements_check); + __ movq(rsi, FieldOperand(rdx, HeapObject::kMapOffset)); + __ CheckFastObjectElements(rsi, &call_builtin, Label::kFar); + __ bind(&no_fast_elements_check); + ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(isolate()); ExternalReference new_space_allocation_limit = @@ -1489,16 +1515,22 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, // We fit and could grow elements. __ Store(new_space_allocation_top, rcx); - __ movq(rcx, Operand(rsp, argc * kPointerSize)); // Push the argument... - __ movq(Operand(rdx, 0), rcx); + __ movq(Operand(rdx, 0), rdi); // ... and fill the rest with holes. __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex); for (int i = 1; i < kAllocationDelta; i++) { __ movq(Operand(rdx, i * kPointerSize), kScratchRegister); } + // We know the elements array is in new space so we don't need the + // remembered set, but we just pushed a value onto it so we may have to + // tell the incremental marker to rescan the object that we just grew. 
We
+      // don't need to worry about the holes because they are in old space and
+      // already marked black.
+      __ RecordWrite(rbx, rdx, rdi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+
       // Restore receiver to rdx as finish sequence assumes it's here.
       __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1510,7 +1542,6 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
       __ Integer32ToSmi(rax, rax);
       __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
-      // Elements are in new space, so write barrier is not required.
       __ ret((argc + 1) * kPointerSize);
     }
 
@@ -2463,19 +2494,36 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
          Handle<Map>(object->map()));
   __ j(not_equal, &miss);
 
+  // Compute the cell operand to use.
+  __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+  Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
+
   // Check that the value in the cell is not the hole. If it is, this
   // cell could have been deleted and reintroducing the global needs
   // to update the property details in the property dictionary of the
   // global object. We bail out to the runtime system to do that.
-  __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
-  __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
-                 Heap::kTheHoleValueRootIndex);
+  __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
   __ j(equal, &miss);
 
   // Store the value in the cell.
-  __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
+  __ movq(cell_operand, rax);
+  Label done;
+  __ JumpIfSmi(rax, &done);
+
+  __ movq(rcx, rax);
+  __ lea(rdx, cell_operand);
+  // Cells are always in the remembered set.
+  __ RecordWrite(rbx,  // Object.
+                 rdx,  // Address.
+                 rcx,  // Value.
+                 kDontSaveFPRegs,
+                 OMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
 
+  // Return the value (register rax).
+  __ bind(&done);
+
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1);
   __ ret(0);
 
@@ -2555,9 +2603,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
 }
 
 
-MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+MaybeObject* KeyedStoreStubCompiler::CompileStorePolymorphic(
     MapList* receiver_maps,
-    CodeList* handler_ics) {
+    CodeList* handler_stubs,
+    MapList* transitioned_maps) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -2565,18 +2614,25 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-  __ JumpIfSmi(rdx, &miss);
+  __ JumpIfSmi(rdx, &miss, Label::kNear);
 
-  Register map_reg = rbx;
-  __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
   int receiver_count = receiver_maps->length();
-  for (int current = 0; current < receiver_count; ++current) {
+  for (int i = 0; i < receiver_count; ++i) {
     // Check map and tail call if there's a match
-    Handle<Map> map(receiver_maps->at(current));
-    __ Cmp(map_reg, map);
-    __ j(equal,
-         Handle<Code>(handler_ics->at(current)),
-         RelocInfo::CODE_TARGET);
+    Handle<Map> map(receiver_maps->at(i));
+    __ Cmp(rdi, map);
+    if (transitioned_maps->at(i) == NULL) {
+      __ j(equal, Handle<Code>(handler_stubs->at(i)), RelocInfo::CODE_TARGET);
+    } else {
+      Label next_map;
+      __ j(not_equal, &next_map, Label::kNear);
+      __ movq(rbx,
+              Handle<Map>(transitioned_maps->at(i)),
+              RelocInfo::EMBEDDED_OBJECT);
+      __ jmp(Handle<Code>(handler_stubs->at(i)), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
   }
 
   __ bind(&miss);
@@ -3012,7 +3068,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
 }
 
 
-MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+MaybeObject* KeyedLoadStubCompiler::CompileLoadPolymorphic(
     MapList* receiver_maps,
     CodeList* handler_ics) {
   // ----------- S t a t e -------------
@@ -3436,6 +3492,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
         __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
         break;
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3503,6 +3560,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3634,15 +3692,17 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label miss_force_generic;
+  Label miss_force_generic, transition_elements_kind;
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
@@ -3665,13 +3725,22 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
     __ j(above_equal, &miss_force_generic);
   }
 
-  // Do the store and update the write barrier. Make sure to preserve
-  // the value in register eax.
-  __ movq(rdx, rax);
-  __ SmiToInteger32(rcx, rcx);
-  __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
-          rax);
-  __ RecordWrite(rdi, 0, rdx, rcx);
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(rax, &transition_elements_kind);
+    __ SmiToInteger32(rcx, rcx);
+    __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+            rax);
+  } else {
+    // Do the store and update the write barrier.
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    __ SmiToInteger32(rcx, rcx);
+    __ lea(rcx,
+           FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
+    __ movq(Operand(rcx, 0), rax);
+    // Make sure to preserve the value in register rax.
+    __ movq(rdx, rax);
+    __ RecordWrite(rdi, rcx, rdx, kDontSaveFPRegs);
+  }
 
   // Done.
   __ ret(0);
@@ -3681,6 +3750,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
 }
 
 
@@ -3693,8 +3766,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label miss_force_generic, smi_value, is_nan, maybe_nan;
-  Label have_double_value, not_nan;
+  Label miss_force_generic, transition_elements_kind;
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
@@ -3715,50 +3787,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   __ j(above_equal, &miss_force_generic);
 
   // Handle smi values specially
-  __ JumpIfSmi(rax, &smi_value, Label::kNear);
-
-  __ CheckMap(rax,
-              masm->isolate()->factory()->heap_number_map(),
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
-
-  // Double value, canonicalize NaN.
-  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
-  __ cmpl(FieldOperand(rax, offset),
-          Immediate(kNaNOrInfinityLowerBoundUpper32));
-  __ j(greater_equal, &maybe_nan, Label::kNear);
-
-  __ bind(&not_nan);
-  __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
-  __ bind(&have_double_value);
-  __ SmiToInteger32(rcx, rcx);
-  __ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
-           xmm0);
-  __ ret(0);
-
-  __ bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  __ j(greater, &is_nan, Label::kNear);
-  __ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
-  __ j(zero, &not_nan);
-  __ bind(&is_nan);
-  // Convert all NaNs to the same canonical NaN value when they are stored in
-  // the double array.
-  __ Set(kScratchRegister, BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
-  __ movq(xmm0, kScratchRegister);
-  __ jmp(&have_double_value, Label::kNear);
-
-  __ bind(&smi_value);
-  // Value is a smi. convert to a double and store.
-  // Preserve original value.
-  __ SmiToInteger32(rdx, rax);
-  __ push(rdx);
-  __ fild_s(Operand(rsp, 0));
-  __ pop(rdx);
   __ SmiToInteger32(rcx, rcx);
-  __ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
+  __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
+                                 &transition_elements_kind);
   __ ret(0);
 
   // Handle store cache miss, replacing the ic with the generic stub.
@@ -3766,6 +3797,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
+
+  __ bind(&transition_elements_kind);
+  // Restore smi-tagging of rcx.
+  __ Integer32ToSmi(rcx, rcx);
+  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
+  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
 }
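The RecordWrite changes above (in CompileArrayPushCall and CompileStoreGlobal) pass explicit flags so the barrier can skip work the compiler has already ruled out: smis are never heap pointers, and a global property cell's value slot is already known to the collector, so neither the smi check nor the remembered-set update is needed there. The following standalone C++ sketch models that idea under simplified assumptions; the flag names mirror the ones in the diff, but everything else (Tagged, OldObject, StoreWithBarrier, the store-buffer vector) is invented for illustration, and this toy barrier does not check which space the stored value lives in.

// Conceptual model of a write barrier with "omit" flags, loosely following
// the RecordWrite calls above. Not V8 code: Tagged, OldObject,
// StoreWithBarrier and the store-buffer vector are invented for illustration.
#include <cstdint>
#include <iostream>
#include <vector>

// Tagging assumption borrowed from V8: smis have the low bit clear,
// heap pointers have it set.
using Tagged = uintptr_t;
inline bool IsSmi(Tagged v) { return (v & 1) == 0; }
inline Tagged MakeSmi(intptr_t n) { return static_cast<Tagged>(n) << 1; }

struct OldObject {
  std::vector<Tagged> fields;
};

// Toy "store buffer": slots recorded here are revisited by the next minor GC
// instead of scanning all of old space.
static std::vector<Tagged*> g_store_buffer;

enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };

// Store `value` into `holder->fields[index]`, doing only the bookkeeping the
// caller could not rule out statically. (A real barrier would also check
// which space the value lives in and notify the incremental marker.)
void StoreWithBarrier(OldObject* holder, size_t index, Tagged value,
                      SmiCheck smi_check, RememberedSetAction rs_action) {
  Tagged* slot = &holder->fields[index];
  *slot = value;

  // Smis are not heap pointers, so no barrier work is needed for them.
  if (smi_check == INLINE_SMI_CHECK && IsSmi(value)) return;

  // Remember the slot unless it is already known to the collector
  // (the "cells are always in the remembered set" case above).
  if (rs_action == EMIT_REMEMBERED_SET) {
    g_store_buffer.push_back(slot);
  }
}

int main() {
  OldObject obj;
  obj.fields.resize(3, MakeSmi(0));
  Tagged heap_like = reinterpret_cast<Tagged>(&obj) | 1;  // pointer-like value

  // Smi store: the inline smi check filters it out, nothing is recorded.
  StoreWithBarrier(&obj, 0, MakeSmi(42), INLINE_SMI_CHECK, EMIT_REMEMBERED_SET);
  // Ordinary pointer store: the slot is recorded for the next minor GC.
  StoreWithBarrier(&obj, 1, heap_like, INLINE_SMI_CHECK, EMIT_REMEMBERED_SET);
  // Store into a slot the GC already tracks: skip both checks entirely.
  StoreWithBarrier(&obj, 2, heap_like, OMIT_SMI_CHECK, OMIT_REMEMBERED_SET);

  std::cout << "remembered slots: " << g_store_buffer.size() << "\n";  // 1
  return 0;
}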
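CompileStorePolymorphic above compares the receiver's map against each entry and, when an entry carries a transitioned map, loads that map into rbx before jumping to the handler stub, which performs the elements-kind transition together with the store. A rough standalone C++ model of that dispatch follows; it is only a sketch (the types PolymorphicStoreIC, Receiver and Handler are invented, and the map change is done in the dispatcher rather than in the handler to keep the example short).

// Toy model of polymorphic keyed-store dispatch with optional map
// transitions, mirroring the control flow of CompileStorePolymorphic above.
// All types and names are illustrative, not V8's.
#include <cstdio>
#include <functional>
#include <vector>

struct Map { const char* name; };
struct Receiver { const Map* map; };

using Handler = std::function<void(Receiver&, int key, int value)>;

struct PolymorphicStoreIC {
  std::vector<const Map*> receiver_maps;
  std::vector<const Map*> transitioned_maps;  // nullptr => no transition
  std::vector<Handler> handlers;
  Handler miss;

  void Store(Receiver& r, int key, int value) const {
    for (size_t i = 0; i < receiver_maps.size(); ++i) {
      if (r.map != receiver_maps[i]) continue;  // like __ Cmp(rdi, map)
      if (transitioned_maps[i] != nullptr) {
        r.map = transitioned_maps[i];           // install the transition map
      }
      handlers[i](r, key, value);               // tail-call the handler stub
      return;
    }
    miss(r, key, value);                        // like __ bind(&miss)
  }
};

int main() {
  Map smi_only{"FAST_SMI_ONLY_ELEMENTS"};
  Map fast{"FAST_ELEMENTS"};
  Receiver r{&smi_only};

  PolymorphicStoreIC ic;
  ic.receiver_maps = {&smi_only, &fast};
  ic.transitioned_maps = {&fast, nullptr};  // smi-only receivers transition
  ic.handlers = {
      [](Receiver&, int k, int v) { std::printf("transition+store [%d]=%d\n", k, v); },
      [](Receiver&, int k, int v) { std::printf("fast store [%d]=%d\n", k, v); }};
  ic.miss = [](Receiver&, int, int) { std::puts("miss: fall back to generic stub"); };

  ic.Store(r, 0, 1);  // matches smi_only, switches the map to fast
  std::printf("receiver map is now %s\n", r.map->name);
  ic.Store(r, 1, 2);  // now hits the fast handler directly
  return 0;
}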
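GenerateStoreFastElement now takes the elements kind: for FAST_SMI_ONLY_ELEMENTS it only accepts smi values (which need no write barrier) and bails out to KeyedStoreIC_Miss for anything else, so the backing store can be transitioned first. A minimal sketch of that fast path, with illustrative names (StoreToSmiOnlyElements, StoreResult) and the same low-bit smi tagging assumption as above:

// Sketch of the FAST_SMI_ONLY_ELEMENTS fast path above: smi values are
// written without a write barrier, anything else forces an elements-kind
// transition handled outside this stub. Names are illustrative, not V8's.
#include <cstdint>
#include <cstdio>
#include <vector>

using Tagged = uintptr_t;
inline bool IsSmi(Tagged v) { return (v & 1) == 0; }
inline Tagged MakeSmi(intptr_t n) { return static_cast<Tagged>(n) << 1; }

enum class StoreResult { kStored, kTransitionElementsKind };

// Backing store that only ever holds smis, so stores never need a barrier.
StoreResult StoreToSmiOnlyElements(std::vector<Tagged>& elements,
                                   size_t index, Tagged value) {
  if (!IsSmi(value)) {
    // Equivalent of __ JumpIfNotSmi(rax, &transition_elements_kind): the
    // caller must transition the array to FAST_ELEMENTS and retry the store.
    return StoreResult::kTransitionElementsKind;
  }
  elements[index] = value;  // no RecordWrite needed for a smi
  return StoreResult::kStored;
}

int main() {
  std::vector<Tagged> elements(4, MakeSmi(0));
  Tagged heap_like = reinterpret_cast<Tagged>(elements.data()) | 1;

  bool stored = StoreToSmiOnlyElements(elements, 0, MakeSmi(7)) ==
                StoreResult::kStored;
  bool transition = StoreToSmiOnlyElements(elements, 1, heap_like) ==
                    StoreResult::kTransitionElementsKind;
  std::printf("stored smi: %d, needs transition: %d\n", stored, transition);
  return 0;
}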
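In GenerateStoreFastDoubleElement the hand-written NaN handling is replaced by the StoreNumberToDoubleElements macro, which still has to canonicalize NaNs before they are written into the unboxed double array so that no stored value can alias the hole sentinel. The sketch below shows that invariant in standalone C++; the hole and canonical-NaN bit patterns are assumptions for illustration, not V8's actual constants.

// Sketch of NaN canonicalization for an unboxed double array, as done by the
// (removed) inline sequence / StoreNumberToDoubleElements above. The hole and
// canonical-NaN bit patterns are illustrative assumptions.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Reserved bit pattern meaning "no element at this index" (the hole).
constexpr uint64_t kHoleBits = 0x7FF8000000000001ULL;        // assumed sentinel
// The single NaN representation allowed to be stored as a real value.
constexpr uint64_t kCanonicalNaNBits = 0x7FF8000000000000ULL;

static uint64_t ToBits(double d) {
  uint64_t b;
  std::memcpy(&b, &d, sizeof b);
  return b;
}

static double FromBits(uint64_t b) {
  double d;
  std::memcpy(&d, &b, sizeof d);
  return d;
}

// Store a number into the backing store, canonicalizing NaNs so that no
// stored value can alias the hole sentinel.
void StoreDoubleElement(std::vector<uint64_t>& elements, size_t i, double v) {
  elements[i] = std::isnan(v) ? kCanonicalNaNBits : ToBits(v);
}

int main() {
  std::vector<uint64_t> elements(3, kHoleBits);  // start with holes

  StoreDoubleElement(elements, 0, 1.5);
  // A NaN whose payload happens to equal the hole pattern is rewritten.
  StoreDoubleElement(elements, 1, FromBits(kHoleBits));

  std::printf("slot 0 = %g\n", FromBits(elements[0]));
  std::printf("slot 1 is hole? %s\n", elements[1] == kHoleBits ? "yes" : "no");
  std::printf("slot 2 is hole? %s\n", elements[2] == kHoleBits ? "yes" : "no");
  return 0;
}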