From 5777d7ab3038983d368046ab40eb34075f635348 Mon Sep 17 00:00:00 2001 From: Trevor Norris Date: Mon, 22 Jul 2013 15:26:27 -0700 Subject: v8: upgrade to v8 3.20.7 --- deps/v8/src/mips/assembler-mips.cc | 9 + deps/v8/src/mips/assembler-mips.h | 3 +- deps/v8/src/mips/builtins-mips.cc | 1 - deps/v8/src/mips/code-stubs-mips.cc | 600 ++++++++------------------ deps/v8/src/mips/code-stubs-mips.h | 81 ---- deps/v8/src/mips/codegen-mips.cc | 16 +- deps/v8/src/mips/constants-mips.cc | 4 + deps/v8/src/mips/deoptimizer-mips.cc | 41 +- deps/v8/src/mips/full-codegen-mips.cc | 22 +- deps/v8/src/mips/ic-mips.cc | 27 +- deps/v8/src/mips/lithium-codegen-mips.cc | 216 +++------- deps/v8/src/mips/lithium-codegen-mips.h | 1 - deps/v8/src/mips/lithium-gap-resolver-mips.cc | 5 +- deps/v8/src/mips/lithium-mips.cc | 73 ++-- deps/v8/src/mips/lithium-mips.h | 89 ++-- deps/v8/src/mips/macro-assembler-mips.cc | 34 +- deps/v8/src/mips/macro-assembler-mips.h | 12 +- deps/v8/src/mips/stub-cache-mips.cc | 203 ++++----- 18 files changed, 490 insertions(+), 947 deletions(-) mode change 100755 => 100644 deps/v8/src/mips/builtins-mips.cc (limited to 'deps/v8/src/mips') diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc index c4fefcc512..a04d456ae9 100644 --- a/deps/v8/src/mips/assembler-mips.cc +++ b/deps/v8/src/mips/assembler-mips.cc @@ -501,11 +501,13 @@ bool Assembler::IsBranch(Instr instr) { (opcode == COP1 && rs_field == BC1); // Coprocessor branch. } + bool Assembler::IsEmittedConstant(Instr instr) { uint32_t label_constant = GetLabelConst(instr); return label_constant == 0; // Emitted label const in reg-exp engine. } + bool Assembler::IsBeq(Instr instr) { return GetOpcodeField(instr) == BEQ; } @@ -539,10 +541,12 @@ bool Assembler::IsJal(Instr instr) { return GetOpcodeField(instr) == JAL; } + bool Assembler::IsJr(Instr instr) { return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR; } + bool Assembler::IsJalr(Instr instr) { return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR; } @@ -825,6 +829,7 @@ void Assembler::next(Label* L) { } } + bool Assembler::is_near(Label* L) { if (L->is_bound()) { return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize); @@ -832,6 +837,7 @@ bool Assembler::is_near(Label* L) { return false; } + // We have to use a temporary register for things that can be relocated even // if they can be encoded in the MIPS's 16 bits of immediate-offset instruction // space. There is no guarantee that the relocated location can be similarly @@ -1669,6 +1675,7 @@ void Assembler::cfc1(Register rt, FPUControlRegister fs) { GenInstrRegister(COP1, CFC1, rt, fs); } + void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { uint64_t i; OS::MemCopy(&i, &d, 8); @@ -1677,6 +1684,7 @@ void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { *hi = i >> 32; } + // Arithmetic. void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) { @@ -2257,6 +2265,7 @@ void Assembler::set_target_address_at(Address pc, Address target) { CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t)); } + void Assembler::JumpLabelToJumpRegister(Address pc) { // Address pc points to lui/ori instructions. // Jump to label may follow at pc + 2 * kInstrSize. 
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h index d12c0dace4..8d533b36f4 100644 --- a/deps/v8/src/mips/assembler-mips.h +++ b/deps/v8/src/mips/assembler-mips.h @@ -583,7 +583,8 @@ class Assembler : public AssemblerBase { LAST_CODE_MARKER, FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, // Code aging - CODE_AGE_MARKER_NOP = 6 + CODE_AGE_MARKER_NOP = 6, + CODE_AGE_SEQUENCE_NOP }; // Type == 0 is the default non-marking nop. For mips this is a diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc old mode 100755 new mode 100644 index 35d21f05e6..3f5dca0009 --- a/deps/v8/src/mips/builtins-mips.cc +++ b/deps/v8/src/mips/builtins-mips.cc @@ -208,7 +208,6 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { a3, // Scratch. t0, // Scratch. t1, // Scratch. - false, // Is it a Smi? &not_cached); __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0); __ bind(&argument_is_string); diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc index 69b957afa8..f984b3a7b7 100644 --- a/deps/v8/src/mips/code-stubs-mips.cc +++ b/deps/v8/src/mips/code-stubs-mips.cc @@ -61,6 +61,16 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( } + +void CreateAllocationSiteStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { a2 }; + descriptor->register_param_count_ = 1; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = NULL; +} + + void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { @@ -227,8 +237,42 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( } + +void UnaryOpStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { a0 }; + descriptor->register_param_count_ = 1; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(UnaryOpIC_Miss); +} + + +void StoreGlobalStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { a1, a2, a0 }; + descriptor->register_param_count_ = 3; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(StoreIC_MissFromStubFailure); +} + + +void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { a0, a3, a1, a2 }; + descriptor->register_param_count_ = 4; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss); +} + + #define __ ACCESS_MASM(masm) + static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, Condition cc); @@ -1181,17 +1225,10 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, // Now that we have the types we might as well check for // internalized-internalized.
- Label not_internalized; - STATIC_ASSERT(kInternalizedTag != 0); - __ And(t2, a2, Operand(kIsNotStringMask | kIsInternalizedMask)); - __ Branch(&not_internalized, ne, t2, - Operand(kInternalizedTag | kStringTag)); - - __ And(a3, a3, Operand(kIsNotStringMask | kIsInternalizedMask)); - __ Branch(&return_not_equal, eq, a3, - Operand(kInternalizedTag | kStringTag)); - - __ bind(&not_internalized); + STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); + __ Or(a2, a2, Operand(a3)); + __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask)); + __ Branch(&return_not_equal, eq, at, Operand(zero_reg)); } @@ -1227,15 +1264,15 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, // a2 is object type of rhs. Label object_test; - STATIC_ASSERT(kInternalizedTag != 0); + STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); __ And(at, a2, Operand(kIsNotStringMask)); __ Branch(&object_test, ne, at, Operand(zero_reg)); - __ And(at, a2, Operand(kIsInternalizedMask)); - __ Branch(possible_strings, eq, at, Operand(zero_reg)); + __ And(at, a2, Operand(kIsNotInternalizedMask)); + __ Branch(possible_strings, ne, at, Operand(zero_reg)); __ GetObjectType(rhs, a3, a3); __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE)); - __ And(at, a3, Operand(kIsInternalizedMask)); - __ Branch(possible_strings, eq, at, Operand(zero_reg)); + __ And(at, a3, Operand(kIsNotInternalizedMask)); + __ Branch(possible_strings, ne, at, Operand(zero_reg)); // Both are internalized strings. We already checked they weren't the same // pointer so they are not equal. @@ -1266,7 +1303,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3, - bool object_is_smi, Label* not_found) { // Use of registers. Register result is used as a temporary. Register number_string_cache = result; @@ -1289,37 +1325,35 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, Isolate* isolate = masm->isolate(); Label is_smi; Label load_result_from_cache; - if (!object_is_smi) { - __ JumpIfSmi(object, &is_smi); - __ CheckMap(object, - scratch1, - Heap::kHeapNumberMapRootIndex, - not_found, - DONT_DO_SMI_CHECK); + __ JumpIfSmi(object, &is_smi); + __ CheckMap(object, + scratch1, + Heap::kHeapNumberMapRootIndex, + not_found, + DONT_DO_SMI_CHECK); - STATIC_ASSERT(8 == kDoubleSize); - __ Addu(scratch1, - object, - Operand(HeapNumber::kValueOffset - kHeapObjectTag)); - __ lw(scratch2, MemOperand(scratch1, kPointerSize)); - __ lw(scratch1, MemOperand(scratch1, 0)); - __ Xor(scratch1, scratch1, Operand(scratch2)); - __ And(scratch1, scratch1, Operand(mask)); - - // Calculate address of entry in string cache: each entry consists - // of two pointer sized fields.
- __ sll(scratch1, scratch1, kPointerSizeLog2 + 1); - __ Addu(scratch1, number_string_cache, scratch1); - - Register probe = mask; - __ lw(probe, - FieldMemOperand(scratch1, FixedArray::kHeaderSize)); - __ JumpIfSmi(probe, not_found); - __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); - __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); - __ BranchF(&load_result_from_cache, NULL, eq, f12, f14); - __ Branch(not_found); - } + STATIC_ASSERT(8 == kDoubleSize); + __ Addu(scratch1, + object, + Operand(HeapNumber::kValueOffset - kHeapObjectTag)); + __ lw(scratch2, MemOperand(scratch1, kPointerSize)); + __ lw(scratch1, MemOperand(scratch1, 0)); + __ Xor(scratch1, scratch1, Operand(scratch2)); + __ And(scratch1, scratch1, Operand(mask)); + + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + __ sll(scratch1, scratch1, kPointerSizeLog2 + 1); + __ Addu(scratch1, number_string_cache, scratch1); + + Register probe = mask; + __ lw(probe, + FieldMemOperand(scratch1, FixedArray::kHeaderSize)); + __ JumpIfSmi(probe, not_found); + __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); + __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); + __ BranchF(&load_result_from_cache, NULL, eq, f12, f14); + __ Branch(not_found); __ bind(&is_smi); Register scratch = scratch1; @@ -1332,7 +1366,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, __ Addu(scratch, number_string_cache, scratch); // Check if the entry is the smi we are looking for. - Register probe = mask; __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); __ Branch(not_found, ne, object, Operand(probe)); @@ -1354,7 +1387,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) { __ lw(a1, MemOperand(sp, 0)); // Generate code to lookup number in the number string cache. - GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime); + GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, &runtime); __ DropAndRet(1); __ bind(&runtime); @@ -1586,294 +1619,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { } -void UnaryOpStub::PrintName(StringStream* stream) { - const char* op_name = Token::Name(op_); - const char* overwrite_name = NULL; // Make g++ happy. - switch (mode_) { - case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; - case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; - } - stream->Add("UnaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - UnaryOpIC::GetName(operand_type_)); -} - - -// TODO(svenpanne): Use virtual functions instead of switch. -void UnaryOpStub::Generate(MacroAssembler* masm) { - switch (operand_type_) { - case UnaryOpIC::UNINITIALIZED: - GenerateTypeTransition(masm); - break; - case UnaryOpIC::SMI: - GenerateSmiStub(masm); - break; - case UnaryOpIC::NUMBER: - GenerateNumberStub(masm); - break; - case UnaryOpIC::GENERIC: - GenerateGenericStub(masm); - break; - } -} - - -void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - // Argument is in a0 and v0 at this point, so we can overwrite a0. - __ li(a2, Operand(Smi::FromInt(op_))); - __ li(a1, Operand(Smi::FromInt(mode_))); - __ li(a0, Operand(Smi::FromInt(operand_type_))); - __ Push(v0, a2, a1, a0); - - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1); -} - - -// TODO(svenpanne): Use virtual functions instead of switch. 
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) { - switch (op_) { - case Token::SUB: - GenerateSmiStubSub(masm); - break; - case Token::BIT_NOT: - GenerateSmiStubBitNot(masm); - break; - default: - UNREACHABLE(); - } -} - - -void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) { - Label non_smi, slow; - GenerateSmiCodeSub(masm, &non_smi, &slow); - __ bind(&non_smi); - __ bind(&slow); - GenerateTypeTransition(masm); -} - - -void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) { - Label non_smi; - GenerateSmiCodeBitNot(masm, &non_smi); - __ bind(&non_smi); - GenerateTypeTransition(masm); -} - - -void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, - Label* non_smi, - Label* slow) { - __ JumpIfNotSmi(a0, non_smi); - - // The result of negating zero or the smallest negative smi is not a smi. - __ And(t0, a0, ~0x80000000); - __ Branch(slow, eq, t0, Operand(zero_reg)); - - // Return '0 - value'. - __ Ret(USE_DELAY_SLOT); - __ subu(v0, zero_reg, a0); -} - - -void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, - Label* non_smi) { - __ JumpIfNotSmi(a0, non_smi); - - // Flip bits and revert inverted smi-tag. - __ Neg(v0, a0); - __ And(v0, v0, ~kSmiTagMask); - __ Ret(); -} - - -// TODO(svenpanne): Use virtual functions instead of switch. -void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) { - switch (op_) { - case Token::SUB: - GenerateNumberStubSub(masm); - break; - case Token::BIT_NOT: - GenerateNumberStubBitNot(masm); - break; - default: - UNREACHABLE(); - } -} - - -void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) { - Label non_smi, slow, call_builtin; - GenerateSmiCodeSub(masm, &non_smi, &call_builtin); - __ bind(&non_smi); - GenerateHeapNumberCodeSub(masm, &slow); - __ bind(&slow); - GenerateTypeTransition(masm); - __ bind(&call_builtin); - GenerateGenericCodeFallback(masm); -} - - -void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) { - Label non_smi, slow; - GenerateSmiCodeBitNot(masm, &non_smi); - __ bind(&non_smi); - GenerateHeapNumberCodeBitNot(masm, &slow); - __ bind(&slow); - GenerateTypeTransition(masm); -} - - -void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, - Label* slow) { - EmitCheckForHeapNumber(masm, a0, a1, t2, slow); - // a0 is a heap number. Get a new heap number in a1. - if (mode_ == UNARY_OVERWRITE) { - __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); - __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. - __ Ret(USE_DELAY_SLOT); - __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); - } else { - Label slow_allocate_heapnumber, heapnumber_allocated; - __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber); - __ jmp(&heapnumber_allocated); - - __ bind(&slow_allocate_heapnumber); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(a0); - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(a1, v0); - __ pop(a0); - } - - __ bind(&heapnumber_allocated); - __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); - __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); - __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset)); - __ Xor(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. - __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset)); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, a1); - } -} - - -void UnaryOpStub::GenerateHeapNumberCodeBitNot( - MacroAssembler* masm, - Label* slow) { - Label impossible; - - EmitCheckForHeapNumber(masm, a0, a1, t2, slow); - // Convert the heap number in a0 to an untagged integer in a1. 
- __ ConvertToInt32(a0, a1, a2, a3, f0, slow); - - // Do the bitwise operation and check if the result fits in a smi. - Label try_float; - __ Neg(a1, a1); - __ Addu(a2, a1, Operand(0x40000000)); - __ Branch(&try_float, lt, a2, Operand(zero_reg)); - - // Tag the result as a smi and we're done. - __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. - __ SmiTag(v0, a1); - - // Try to store the result in a heap number. - __ bind(&try_float); - if (mode_ == UNARY_NO_OVERWRITE) { - Label slow_allocate_heapnumber, heapnumber_allocated; - // Allocate a new heap number without zapping v0, which we need if it fails. - __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber); - __ jmp(&heapnumber_allocated); - - __ bind(&slow_allocate_heapnumber); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(v0); // Push the heap number, not the untagged int32. - __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(a2, v0); // Move the new heap number into a2. - // Get the heap number into v0, now that the new heap number is in a2. - __ pop(v0); - } - - // Convert the heap number in v0 to an untagged integer in a1. - // This can't go slow-case because it's the same number we already - // converted once again. - __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible); - // Negate the result. - __ Xor(a1, a1, -1); - - __ bind(&heapnumber_allocated); - __ mov(v0, a2); // Move newly allocated heap number to v0. - } - - // Convert the int32 in a1 to the heap number in v0. a2 is corrupted. - __ mtc1(a1, f0); - __ cvt_d_w(f0, f0); - __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); - __ Ret(); - - __ bind(&impossible); - if (FLAG_debug_code) { - __ stop("Incorrect assumption in bit-not stub"); - } -} - - -// TODO(svenpanne): Use virtual functions instead of switch. -void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { - switch (op_) { - case Token::SUB: - GenerateGenericStubSub(masm); - break; - case Token::BIT_NOT: - GenerateGenericStubBitNot(masm); - break; - default: - UNREACHABLE(); - } -} - - -void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) { - Label non_smi, slow; - GenerateSmiCodeSub(masm, &non_smi, &slow); - __ bind(&non_smi); - GenerateHeapNumberCodeSub(masm, &slow); - __ bind(&slow); - GenerateGenericCodeFallback(masm); -} - - -void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) { - Label non_smi, slow; - GenerateSmiCodeBitNot(masm, &non_smi); - __ bind(&non_smi); - GenerateHeapNumberCodeBitNot(masm, &slow); - __ bind(&slow); - GenerateGenericCodeFallback(masm); -} - - -void UnaryOpStub::GenerateGenericCodeFallback( - MacroAssembler* masm) { - // Handle the slow case by jumping to the JavaScript builtin. - __ push(a0); - switch (op_) { - case Token::SUB: - __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); - break; - case Token::BIT_NOT: - __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } -} - - void BinaryOpStub::Initialize() { platform_specific_bit_ = true; // FPU is a base requirement for V8. 
} @@ -2388,8 +2133,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { __ GetObjectType(right, a2, a2); __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); - StringAddStub string_add_stub((StringAddFlags) - (ERECT_FRAME | NO_STRING_CHECK_IN_STUB)); + StringAddStub string_add_stub( + (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME)); GenerateRegisterArgsPush(masm); __ TailCallStub(&string_add_stub); @@ -2806,8 +2551,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { __ GetObjectType(left, a2, a2); __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE)); - StringAddStub string_add_left_stub((StringAddFlags) - (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB)); + StringAddStub string_add_left_stub( + (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME)); GenerateRegisterArgsPush(masm); __ TailCallStub(&string_add_left_stub); @@ -2817,8 +2562,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { __ GetObjectType(right, a2, a2); __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); - StringAddStub string_add_right_stub((StringAddFlags) - (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB)); + StringAddStub string_add_right_stub( + (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME)); GenerateRegisterArgsPush(masm); __ TailCallStub(&string_add_right_stub); @@ -3344,6 +3089,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { StubFailureTrampolineStub::GenerateAheadOfTime(isolate); RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); + CreateAllocationSiteStub::GenerateAheadOfTime(isolate); } @@ -3987,7 +3733,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) { StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss); __ bind(&miss); - StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); + StubCompiler::TailCallBuiltin( + masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); } @@ -4018,7 +3765,8 @@ void StringLengthStub::Generate(MacroAssembler* masm) { support_wrapper_); __ bind(&miss); - StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); + StubCompiler::TailCallBuiltin( + masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); } @@ -4088,7 +3836,8 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) { __ bind(&miss); - StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); + StubCompiler::TailCallBuiltin( + masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); } @@ -5043,20 +4792,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { // A monomorphic cache hit or an already megamorphic state: invoke the // function without changing the state. __ Branch(&done, eq, a3, Operand(a1)); - __ LoadRoot(at, Heap::kUndefinedValueRootIndex); - __ Branch(&done, eq, a3, Operand(at)); - // Special handling of the Array() function, which caches not only the - // monomorphic Array function but the initial ElementsKind with special - // sentinels - __ JumpIfNotSmi(a3, &miss); - if (FLAG_debug_code) { - Handle terminal_kind_sentinel = - TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), - LAST_FAST_ELEMENTS_KIND); - __ Assert(le, "Array function sentinel is not an ElementsKind", - a3, Operand(terminal_kind_sentinel)); - } + // If we came here, we need to see if we are the array function. 
+ // If we didn't have a matching function, and we didn't find the megamorph + // sentinel, then we have in the cell either some other function or an + // AllocationSite. Do a map check on the object in a3. + Handle<Map> allocation_site_map( + masm->isolate()->heap()->allocation_site_map(), + masm->isolate()); + __ lw(t1, FieldMemOperand(a3, 0)); + __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); + __ Branch(&miss, ne, t1, Operand(at)); // Make sure the function is the Array() function __ LoadArrayFunction(a3); @@ -5083,14 +4829,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { __ LoadArrayFunction(a3); __ Branch(&not_array_function, ne, a1, Operand(a3)); - // The target function is the Array constructor, install a sentinel value in - // the constructor's type info cell that will track the initial ElementsKind - // that should be used for the array when its constructed. - Handle<Object> initial_kind_sentinel = - TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), - GetInitialFastElementsKind()); - __ li(a3, Operand(initial_kind_sentinel)); - __ sw(a3, FieldMemOperand(a2, Cell::kValueOffset)); + // The target function is the Array constructor. + // Create an AllocationSite if we don't already have it, store it in the cell. + { + FrameScope scope(masm, StackFrame::INTERNAL); + const RegList kSavedRegs = + 1 << 4 | // a0 + 1 << 5 | // a1 + 1 << 6; // a2 + + __ MultiPush(kSavedRegs); + + CreateAllocationSiteStub create_stub; + __ CallStub(&create_stub); + + __ MultiPop(kSavedRegs); + } __ Branch(&done); __ bind(&not_array_function); @@ -6111,7 +5865,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ lw(a1, MemOperand(sp, 0 * kPointerSize)); // Second argument. // Make sure that both arguments are strings if not known in advance. - if ((flags_ & NO_STRING_ADD_FLAGS) != 0) { + // Otherwise, at least one of the arguments is definitely a string, + // and we convert the one that is not known to be a string. + if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) { + ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT); + ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT); __ JumpIfEitherSmi(a0, a1, &call_runtime); // Load instance types. __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset)); @@ -6123,20 +5881,16 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ Or(t4, t0, Operand(t1)); __ And(t4, t4, Operand(kIsNotStringMask)); __ Branch(&call_runtime, ne, t4, Operand(zero_reg)); - } else { - // Here at least one of the arguments is definitely a string. - // We convert the one that is not known to be a string.
- if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { - ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); - GenerateConvertArgument( - masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin); - builtin_id = Builtins::STRING_ADD_RIGHT; - } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { - ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); - GenerateConvertArgument( - masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin); - builtin_id = Builtins::STRING_ADD_LEFT; - } + } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) { + ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0); + GenerateConvertArgument( + masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin); + builtin_id = Builtins::STRING_ADD_RIGHT; + } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) { + ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0); + GenerateConvertArgument( + masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin); + builtin_id = Builtins::STRING_ADD_LEFT; } // Both arguments are strings. @@ -6187,7 +5941,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ Branch(&longer_than_two, ne, t2, Operand(2)); // Check that both strings are non-external ASCII strings. - if (flags_ != NO_STRING_ADD_FLAGS) { + if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset)); __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); @@ -6231,7 +5985,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // If result is not supposed to be flat, allocate a cons string object. // If both strings are ASCII the result is an ASCII cons string. - if (flags_ != NO_STRING_ADD_FLAGS) { + if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset)); __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); @@ -6314,7 +6068,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // t2: sum of lengths. Label first_prepared, second_prepared; __ bind(&string_add_flat_result); - if (flags_ != NO_STRING_ADD_FLAGS) { + if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset)); __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); @@ -6400,7 +6154,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Just jump to runtime to add the two strings. __ bind(&call_runtime); - if ((flags_ & ERECT_FRAME) != 0) { + if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { GenerateRegisterArgsPop(masm); // Build a frame. { @@ -6415,7 +6169,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { if (call_builtin.is_linked()) { __ bind(&call_builtin); - if ((flags_ & ERECT_FRAME) != 0) { + if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { GenerateRegisterArgsPop(masm); // Build a frame. 
{ @@ -6467,7 +6221,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, scratch2, scratch3, scratch4, - false, &not_cached); __ mov(arg, scratch1); __ sw(arg, MemOperand(sp, stack_offset)); @@ -6623,13 +6376,10 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); - STATIC_ASSERT(kInternalizedTag != 0); - - __ And(tmp1, tmp1, Operand(kIsNotStringMask | kIsInternalizedMask)); - __ Branch(&miss, ne, tmp1, Operand(kInternalizedTag | kStringTag)); - - __ And(tmp2, tmp2, Operand(kIsNotStringMask | kIsInternalizedMask)); - __ Branch(&miss, ne, tmp2, Operand(kInternalizedTag | kStringTag)); + STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); + __ Or(tmp1, tmp1, Operand(tmp2)); + __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); + __ Branch(&miss, ne, at, Operand(zero_reg)); // Make sure a0 is non-zero. At this point input operands are // guaranteed to be non-zero. @@ -6664,7 +6414,6 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { // Check that both operands are unique names. This leaves the instance // types loaded in tmp1 and tmp2. - STATIC_ASSERT(kInternalizedTag != 0); __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); @@ -6738,11 +6487,11 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { // strings. if (equality) { ASSERT(GetCondition() == eq); - STATIC_ASSERT(kInternalizedTag != 0); - __ And(tmp3, tmp1, Operand(tmp2)); - __ And(tmp5, tmp3, Operand(kIsInternalizedMask)); + STATIC_ASSERT(kInternalizedTag == 0); + __ Or(tmp3, tmp1, Operand(tmp2)); + __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask)); Label is_symbol; - __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg)); + __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg)); // Make sure a0 is non-zero. At this point input operands are // guaranteed to be non-zero. ASSERT(right.is(a0)); @@ -6815,6 +6564,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { GenerateMiss(masm); } + void ICCompareStub::GenerateMiss(MacroAssembler* masm) { { // Call the runtime system in a fresh internal frame. @@ -7145,6 +6895,7 @@ struct AheadOfTimeWriteBarrierStubList { RememberedSetAction action; }; + #define REG(Name) { kRegister_ ## Name ## _Code } static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { @@ -7207,6 +6958,9 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { StoreBufferOverflowStub stub1(kDontSaveFPRegs); stub1.GetCode(isolate)->set_is_pregenerated(true); + // Hydrogen code stubs need stub2 at snapshot time. + StoreBufferOverflowStub stub2(kSaveFPRegs); + stub2.GetCode(isolate)->set_is_pregenerated(true); } @@ -7612,10 +7366,6 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) { ASSERT(FAST_DOUBLE_ELEMENTS == 4); ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); - Handle undefined_sentinel( - masm->isolate()->heap()->undefined_value(), - masm->isolate()); - - // is the low bit set? If so, we are holey and that is good.
Label normal_sequence; __ And(at, a3, Operand(1)); @@ -7626,17 +7376,19 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) { __ Branch(&normal_sequence, eq, t1, Operand(zero_reg)); // We are going to create a holey array, but our kind is non-holey. - // Fix kind and retry + // Fix kind and retry (only if we have an allocation site in the cell). __ Addu(a3, a3, Operand(1)); - __ Branch(&normal_sequence, eq, a2, Operand(undefined_sentinel)); - - // The type cell may have gone megamorphic, don't overwrite if so. - __ lw(t1, FieldMemOperand(a2, kPointerSize)); - __ JumpIfNotSmi(t1, &normal_sequence); + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&normal_sequence, eq, a2, Operand(at)); + __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset)); + __ lw(t1, FieldMemOperand(t1, 0)); + __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); + __ Branch(&normal_sequence, ne, t1, Operand(at)); // Save the resulting elements kind in type info __ SmiTag(a3); - __ sw(a3, FieldMemOperand(a2, kPointerSize)); + __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset)); + __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset)); __ SmiUntag(a3); __ bind(&normal_sequence); @@ -7664,7 +7416,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); T stub(kind); stub.GetCode(isolate)->set_is_pregenerated(true); - if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { + if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES); stub1.GetCode(isolate)->set_is_pregenerated(true); } @@ -7705,10 +7457,6 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { // -- sp[0] : return address // -- sp[4] : last argument // ----------------------------------- - Handle undefined_sentinel( - masm->isolate()->heap()->undefined_value(), - masm->isolate()); - if (FLAG_debug_code) { // The array construct code is only set for the global and natives // builtin Array functions which always have maps. @@ -7723,10 +7471,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { __ Assert(eq, "Unexpected initial map for Array function", t0, Operand(MAP_TYPE)); - // We should either have undefined in a2 or a valid cell + // We should either have undefined in a2 or a valid cell. Label okay_here; Handle cell_map = masm->isolate()->factory()->cell_map(); - __ Branch(&okay_here, eq, a2, Operand(undefined_sentinel)); + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&okay_here, eq, a2, Operand(at)); __ lw(a3, FieldMemOperand(a2, 0)); __ Assert(eq, "Expected property cell in register a2", a3, Operand(cell_map)); @@ -7735,9 +7484,20 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { Label no_info, switch_ready; // Get the elements kind and case on that. - __ Branch(&no_info, eq, a2, Operand(undefined_sentinel)); + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&no_info, eq, a2, Operand(at)); __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset)); - __ JumpIfNotSmi(a3, &no_info); + + // The type cell may have undefined in its value. + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&no_info, eq, a3, Operand(at)); + + // The type cell has either an AllocationSite or a JSFunction. 
+ __ lw(t0, FieldMemOperand(a3, 0)); + __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); + __ Branch(&no_info, ne, t0, Operand(at)); + + __ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset)); __ SmiUntag(a3); __ jmp(&switch_ready); __ bind(&no_info); diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h index bf5db10f63..1ae1d3454f 100644 --- a/deps/v8/src/mips/code-stubs-mips.h +++ b/deps/v8/src/mips/code-stubs-mips.h @@ -81,71 +81,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub { }; -class UnaryOpStub: public PlatformCodeStub { - public: - UnaryOpStub(Token::Value op, - UnaryOverwriteMode mode, - UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED) - : op_(op), - mode_(mode), - operand_type_(operand_type) { - } - - private: - Token::Value op_; - UnaryOverwriteMode mode_; - - // Operand type information determined at runtime. - UnaryOpIC::TypeInfo operand_type_; - - virtual void PrintName(StringStream* stream); - - class ModeBits: public BitField {}; - class OpBits: public BitField {}; - class OperandTypeInfoBits: public BitField {}; - - Major MajorKey() { return UnaryOp; } - int MinorKey() { - return ModeBits::encode(mode_) - | OpBits::encode(op_) - | OperandTypeInfoBits::encode(operand_type_); - } - - // Note: A lot of the helper functions below will vanish when we use virtual - // function instead of switch more often. - void Generate(MacroAssembler* masm); - - void GenerateTypeTransition(MacroAssembler* masm); - - void GenerateSmiStub(MacroAssembler* masm); - void GenerateSmiStubSub(MacroAssembler* masm); - void GenerateSmiStubBitNot(MacroAssembler* masm); - void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow); - void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow); - - void GenerateNumberStub(MacroAssembler* masm); - void GenerateNumberStubSub(MacroAssembler* masm); - void GenerateNumberStubBitNot(MacroAssembler* masm); - void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow); - void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow); - - void GenerateGenericStub(MacroAssembler* masm); - void GenerateGenericStubSub(MacroAssembler* masm); - void GenerateGenericStubBitNot(MacroAssembler* masm); - void GenerateGenericCodeFallback(MacroAssembler* masm); - - virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; } - - virtual InlineCacheState GetICState() { - return UnaryOpIC::ToState(operand_type_); - } - - virtual void FinishCode(Handle code) { - code->set_unary_op_type(operand_type_); - } -}; - - class StringHelper : public AllStatic { public: // Generate code for copying characters using a simple loop. This should only @@ -210,21 +145,6 @@ class StringHelper : public AllStatic { }; -// Flag that indicates how to generate code for the stub StringAddStub. -enum StringAddFlags { - NO_STRING_ADD_FLAGS = 1 << 0, - // Omit left string check in stub (left is definitely a string). - NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1, - // Omit right string check in stub (right is definitely a string). - NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2, - // Stub needs a frame before calling the runtime - ERECT_FRAME = 1 << 3, - // Omit both string checks in stub. 
- NO_STRING_CHECK_IN_STUB = - NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB -}; - - class StringAddStub: public PlatformCodeStub { public: explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} @@ -363,7 +283,6 @@ class NumberToStringStub: public PlatformCodeStub { Register scratch1, Register scratch2, Register scratch3, - bool object_is_smi, Label* not_found); private: diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc index 7a95bc426b..3f74154f58 100644 --- a/deps/v8/src/mips/codegen-mips.cc +++ b/deps/v8/src/mips/codegen-mips.cc @@ -120,6 +120,7 @@ UnaryMathFunction CreateSqrtFunction() { return &sqrt; } + // ------------------------------------------------------------------------- // Platform-specific RuntimeCallHelper functions. @@ -136,6 +137,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { masm->set_has_frame(false); } + // ------------------------------------------------------------------------- // Code generators @@ -143,7 +145,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( MacroAssembler* masm, AllocationSiteMode mode, - Label* allocation_site_info_found) { + Label* allocation_memento_found) { // ----------- S t a t e ------------- // -- a0 : value // -- a1 : key @@ -153,9 +155,9 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( // -- t0 : scratch (elements) // ----------------------------------- if (mode == TRACK_ALLOCATION_SITE) { - ASSERT(allocation_site_info_found != NULL); - masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, - allocation_site_info_found); + ASSERT(allocation_memento_found != NULL); + masm->TestJSArrayForAllocationMemento(a2, t0, eq, + allocation_memento_found); } // Set transitioned map. @@ -186,7 +188,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( Register scratch = t6; if (mode == TRACK_ALLOCATION_SITE) { - masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail); + masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail); } // Check for empty arrays, which only require a map transition and no changes @@ -314,7 +316,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( Label entry, loop, convert_hole, gc_required, only_change_map; if (mode == TRACK_ALLOCATION_SITE) { - masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail); + masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail); } // Check for empty arrays, which only require a map transition and no changes @@ -601,7 +603,7 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) { if (!initialized) { CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength); patcher.masm()->Push(ra, fp, cp, a1); - patcher.masm()->LoadRoot(at, Heap::kUndefinedValueRootIndex); + patcher.masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP); patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize)); initialized = true; } diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc index a20ec5479a..2dd7a31f38 100644 --- a/deps/v8/src/mips/constants-mips.cc +++ b/deps/v8/src/mips/constants-mips.cc @@ -58,6 +58,7 @@ const char* Registers::names_[kNumSimuRegisters] = { "pc" }; + // List of alias names which can be used when referring to MIPS registers. 
const Registers::RegisterAlias Registers::aliases_[] = { {0, "zero"}, @@ -67,6 +68,7 @@ const Registers::RegisterAlias Registers::aliases_[] = { {kInvalidRegister, NULL} }; + const char* Registers::Name(int reg) { const char* result; if ((0 <= reg) && (reg < kNumSimuRegisters)) { @@ -106,11 +108,13 @@ const char* FPURegisters::names_[kNumFPURegisters] = { "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" }; + // List of alias names which can be used when referring to MIPS registers. const FPURegisters::RegisterAlias FPURegisters::aliases_[] = { {kInvalidRegister, NULL} }; + const char* FPURegisters::Name(int creg) { const char* result; if ((0 <= creg) && (creg < kNumFPURegisters)) { diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc index 6978cde52b..840462e43f 100644 --- a/deps/v8/src/mips/deoptimizer-mips.cc +++ b/deps/v8/src/mips/deoptimizer-mips.cc @@ -457,22 +457,12 @@ void Deoptimizer::EntryGenerator::Generate() { // Get the bailout id from the stack. __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize)); - // Get the address of the location in the code object if possible (a3) (return + // Get the address of the location in the code object (a3) (return // address for lazy deoptimization) and compute the fp-to-sp delta in // register t0. - if (type() == EAGER || type() == SOFT) { - __ mov(a3, zero_reg); - // Correct one word for bailout id. - __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); - } else if (type() == OSR) { - __ mov(a3, ra); - // Correct one word for bailout id. - __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); - } else { - __ mov(a3, ra); - // Correct two words for bailout id and return address. - __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize))); - } + __ mov(a3, ra); + // Correct one word for bailout id. + __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); __ Subu(t0, fp, t0); @@ -521,13 +511,8 @@ void Deoptimizer::EntryGenerator::Generate() { __ sdc1(f0, MemOperand(a1, dst_offset)); } - // Remove the bailout id, eventually return address, and the saved registers - // from the stack. - if (type() == EAGER || type() == SOFT || type() == OSR) { - __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); - } else { - __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize))); - } + // Remove the bailout id and the saved registers from the stack. + __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); // Compute a pointer to the unwinding limit in register a2; that is // the first stack slot not part of the input frame. @@ -628,25 +613,19 @@ void Deoptimizer::EntryGenerator::Generate() { // Maximum size of a table entry generated below. -const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize; +const int Deoptimizer::table_entry_size_ = 7 * Assembler::kInstrSize; void Deoptimizer::TableEntryGenerator::GeneratePrologue() { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); - // Create a sequence of deoptimization entries. Note that any - // registers may be still live. + // Create a sequence of deoptimization entries. + // Note that registers are still live when jumping to an entry. Label table_start; __ bind(&table_start); for (int i = 0; i < count(); i++) { Label start; __ bind(&start); - if (type() != EAGER && type() != SOFT) { - // Emulate ia32 like call by pushing return address to stack. 
- __ addiu(sp, sp, -2 * kPointerSize); - __ sw(ra, MemOperand(sp, 1 * kPointerSize)); - } else { - __ addiu(sp, sp, -1 * kPointerSize); - } + __ addiu(sp, sp, -1 * kPointerSize); // Jump over the remaining deopt entries (including this one). // This code is always reached by calling Jump, which puts the target (label // start) into t9. diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc index 032c1f5e5b..9c610c32f9 100644 --- a/deps/v8/src/mips/full-codegen-mips.cc +++ b/deps/v8/src/mips/full-codegen-mips.cc @@ -174,9 +174,7 @@ void FullCodeGenerator::Generate() { // The following three instructions must remain together and unmodified for // code aging to work properly. __ Push(ra, fp, cp, a1); - // Load undefined value here, so the value is ready for the loop - // below. - __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ nop(Assembler::CODE_AGE_SEQUENCE_NOP); // Adjust fp to point to caller's fp. __ Addu(fp, sp, Operand(2 * kPointerSize)); info->AddNoFrameRange(0, masm_->pc_offset()); @@ -185,8 +183,11 @@ void FullCodeGenerator::Generate() { int locals_count = info->scope()->num_stack_slots(); // Generators allocate locals, if any, in context slots. ASSERT(!info->function()->is_generator() || locals_count == 0); - for (int i = 0; i < locals_count; i++) { - __ push(at); + if (locals_count > 0) { + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + for (int i = 0; i < locals_count; i++) { + __ push(at); + } } } @@ -3745,7 +3746,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) { VisitForStackValue(args->at(0)); VisitForStackValue(args->at(1)); - StringAddStub stub(NO_STRING_ADD_FLAGS); + StringAddStub stub(STRING_ADD_CHECK_BOTH); __ CallStub(&stub); context()->Plug(v0); } @@ -4400,10 +4401,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr, const char* comment) { // TODO(svenpanne): Allowing format strings in Comment would be nice here... Comment cmt(masm_, comment); - bool can_overwrite = expr->expression()->ResultOverwriteAllowed(); - UnaryOverwriteMode overwrite = - can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; - UnaryOpStub stub(expr->op(), overwrite); + UnaryOpStub stub(expr->op()); // GenericUnaryOpStub expects the argument to be in a0. VisitForAccumulatorValue(expr->expression()); SetSourcePosition(expr->position()); @@ -4472,7 +4470,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Call ToNumber only if operand is not a smi. Label no_conversion; - __ JumpIfSmi(v0, &no_conversion); + if (ShouldInlineSmiCase(expr->op())) { + __ JumpIfSmi(v0, &no_conversion); + } __ mov(a0, v0); ToNumberStub convert_stub; __ CallStub(&convert_stub); diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc index 896e03007b..ed67e829e3 100644 --- a/deps/v8/src/mips/ic-mips.cc +++ b/deps/v8/src/mips/ic-mips.cc @@ -330,9 +330,9 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, // bit test is enough. 
// map: key map __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); - STATIC_ASSERT(kInternalizedTag != 0); - __ And(at, hash, Operand(kIsInternalizedMask)); - __ Branch(not_unique, eq, at, Operand(zero_reg)); + STATIC_ASSERT(kInternalizedTag == 0); + __ And(at, hash, Operand(kIsNotInternalizedMask)); + __ Branch(not_unique, ne, at, Operand(zero_reg)); __ bind(&unique); } @@ -1261,8 +1261,8 @@ static void KeyedStoreGenerateGenericHelper( t0, slow); ASSERT(receiver_map.is(a3)); // Transition code expects map in a3 - AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, - FAST_DOUBLE_ELEMENTS); + AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); @@ -1275,7 +1275,7 @@ static void KeyedStoreGenerateGenericHelper( t0, slow); ASSERT(receiver_map.is(a3)); // Transition code expects map in a3 - mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); + mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode, slow); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); @@ -1291,7 +1291,7 @@ static void KeyedStoreGenerateGenericHelper( t0, slow); ASSERT(receiver_map.is(a3)); // Transition code expects map in a3 - mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); + mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); @@ -1495,8 +1495,8 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { // Must return the modified receiver in v0. if (!FLAG_trace_elements_transitions) { Label fail; - AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, - FAST_DOUBLE_ELEMENTS); + AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail); __ Ret(USE_DELAY_SLOT); __ mov(v0, a2); @@ -1518,8 +1518,8 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject( // Must return the modified receiver in v0. if (!FLAG_trace_elements_transitions) { Label fail; - AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, - FAST_ELEMENTS); + AllocationSiteMode mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, + FAST_ELEMENTS); ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail); __ Ret(USE_DELAY_SLOT); __ mov(v0, a2); @@ -1541,8 +1541,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm, // ----------------------------------- // Get the receiver from the stack and probe the stub cache. 
- Code::Flags flags = - Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode); + Code::Flags flags = Code::ComputeFlags( + Code::STUB, MONOMORPHIC, strict_mode, + Code::NORMAL, Code::STORE_IC); Isolate::Current()->stub_cache()->GenerateProbe( masm, flags, a1, a2, a3, t0, t1, t2); diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc index 8109e8a288..65b4a575f7 100644 --- a/deps/v8/src/mips/lithium-codegen-mips.cc +++ b/deps/v8/src/mips/lithium-codegen-mips.cc @@ -159,9 +159,9 @@ bool LCodeGen::GeneratePrologue() { // The following three instructions must remain together and unmodified // for code aging to work properly. __ Push(ra, fp, cp, a1); - // Add unused load of ip to ensure prologue sequence is identical for + // Add unused nop to ensure prologue sequence is identical for // full-codegen and lithium-codegen. - __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ nop(Assembler::CODE_AGE_SEQUENCE_NOP); // Adj. FP to point to saved FP. __ Addu(fp, sp, Operand(2 * kPointerSize)); } @@ -332,8 +332,7 @@ bool LCodeGen::GenerateDeoptJumpTable() { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); Label table_start; __ bind(&table_start); - Label needs_frame_not_call; - Label needs_frame_is_call; + Label needs_frame; for (int i = 0; i < deopt_jump_table_.length(); i++) { __ bind(&deopt_jump_table_[i].label); Address entry = deopt_jump_table_[i].address; @@ -346,43 +345,22 @@ bool LCodeGen::GenerateDeoptJumpTable() { } __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry))); if (deopt_jump_table_[i].needs_frame) { - if (type == Deoptimizer::LAZY) { - if (needs_frame_is_call.is_bound()) { - __ Branch(&needs_frame_is_call); - } else { - __ bind(&needs_frame_is_call); - __ MultiPush(cp.bit() | fp.bit() | ra.bit()); - // This variant of deopt can only be used with stubs. Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - ASSERT(info()->IsStub()); - __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); - __ push(scratch0()); - __ Addu(fp, sp, Operand(2 * kPointerSize)); - __ Call(t9); - } + if (needs_frame.is_bound()) { + __ Branch(&needs_frame); } else { - if (needs_frame_not_call.is_bound()) { - __ Branch(&needs_frame_not_call); - } else { - __ bind(&needs_frame_not_call); - __ MultiPush(cp.bit() | fp.bit() | ra.bit()); - // This variant of deopt can only be used with stubs. Since we don't - // have a function pointer to install in the stack frame that we're - // building, install a special marker there instead. - ASSERT(info()->IsStub()); - __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); - __ push(scratch0()); - __ Addu(fp, sp, Operand(2 * kPointerSize)); - __ Jump(t9); - } - } - } else { - if (type == Deoptimizer::LAZY) { + __ bind(&needs_frame); + __ MultiPush(cp.bit() | fp.bit() | ra.bit()); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. 
+ ASSERT(info()->IsStub()); + __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ Addu(fp, sp, Operand(2 * kPointerSize)); __ Call(t9); - } else { - __ Jump(t9); } + } else { + __ Call(t9); } } __ RecordComment("]"); @@ -766,7 +744,8 @@ void LCodeGen::DeoptimizeIf(Condition cc, if (FLAG_deopt_every_n_times == 1 && !info()->IsStub() && info()->opt_count() == id) { - __ Jump(entry, RelocInfo::RUNTIME_ENTRY); + ASSERT(frame_is_built_); + __ Call(entry, RelocInfo::RUNTIME_ENTRY); return; } @@ -780,13 +759,8 @@ void LCodeGen::DeoptimizeIf(Condition cc, } ASSERT(info()->IsStub() || frame_is_built_); - bool needs_lazy_deopt = info()->IsStub(); if (cc == al && frame_is_built_) { - if (needs_lazy_deopt) { - __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2); - } else { - __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2); - } + __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2); } else { // We often have several deopts to the same entry, reuse the last // jump entry if this is the case. @@ -1050,11 +1024,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) { CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } - case CodeStub::StringAdd: { - StringAddStub stub(NO_STRING_ADD_FLAGS); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); - break; - } case CodeStub::StringCompare: { StringCompareStub stub; CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); @@ -1971,11 +1940,11 @@ int LCodeGen::GetNextEmittedBlock() const { template void LCodeGen::EmitBranch(InstrType instr, Condition cc, Register src1, const Operand& src2) { - int right_block = instr->FalseDestination(chunk_); int left_block = instr->TrueDestination(chunk_); + int right_block = instr->FalseDestination(chunk_); int next_block = GetNextEmittedBlock(); - if (right_block == left_block) { + if (right_block == left_block || cc == al) { EmitGoto(left_block); } else if (left_block == next_block) { __ Branch(chunk_->GetAssemblyLabel(right_block), @@ -2015,6 +1984,25 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) { } +void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) { + Representation r = instr->hydrogen()->value()->representation(); + if (r.IsSmiOrInteger32() || r.IsDouble()) { + EmitBranch(instr, al, zero_reg, Operand(zero_reg)); + } else { + ASSERT(r.IsTagged()); + Register reg = ToRegister(instr->value()); + HType type = instr->hydrogen()->value()->type(); + if (type.IsTaggedNumber()) { + EmitBranch(instr, al, zero_reg, Operand(zero_reg)); + } + __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); + __ lw(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + EmitBranch(instr, eq, scratch0(), Operand(at)); + } +} + + void LCodeGen::DoBranch(LBranch* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsInteger32() || r.IsSmi()) { @@ -2183,7 +2171,7 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { } -void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { +void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { LOperand* left = instr->left(); LOperand* right = instr->right(); Condition cond = TokenToCondition(instr->op(), false); @@ -2801,6 +2789,19 @@ void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) { } +void LCodeGen::DoLinkObjectInList(LLinkObjectInList* instr) { + Register object = ToRegister(instr->object()); + ExternalReference sites_list_address = instr->GetReference(isolate()); + 
+  __ li(at, Operand(sites_list_address));
+  __ lw(at, MemOperand(at));
+  __ sw(at, FieldMemOperand(object,
+                            instr->hydrogen()->store_field().offset()));
+  __ li(at, Operand(sites_list_address));
+  __ sw(object, MemOperand(at));
+}
+
+
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -3360,6 +3361,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   __ bind(&receiver_ok);
 }
+
 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   Register receiver = ToRegister(instr->receiver());
   Register function = ToRegister(instr->function());
@@ -3886,6 +3888,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
   __ sub_d(f0, f12, f14);
 }
+
 void LCodeGen::DoDeferredRandom(LRandom* instr) {
   __ PrepareCallCFunction(1, scratch0());
   __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
@@ -4043,7 +4046,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   __ li(a2, Operand(instr->hydrogen()->property_cell()));
   ElementsKind kind = instr->hydrogen()->elements_kind();
   AllocationSiteOverrideMode override_mode =
-      (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
           ? DISABLE_ALLOCATION_SITES
           : DONT_OVERRIDE;
   ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
@@ -4461,7 +4464,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   Register object = ToRegister(instr->object());
   Register temp = ToRegister(instr->temp());
   Label fail;
-  __ TestJSArrayForAllocationSiteInfo(object, temp, ne, &fail);
+  __ TestJSArrayForAllocationMemento(object, temp, ne, &fail);
   DeoptimizeIf(al, instr->environment());
   __ bind(&fail);
 }
@@ -4470,7 +4473,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   __ push(ToRegister(instr->left()));
   __ push(ToRegister(instr->right()));
-  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  StringAddStub stub(instr->hydrogen()->flags());
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
 }
@@ -5277,80 +5280,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
 }
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
-  class DeferredAllocateObject: public LDeferredCode {
-   public:
-    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LAllocateObject* instr_;
-  };
-
-  DeferredAllocateObject* deferred =
-      new(zone()) DeferredAllocateObject(this, instr);
-
-  Register result = ToRegister(instr->result());
-  Register scratch = ToRegister(instr->temp());
-  Register scratch2 = ToRegister(instr->temp2());
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
-  int instance_size = initial_map->instance_size();
-  ASSERT(initial_map->pre_allocated_property_fields() +
-         initial_map->unused_property_fields() -
-         initial_map->inobject_properties() == 0);
-
-  __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
-              TAG_OBJECT);
-
-  __ bind(deferred->exit());
-  if (FLAG_debug_code) {
-    Label is_in_new_space;
-    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
-    __ Abort("Allocated object is not in new-space");
-    __ bind(&is_in_new_space);
-  }
-
-  // Load the initial map.
-  Register map = scratch;
-  __ LoadHeapObject(map, constructor);
-  __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // Initialize map and fields of the newly allocated object.
-  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
-  __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
-  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
-  __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
-  __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
-  if (initial_map->inobject_properties() != 0) {
-    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
-    for (int i = 0; i < initial_map->inobject_properties(); i++) {
-      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
-      __ sw(scratch, FieldMemOperand(result, property_offset));
-    }
-  }
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
-  Register result = ToRegister(instr->result());
-  Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
-  int instance_size = initial_map->instance_size();
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ mov(result, zero_reg);
-
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  __ li(a0, Operand(Smi::FromInt(instance_size)));
-  __ push(a0);
-  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
-  __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
 void LCodeGen::DoAllocate(LAllocate* instr) {
   class DeferredAllocate: public LDeferredCode {
    public:
@@ -5713,33 +5642,6 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
 }
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
-  Register object = ToRegister(instr->object());
-  Register key = ToRegister(instr->key());
-  Register strict = scratch0();
-  __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
-  __ Push(object, key, strict);
-  ASSERT(instr->HasPointerMap());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator safepoint_generator(
-      this, pointers, Safepoint::kLazyDeopt);
-  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
-  Register obj = ToRegister(instr->object());
-  Register key = ToRegister(instr->key());
-  __ Push(key, obj);
-  ASSERT(instr->HasPointerMap());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
-  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 3d31ef10ba..1cba8cf468 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -148,7 +148,6 @@ class LCodeGen BASE_EMBEDDED {
   void DoDeferredRandom(LRandom* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
-  void DoDeferredAllocateObject(LAllocateObject* instr);
   void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
index 23a8f32f76..9705e1f41a 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -222,7 +222,6 @@ void LGapResolver::EmitMove(int index) {
       ASSERT(destination->IsStackSlot());
       __ sw(source_register, cgen_->ToMemOperand(destination));
     }
-
   } else if (source->IsStackSlot()) {
     MemOperand source_operand = cgen_->ToMemOperand(source);
     if (destination->IsRegister()) {
@@ -259,6 +258,10 @@ void LGapResolver::EmitMove(int index) {
       } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
+    } else if (source->IsDoubleRegister()) {
+      DoubleRegister result = cgen_->ToDoubleRegister(destination);
+      double v = cgen_->ToDouble(constant_source);
+      __ Move(result, v);
     } else {
       ASSERT(destination->IsStackSlot());
       ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 638eaa4e8b..c64533cdfc 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -186,7 +186,8 @@ LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
   return new(zone()) LDebugBreak();
 }
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   left()->PrintTo(stream);
   stream->Add(" %s ", Token::String(op()));
@@ -276,6 +277,24 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
 }
+ExternalReference LLinkObjectInList::GetReference(Isolate* isolate) {
+  switch (hydrogen()->known_list()) {
+    case HLinkObjectInList::ALLOCATION_SITE_LIST:
+      return ExternalReference::allocation_sites_list_address(isolate);
+  }
+
+  UNREACHABLE();
+  // Return a dummy value
+  return ExternalReference::isolate_address(isolate);
+}
+
+
+void LLinkObjectInList::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" offset %d", hydrogen()->store_field().offset());
+}
+
+
 void LLoadContextSlot::PrintDataTo(StringStream* stream) {
   context()->PrintTo(stream);
   stream->Add("[%d]", slot_index());
@@ -329,7 +348,6 @@ void LCallNewArray::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
   constructor()->PrintTo(stream);
   stream->Add(" #%d / ", arity());
-  ASSERT(hydrogen()->property_cell()->value()->IsSmi());
   ElementsKind kind = hydrogen()->elements_kind();
   stream->Add(" (%s) ", ElementsKindToString(kind));
 }
@@ -1605,8 +1623,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
 }
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
-    HCompareIDAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
   Representation r = instr->representation();
   if (r.IsSmiOrInteger32()) {
     ASSERT(instr->left()->representation().IsSmiOrInteger32());
@@ -1614,14 +1632,14 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
         instr->right()->representation()));
     LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseRegisterOrConstantAtStart(instr->right());
-    return new(zone()) LCmpIDAndBranch(left, right);
+    return new(zone()) LCompareNumericAndBranch(left, right);
   } else {
     ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterAtStart(instr->right());
-    return new(zone()) LCmpIDAndBranch(left, right);
+    return new(zone()) LCompareNumericAndBranch(left, right);
   }
 }
@@ -1918,6 +1936,18 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
 }
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
+  return new(zone())
+      LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
+}
+
+
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -2023,6 +2053,13 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
 }
+LInstruction* LChunkBuilder::DoLinkObjectInList(HLinkObjectInList* instr) {
+  LOperand* object = UseRegister(instr->value());
+  LLinkObjectInList* result = new(zone()) LLinkObjectInList(object);
+  return result;
+}
+
+
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
@@ -2311,14 +2348,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
 }
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
-  info()->MarkAsDeferredCalling();
-  LAllocateObject* result =
-      new(zone()) LAllocateObject(TempRegister(), TempRegister());
-  return AssignPointerMap(DefineAsRegister(result));
-}
-
-
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
   info()->MarkAsDeferredCalling();
   LOperand* size = instr->size()->IsConstant()
@@ -2341,14 +2370,6 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
 }
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
-  LOperand* object = UseFixed(instr->object(), a0);
-  LOperand* key = UseFixed(instr->key(), a1);
-  LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
   ASSERT(argument_count_ == 0);
   allocator_->MarkAsOsrEntry();
@@ -2521,14 +2542,6 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
 }
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
-  LOperand* key = UseRegisterAtStart(instr->key());
-  LOperand* object = UseRegisterAtStart(instr->object());
-  LIn* result = new(zone()) LIn(key, object);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
   LOperand* object = UseFixed(instr->enumerable(), a0);
   LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 06d30d03de..83a37c6230 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -49,7 +49,6 @@ class LCodeGen;
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
   V(AccessArgumentsAt) \
   V(AddI) \
-  V(AllocateObject) \
   V(Allocate) \
   V(ApplyArguments) \
   V(ArgumentsElements) \
@@ -81,7 +80,7 @@ class LCodeGen;
   V(ClampTToUint8) \
   V(ClassOfTestAndBranch) \
   V(CmpConstantEqAndBranch) \
-  V(CmpIDAndBranch) \
+  V(CompareNumericAndBranch) \
   V(CmpObjectEqAndBranch) \
   V(CmpMapAndBranch) \
   V(CmpT) \
@@ -92,7 +91,6 @@ class LCodeGen;
   V(Context) \
   V(DebugBreak) \
   V(DeclareGlobals) \
-  V(DeleteProperty) \
   V(Deoptimize) \
   V(DivI) \
   V(DoubleToI) \
@@ -106,7 +104,6 @@ class LCodeGen;
   V(Goto) \
   V(HasCachedArrayIndexAndBranch) \
   V(HasInstanceTypeAndBranch) \
-  V(In) \
   V(InstanceOf) \
   V(InstanceOfKnownGlobal) \
   V(InstanceSize) \
@@ -118,10 +115,12 @@ class LCodeGen;
   V(IsConstructCallAndBranch) \
   V(IsObjectAndBranch) \
   V(IsStringAndBranch) \
+  V(IsNumberAndBranch) \
   V(IsSmiAndBranch) \
   V(IsUndetectableAndBranch) \
   V(Label) \
   V(LazyBailout) \
+  V(LinkObjectInList) \
   V(LoadContextSlot) \
   V(LoadExternalArrayPointer) \
   V(LoadFunctionPrototype) \
@@ -711,9 +710,9 @@ class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
 };
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
  public:
-  LCmpIDAndBranch(LOperand* left, LOperand* right) {
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
@@ -721,8 +720,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
-  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
   Token::Value op() const { return hydrogen()->token(); }
   bool is_double() const {
@@ -919,6 +919,19 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
 };
+class LIsNumberAndBranch: public LControlInstruction<1, 0> {
+ public:
+  explicit LIsNumberAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
+};
+
+
 class LIsStringAndBranch: public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1650,6 +1663,23 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
 };
+class LLinkObjectInList: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LLinkObjectInList(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  ExternalReference GetReference(Isolate* isolate);
+
+  DECLARE_CONCRETE_INSTRUCTION(LinkObjectInList, "link-object-in-list")
+  DECLARE_HYDROGEN_ACCESSOR(LinkObjectInList)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
@@ -2423,21 +2453,6 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
 };
-class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
- public:
-  LAllocateObject(LOperand* temp, LOperand* temp2) {
-    temps_[0] = temp;
-    temps_[1] = temp2;
-  }
-
-  LOperand* temp() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
 class LAllocate: public LTemplateInstruction<1, 2, 2> {
  public:
   LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
@@ -2524,20 +2539,6 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
 };
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
-  LDeleteProperty(LOperand* object, LOperand* key) {
-    inputs_[0] = object;
-    inputs_[1] = key;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-};
-
-
 class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry() {}
@@ -2559,20 +2560,6 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
 };
-class LIn: public LTemplateInstruction<1, 2, 0> {
- public:
-  LIn(LOperand* key, LOperand* object) {
-    inputs_[0] = key;
-    inputs_[1] = object;
-  }
-
-  LOperand* key() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
 class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LForInPrepareMap(LOperand* object) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 47e6ff93c7..8a44185ed7 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -768,6 +768,7 @@ void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
   }
 }
+
 //------------Pseudo-instructions-------------
 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
@@ -1021,6 +1022,7 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
   mtc1(t8, fd);
 }
+
 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
   if (kArchVariant == kLoongson && fd.is(fs)) {
     mfc1(t8, FPURegister::from_code(fs.code() + 1));
@@ -1031,6 +1033,7 @@ void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
   }
 }
+
 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
   if (kArchVariant == kLoongson && fd.is(fs)) {
     mfc1(t8, FPURegister::from_code(fs.code() + 1));
@@ -2639,6 +2642,7 @@ void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
     nop();
 }
+
 void MacroAssembler::DropAndRet(int drop) {
   Ret(USE_DELAY_SLOT);
   addiu(sp, sp, drop * kPointerSize);
@@ -3205,9 +3209,13 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
 void MacroAssembler::JumpIfNotUniqueName(Register reg,
                                          Label* not_unique_name) {
-  STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
-  Branch(not_unique_name, lt, reg, Operand(kIsInternalizedMask));
-  Branch(not_unique_name, gt, reg, Operand(SYMBOL_TYPE));
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  Label succeed;
+  And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+  Branch(&succeed, eq, at, Operand(zero_reg));
+  Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
+
+  bind(&succeed);
 }
@@ -5474,26 +5482,26 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
 }
-void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+void MacroAssembler::TestJSArrayForAllocationMemento(
     Register receiver_reg,
     Register scratch_reg,
     Condition cond,
-    Label* allocation_info_present) {
-  Label no_info_available;
+    Label* allocation_memento_present) {
+  Label no_memento_available;
   ExternalReference new_space_start =
       ExternalReference::new_space_start(isolate());
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
   Addu(scratch_reg, receiver_reg,
-       Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
-  Branch(&no_info_available, lt, scratch_reg, Operand(new_space_start));
+       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+  Branch(&no_memento_available, lt, scratch_reg, Operand(new_space_start));
   li(at, Operand(new_space_allocation_top));
   lw(at, MemOperand(at));
-  Branch(&no_info_available, gt, scratch_reg, Operand(at));
-  lw(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
-  Branch(allocation_info_present, cond, scratch_reg,
-         Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
-  bind(&no_info_available);
+  Branch(&no_memento_available, gt, scratch_reg, Operand(at));
+  lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+  Branch(allocation_memento_present, cond, scratch_reg,
+         Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
+  bind(&no_memento_available);
 }
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index ffae2fd69e..bc3e7c48b4 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -1460,16 +1460,16 @@ class MacroAssembler: public Assembler {
   // in a0. Assumes that any other register can be used as a scratch.
   void CheckEnumCache(Register null_value, Label* call_runtime);
-  // AllocationSiteInfo support. Arrays may have an associated
-  // AllocationSiteInfo object that can be checked for in order to pretransition
+  // AllocationMemento support. Arrays may have an associated
+  // AllocationMemento object that can be checked for in order to pretransition
   // to another type.
   // On entry, receiver_reg should point to the array object.
   // scratch_reg gets clobbered.
   // If allocation info is present, jump to allocation_info_present
-  void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
-                                        Register scratch_reg,
-                                        Condition cond,
-                                        Label* allocation_info_present);
+  void TestJSArrayForAllocationMemento(Register receiver_reg,
+                                       Register scratch_reg,
+                                       Condition cond,
+                                       Label* allocation_memento_present);
  private:
   void CallCFunctionHelper(Register function,
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 52211904d9..89d8e68d5e 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -429,89 +429,56 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
 }
+void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
+    MacroAssembler* masm,
+    Handle<JSObject> holder,
+    Register holder_reg,
+    Handle<Name> name,
+    Label* miss) {
+  if (holder->IsJSGlobalObject()) {
+    GenerateCheckPropertyCell(
+        masm, Handle<GlobalObject>::cast(holder), name, scratch1(), miss);
+  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+    GenerateDictionaryNegativeLookup(
+        masm, miss, holder_reg, name, scratch1(), scratch2());
+  }
+}
+
+
 // Generate StoreTransition code, value is passed in a0 register.
 // After executing generated code, the receiver_reg and name_reg
 // may be clobbered.
-void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
-                                           Handle<JSObject> object,
-                                           LookupResult* lookup,
-                                           Handle<Map> transition,
-                                           Handle<Name> name,
-                                           Register receiver_reg,
-                                           Register name_reg,
-                                           Register value_reg,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Register scratch3,
-                                           Label* miss_label,
-                                           Label* miss_restore_name,
-                                           Label* slow) {
+void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+                                                    Handle<JSObject> object,
+                                                    LookupResult* lookup,
+                                                    Handle<Map> transition,
+                                                    Handle<Name> name,
+                                                    Register receiver_reg,
+                                                    Register storage_reg,
+                                                    Register value_reg,
+                                                    Register scratch1,
+                                                    Register scratch2,
+                                                    Register scratch3,
+                                                    Label* miss_label,
+                                                    Label* slow) {
   // a0 : value.
   Label exit;
-  // Check that the map of the object hasn't changed.
-  __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
-              DO_SMI_CHECK);
-
-  // Perform global security token check if needed.
-  if (object->IsJSGlobalProxy()) {
-    __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
-  }
-
   int descriptor = transition->LastAdded();
   DescriptorArray* descriptors = transition->instance_descriptors();
   PropertyDetails details = descriptors->GetDetails(descriptor);
   Representation representation = details.representation();
   ASSERT(!representation.IsNone());
-  // Ensure no transitions to deprecated maps are followed.
-  __ CheckMapDeprecated(transition, scratch1, miss_label);
-
-  // Check that we are allowed to write this.
-  if (object->GetPrototype()->IsJSObject()) {
-    JSObject* holder;
-    // holder == object indicates that no property was found.
-    if (lookup->holder() != *object) {
-      holder = lookup->holder();
-    } else {
-      // Find the top object.
-      holder = *object;
-      do {
-        holder = JSObject::cast(holder->GetPrototype());
-      } while (holder->GetPrototype()->IsJSObject());
-    }
-    Register holder_reg = CheckPrototypes(
-        object, receiver_reg, Handle<JSObject>(holder), name_reg,
-        scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
-    // If no property was found, and the holder (the last object in the
-    // prototype chain) is in slow mode, we need to do a negative lookup on the
-    // holder.
-    if (lookup->holder() == *object) {
-      if (holder->IsJSGlobalObject()) {
-        GenerateCheckPropertyCell(
-            masm,
-            Handle<GlobalObject>(GlobalObject::cast(holder)),
-            name,
-            scratch1,
-            miss_restore_name);
-      } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
-        GenerateDictionaryNegativeLookup(
-            masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
-      }
-    }
-  }
-
-  Register storage_reg = name_reg;
-
   if (details.type() == CONSTANT_FUNCTION) {
     Handle<HeapObject> constant(
         HeapObject::cast(descriptors->GetValue(descriptor)));
     __ LoadHeapObject(scratch1, constant);
-    __ Branch(miss_restore_name, ne, value_reg, Operand(scratch1));
+    __ Branch(miss_label, ne, value_reg, Operand(scratch1));
   } else if (FLAG_track_fields && representation.IsSmi()) {
-    __ JumpIfNotSmi(value_reg, miss_restore_name);
+    __ JumpIfNotSmi(value_reg, miss_label);
   } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_restore_name);
+    __ JumpIfSmi(value_reg, miss_label);
   } else if (FLAG_track_double_fields && representation.IsDouble()) {
     Label do_store, heap_number;
     __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
@@ -525,7 +492,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
     __ bind(&heap_number);
     __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
-                miss_restore_name, DONT_DO_SMI_CHECK);
+                miss_label, DONT_DO_SMI_CHECK);
     __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
     __ bind(&do_store);
@@ -555,8 +522,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
     __ li(scratch1, Operand(transition));
     __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-    // Update the write barrier for the map field and pass the now unused
-    // name_reg as scratch register.
+    // Update the write barrier for the map field.
     __ RecordWriteField(receiver_reg,
                         HeapObject::kMapOffset,
                         scratch1,
@@ -594,19 +560,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
   }
   if (!FLAG_track_fields || !representation.IsSmi()) {
-    // Skip updating write barrier if storing a smi.
-    __ JumpIfSmi(value_reg, &exit);
-
     // Update the write barrier for the array address.
-    // Pass the now unused name_reg as a scratch register.
     if (!FLAG_track_double_fields || !representation.IsDouble()) {
-      __ mov(name_reg, value_reg);
-    } else {
-      ASSERT(storage_reg.is(name_reg));
+      __ mov(storage_reg, value_reg);
     }
     __ RecordWriteField(receiver_reg,
                         offset,
-                        name_reg,
+                        storage_reg,
                         scratch1,
                         kRAHasNotBeenSaved,
                         kDontSaveFPRegs,
@@ -626,19 +586,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
   }
   if (!FLAG_track_fields || !representation.IsSmi()) {
-    // Skip updating write barrier if storing a smi.
-    __ JumpIfSmi(value_reg, &exit);
-
     // Update the write barrier for the array address.
-    // Ok to clobber receiver_reg and name_reg, since we return.
     if (!FLAG_track_double_fields || !representation.IsDouble()) {
-      __ mov(name_reg, value_reg);
-    } else {
-      ASSERT(storage_reg.is(name_reg));
+      __ mov(storage_reg, value_reg);
     }
     __ RecordWriteField(scratch1,
                         offset,
-                        name_reg,
+                        storage_reg,
                         receiver_reg,
                         kRAHasNotBeenSaved,
                         kDontSaveFPRegs,
@@ -659,27 +613,18 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
 // When leaving generated code after success, the receiver_reg and name_reg
 // may be clobbered. Upon branch to miss_label, the receiver and name
 // registers have their original values.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      Handle<JSObject> object,
-                                      LookupResult* lookup,
-                                      Register receiver_reg,
-                                      Register name_reg,
-                                      Register value_reg,
-                                      Register scratch1,
-                                      Register scratch2,
-                                      Label* miss_label) {
+void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
+                                               Handle<JSObject> object,
+                                               LookupResult* lookup,
+                                               Register receiver_reg,
+                                               Register name_reg,
+                                               Register value_reg,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* miss_label) {
   // a0 : value
   Label exit;
-  // Check that the map of the object hasn't changed.
-  __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
-              DO_SMI_CHECK);
-
-  // Perform global security token check if needed.
-  if (object->IsJSGlobalProxy()) {
-    __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
-  }
-
   // Stub never generated for non-global objects that require access
   // checks.
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@@ -1247,6 +1192,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                        int save_at_depth,
                                        Label* miss,
                                        PrototypeCheckType check) {
+  // Make sure that the type feedback oracle harvests the receiver map.
+  // TODO(svenpanne) Remove this hack when all ICs are reworked.
+  __ li(scratch1, Operand(Handle<Map>(object->map())));
+
   Handle<JSObject> first = object;
   // Make sure there's no overlap between holder and object registers.
   ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -1348,7 +1297,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
 }
-void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
+void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+                                                 Label* success,
                                                  Label* miss) {
   if (!miss->is_unused()) {
     __ Branch(success);
@@ -1358,6 +1308,17 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
 }
+void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
+                                                  Label* success,
+                                                  Label* miss) {
+  if (!miss->is_unused()) {
+    __ b(success);
+    GenerateRestoreName(masm(), miss, name);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+  }
+}
+
+
 Register BaseLoadStubCompiler::CallbackHandlerFrontend(
     Handle<JSObject> object,
     Register object_reg,
@@ -1399,7 +1360,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
     __ Branch(&miss, ne, scratch2(), Operand(callback));
   }
-  HandlerFrontendFooter(success, &miss);
+  HandlerFrontendFooter(name, success, &miss);
   return reg;
 }
@@ -1420,7 +1381,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
     GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
   }
-  HandlerFrontendFooter(success, &miss);
+  HandlerFrontendFooter(name, success, &miss);
 }
@@ -1744,11 +1705,11 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
-  Handle<Object> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
-  Handle<Cell> kind_feedback_cell =
-      isolate()->factory()->NewCell(kind);
+  Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
+  site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
+  Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
   __ li(a0, Operand(argc));
-  __ li(a2, Operand(kind_feedback_cell));
+  __ li(a2, Operand(site_feedback_cell));
   __ li(a1, Operand(function));
   ArrayConstructorStub stub(isolate());
@@ -2866,15 +2827,13 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
 Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<Name> name,
     Handle<JSObject> object,
     Handle<JSObject> holder,
+    Handle<Name> name,
     Handle<ExecutableAccessorInfo> callback) {
-  Label miss;
-  // Check that the maps haven't changed.
-  __ JumpIfSmi(receiver(), &miss);
-  CheckPrototypes(object, receiver(), holder,
-                  scratch1(), scratch2(), scratch3(), name, &miss);
+  Label success;
+  HandlerFrontend(object, receiver(), holder, name, &success);
+  __ bind(&success);
   // Stub never generated for non-global objects that require access
   // checks.
@@ -2882,19 +2841,17 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
   __ push(receiver());  // Receiver.
   __ li(at, Operand(callback));  // Callback info.
-  __ Push(at, this->name(), value());
+  __ push(at);
+  __ li(at, Operand(name));
+  __ Push(at, value());
   // Do tail-call to the runtime system.
   ExternalReference store_callback_property =
       ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
   __ TailCallExternalReference(store_callback_property, 4, 1);
-  // Handle store cache miss.
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
   // Return the generated code.
-  return GetICCode(kind(), Code::CALLBACKS, name);
+  return GetCode(kind(), Code::CALLBACKS, name);
 }
@@ -3144,7 +3101,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
     __ Branch(&miss, eq, t0, Operand(at));
   }
-  HandlerFrontendFooter(&success, &miss);
+  HandlerFrontendFooter(name, &success, &miss);
   __ bind(&success);
   Counters* counters = isolate()->counters();
@@ -3157,7 +3114,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
 }
-Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
+Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
     MapHandleList* receiver_maps,
     CodeHandleList* handlers,
     Handle<Name> name,
--
cgit v1.2.1