Diffstat (limited to 'deps/v8/src/mips/code-stubs-mips.cc')
-rw-r--r-- | deps/v8/src/mips/code-stubs-mips.cc | 600
1 file changed, 180 insertions, 420 deletions
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 69b957afa8..f984b3a7b7 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -61,6 +61,16 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
 }
 
 
+void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a2 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ = NULL;
+}
+
+
 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -227,8 +237,42 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
 }
 
 
+void UnaryOpStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(UnaryOpIC_Miss);
+}
+
+
+void StoreGlobalStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a1, a2, a0 };
+  descriptor->register_param_count_ = 3;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a0, a3, a1, a2 };
+  descriptor->register_param_count_ = 4;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
+}
+
+
 #define __ ACCESS_MASM(masm)
+
 
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Label* slow,
                                           Condition cc);
@@ -1181,17 +1225,10 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
 
   // Now that we have the types we might as well check for
   // internalized-internalized.
-  Label not_internalized;
-  STATIC_ASSERT(kInternalizedTag != 0);
-  __ And(t2, a2, Operand(kIsNotStringMask | kIsInternalizedMask));
-  __ Branch(&not_internalized, ne, t2,
-            Operand(kInternalizedTag | kStringTag));
-
-  __ And(a3, a3, Operand(kIsNotStringMask | kIsInternalizedMask));
-  __ Branch(&return_not_equal, eq, a3,
-            Operand(kInternalizedTag | kStringTag));
-
-  __ bind(&not_internalized);
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  __ Or(a2, a2, Operand(a3));
+  __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+  __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
 }
 
 
@@ -1227,15 +1264,15 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
   // a2 is object type of rhs.
   Label object_test;
-  STATIC_ASSERT(kInternalizedTag != 0);
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   __ And(at, a2, Operand(kIsNotStringMask));
   __ Branch(&object_test, ne, at, Operand(zero_reg));
-  __ And(at, a2, Operand(kIsInternalizedMask));
-  __ Branch(possible_strings, eq, at, Operand(zero_reg));
+  __ And(at, a2, Operand(kIsNotInternalizedMask));
+  __ Branch(possible_strings, ne, at, Operand(zero_reg));
   __ GetObjectType(rhs, a3, a3);
   __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
-  __ And(at, a3, Operand(kIsInternalizedMask));
-  __ Branch(possible_strings, eq, at, Operand(zero_reg));
+  __ And(at, a3, Operand(kIsNotInternalizedMask));
+  __ Branch(possible_strings, ne, at, Operand(zero_reg));
 
   // Both are internalized strings. We already checked they weren't the same
   // pointer so they are not equal.
@@ -1266,7 +1303,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                          Register scratch1,
                                                          Register scratch2,
                                                          Register scratch3,
-                                                         bool object_is_smi,
                                                          Label* not_found) {
   // Use of registers. Register result is used as a temporary.
   Register number_string_cache = result;
@@ -1289,37 +1325,35 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
   Isolate* isolate = masm->isolate();
   Label is_smi;
   Label load_result_from_cache;
-  if (!object_is_smi) {
-    __ JumpIfSmi(object, &is_smi);
-    __ CheckMap(object,
-                scratch1,
-                Heap::kHeapNumberMapRootIndex,
-                not_found,
-                DONT_DO_SMI_CHECK);
+  __ JumpIfSmi(object, &is_smi);
+  __ CheckMap(object,
+              scratch1,
+              Heap::kHeapNumberMapRootIndex,
+              not_found,
+              DONT_DO_SMI_CHECK);
 
-    STATIC_ASSERT(8 == kDoubleSize);
-    __ Addu(scratch1,
-            object,
-            Operand(HeapNumber::kValueOffset - kHeapObjectTag));
-    __ lw(scratch2, MemOperand(scratch1, kPointerSize));
-    __ lw(scratch1, MemOperand(scratch1, 0));
-    __ Xor(scratch1, scratch1, Operand(scratch2));
-    __ And(scratch1, scratch1, Operand(mask));
-
-    // Calculate address of entry in string cache: each entry consists
-    // of two pointer sized fields.
-    __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
-    __ Addu(scratch1, number_string_cache, scratch1);
-
-    Register probe = mask;
-    __ lw(probe,
-          FieldMemOperand(scratch1, FixedArray::kHeaderSize));
-    __ JumpIfSmi(probe, not_found);
-    __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
-    __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
-    __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
-    __ Branch(not_found);
-  }
+  STATIC_ASSERT(8 == kDoubleSize);
+  __ Addu(scratch1,
+          object,
+          Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+  __ lw(scratch2, MemOperand(scratch1, kPointerSize));
+  __ lw(scratch1, MemOperand(scratch1, 0));
+  __ Xor(scratch1, scratch1, Operand(scratch2));
+  __ And(scratch1, scratch1, Operand(mask));
+
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+  __ Addu(scratch1, number_string_cache, scratch1);
+
+  Register probe = mask;
+  __ lw(probe,
+        FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+  __ JumpIfSmi(probe, not_found);
+  __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+  __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+  __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+  __ Branch(not_found);
 
   __ bind(&is_smi);
   Register scratch = scratch1;
@@ -1332,7 +1366,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
   __ Addu(scratch, number_string_cache, scratch);
 
   // Check if the entry is the smi we are looking for.
-  Register probe = mask;
   __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
   __ Branch(not_found, ne, object, Operand(probe));
 
@@ -1354,7 +1387,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
   __ lw(a1, MemOperand(sp, 0));  // Second argument.
 
   // Generate code to lookup number in the number string cache.
-  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
+  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, &runtime);
   __ DropAndRet(1);
 
   __ bind(&runtime);
@@ -1586,294 +1619,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
 }
 
 
-void UnaryOpStub::PrintName(StringStream* stream) {
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name = NULL;  // Make g++ happy.
-  switch (mode_) {
-    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
-  }
-  stream->Add("UnaryOpStub_%s_%s_%s",
-              op_name,
-              overwrite_name,
-              UnaryOpIC::GetName(operand_type_));
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::Generate(MacroAssembler* masm) {
-  switch (operand_type_) {
-    case UnaryOpIC::UNINITIALIZED:
-      GenerateTypeTransition(masm);
-      break;
-    case UnaryOpIC::SMI:
-      GenerateSmiStub(masm);
-      break;
-    case UnaryOpIC::NUMBER:
-      GenerateNumberStub(masm);
-      break;
-    case UnaryOpIC::GENERIC:
-      GenerateGenericStub(masm);
-      break;
-  }
-}
-
-
-void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  // Argument is in a0 and v0 at this point, so we can overwrite a0.
-  __ li(a2, Operand(Smi::FromInt(op_)));
-  __ li(a1, Operand(Smi::FromInt(mode_)));
-  __ li(a0, Operand(Smi::FromInt(operand_type_)));
-  __ Push(v0, a2, a1, a0);
-
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateSmiStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateSmiStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeSub(masm, &non_smi, &slow);
-  __ bind(&non_smi);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
-  Label non_smi;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
-                                     Label* non_smi,
-                                     Label* slow) {
-  __ JumpIfNotSmi(a0, non_smi);
-
-  // The result of negating zero or the smallest negative smi is not a smi.
-  __ And(t0, a0, ~0x80000000);
-  __ Branch(slow, eq, t0, Operand(zero_reg));
-
-  // Return '0 - value'.
-  __ Ret(USE_DELAY_SLOT);
-  __ subu(v0, zero_reg, a0);
-}
-
-
-void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
-                                        Label* non_smi) {
-  __ JumpIfNotSmi(a0, non_smi);
-
-  // Flip bits and revert inverted smi-tag.
-  __ Neg(v0, a0);
-  __ And(v0, v0, ~kSmiTagMask);
-  __ Ret();
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateNumberStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateNumberStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
-  Label non_smi, slow, call_builtin;
-  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeSub(masm, &slow);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-  __ bind(&call_builtin);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeBitNot(masm, &slow);
-  __ bind(&slow);
-  GenerateTypeTransition(masm);
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
-                                            Label* slow) {
-  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
-  // a0 is a heap number. Get a new heap number in a1.
-  if (mode_ == UNARY_OVERWRITE) {
-    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
-    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
-    __ Ret(USE_DELAY_SLOT);
-    __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
-  } else {
-    Label slow_allocate_heapnumber, heapnumber_allocated;
-    __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
-    __ jmp(&heapnumber_allocated);
-
-    __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(a0);
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ mov(a1, v0);
-      __ pop(a0);
-    }
-
-    __ bind(&heapnumber_allocated);
-    __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
-    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
-    __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
-    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
-    __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
-    __ Ret(USE_DELAY_SLOT);
-    __ mov(v0, a1);
-  }
-}
-
-
-void UnaryOpStub::GenerateHeapNumberCodeBitNot(
-    MacroAssembler* masm,
-    Label* slow) {
-  Label impossible;
-
-  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
-  // Convert the heap number in a0 to an untagged integer in a1.
-  __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
-
-  // Do the bitwise operation and check if the result fits in a smi.
-  Label try_float;
-  __ Neg(a1, a1);
-  __ Addu(a2, a1, Operand(0x40000000));
-  __ Branch(&try_float, lt, a2, Operand(zero_reg));
-
-  // Tag the result as a smi and we're done.
-  __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
-  __ SmiTag(v0, a1);
-
-  // Try to store the result in a heap number.
-  __ bind(&try_float);
-  if (mode_ == UNARY_NO_OVERWRITE) {
-    Label slow_allocate_heapnumber, heapnumber_allocated;
-    // Allocate a new heap number without zapping v0, which we need if it fails.
-    __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
-    __ jmp(&heapnumber_allocated);
-
-    __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(v0);  // Push the heap number, not the untagged int32.
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ mov(a2, v0);  // Move the new heap number into a2.
-      // Get the heap number into v0, now that the new heap number is in a2.
-      __ pop(v0);
-    }
-
-    // Convert the heap number in v0 to an untagged integer in a1.
-    // This can't go slow-case because it's the same number we already
-    // converted once again.
-    __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
-    // Negate the result.
-    __ Xor(a1, a1, -1);
-
-    __ bind(&heapnumber_allocated);
-    __ mov(v0, a2);  // Move newly allocated heap number to v0.
-  }
-
-  // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
-  __ mtc1(a1, f0);
-  __ cvt_d_w(f0, f0);
-  __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
-  __ Ret();
-
-  __ bind(&impossible);
-  if (FLAG_debug_code) {
-    __ stop("Incorrect assumption in bit-not stub");
-  }
-}
-
-
-// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::SUB:
-      GenerateGenericStubSub(masm);
-      break;
-    case Token::BIT_NOT:
-      GenerateGenericStubBitNot(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeSub(masm, &non_smi, &slow);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeSub(masm, &slow);
-  __ bind(&slow);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
-  Label non_smi, slow;
-  GenerateSmiCodeBitNot(masm, &non_smi);
-  __ bind(&non_smi);
-  GenerateHeapNumberCodeBitNot(masm, &slow);
-  __ bind(&slow);
-  GenerateGenericCodeFallback(masm);
-}
-
-
-void UnaryOpStub::GenerateGenericCodeFallback(
-    MacroAssembler* masm) {
-  // Handle the slow case by jumping to the JavaScript builtin.
-  __ push(a0);
-  switch (op_) {
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
-      break;
-    case Token::BIT_NOT:
-      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
 void BinaryOpStub::Initialize() {
   platform_specific_bit_ = true;  // FPU is a base requirement for V8.
 }
 
 
@@ -2388,8 +2133,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
   __ GetObjectType(right, a2, a2);
   __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
 
-  StringAddStub string_add_stub((StringAddFlags)
-                                (ERECT_FRAME | NO_STRING_CHECK_IN_STUB));
+  StringAddStub string_add_stub(
+      (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
   GenerateRegisterArgsPush(masm);
   __ TailCallStub(&string_add_stub);
 
@@ -2806,8 +2551,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
   __ GetObjectType(left, a2, a2);
   __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
 
-  StringAddStub string_add_left_stub((StringAddFlags)
-      (ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB));
+  StringAddStub string_add_left_stub(
+      (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
   GenerateRegisterArgsPush(masm);
   __ TailCallStub(&string_add_left_stub);
 
@@ -2817,8 +2562,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
   __ GetObjectType(right, a2, a2);
   __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
 
-  StringAddStub string_add_right_stub((StringAddFlags)
-      (ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB));
+  StringAddStub string_add_right_stub(
+      (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
   GenerateRegisterArgsPush(masm);
   __ TailCallStub(&string_add_right_stub);
 
@@ -3344,6 +3089,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
 }
 
 
@@ -3987,7 +3733,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
 
   __ bind(&miss);
-  StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+  StubCompiler::TailCallBuiltin(
+      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
 }
 
 
@@ -4018,7 +3765,8 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
                                          support_wrapper_);
 
   __ bind(&miss);
-  StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+  StubCompiler::TailCallBuiltin(
+      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
 }
 
 
@@ -4088,7 +3836,8 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
 
   __ bind(&miss);
 
-  StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
+  StubCompiler::TailCallBuiltin(
+      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
 }
 
 
@@ -5043,20 +4792,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
   __ Branch(&done, eq, a3, Operand(a1));
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ Branch(&done, eq, a3, Operand(at));
-  // Special handling of the Array() function, which caches not only the
-  // monomorphic Array function but the initial ElementsKind with special
-  // sentinels
-  __ JumpIfNotSmi(a3, &miss);
-  if (FLAG_debug_code) {
-    Handle<Object> terminal_kind_sentinel =
-        TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
-                                                    LAST_FAST_ELEMENTS_KIND);
-    __ Assert(le, "Array function sentinel is not an ElementsKind",
-              a3, Operand(terminal_kind_sentinel));
-  }
+  // If we came here, we need to see if we are the array function.
+  // If we didn't have a matching function, and we didn't find the megamorph
+  // sentinel, then we have in the cell either some other function or an
+  // AllocationSite. Do a map check on the object in a3.
+  Handle<Map> allocation_site_map(
+      masm->isolate()->heap()->allocation_site_map(),
+      masm->isolate());
+  __ lw(t1, FieldMemOperand(a3, 0));
+  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+  __ Branch(&miss, ne, t1, Operand(at));
 
   // Make sure the function is the Array() function
   __ LoadArrayFunction(a3);
@@ -5083,14 +4829,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   __ LoadArrayFunction(a3);
   __ Branch(&not_array_function, ne, a1, Operand(a3));
 
-  // The target function is the Array constructor, install a sentinel value in
-  // the constructor's type info cell that will track the initial ElementsKind
-  // that should be used for the array when its constructed.
-  Handle<Object> initial_kind_sentinel =
-      TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
-          GetInitialFastElementsKind());
-  __ li(a3, Operand(initial_kind_sentinel));
-  __ sw(a3, FieldMemOperand(a2, Cell::kValueOffset));
+  // The target function is the Array constructor.
+  // Create an AllocationSite if we don't already have it, store it in the cell.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    const RegList kSavedRegs =
+        1 << 4 |  // a0
+        1 << 5 |  // a1
+        1 << 6;   // a2
+
+    __ MultiPush(kSavedRegs);
+
+    CreateAllocationSiteStub create_stub;
+    __ CallStub(&create_stub);
+
+    __ MultiPop(kSavedRegs);
+  }
   __ Branch(&done);
 
   __ bind(&not_array_function);
@@ -6111,7 +5865,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ lw(a1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
 
   // Make sure that both arguments are strings if not known in advance.
-  if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
+  // Otherwise, at least one of the arguments is definitely a string,
+  // and we convert the one that is not known to be a string.
+  if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
+    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
     __ JumpIfEitherSmi(a0, a1, &call_runtime);
     // Load instance types.
     __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
@@ -6123,20 +5881,16 @@ void StringAddStub::Generate(MacroAssembler* masm) {
     __ Or(t4, t0, Operand(t1));
     __ And(t4, t4, Operand(kIsNotStringMask));
     __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
-  } else {
-    // Here at least one of the arguments is definitely a string.
-    // We convert the one that is not known to be a string.
-    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
-      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
-      GenerateConvertArgument(
-          masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
-      builtin_id = Builtins::STRING_ADD_RIGHT;
-    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
-      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
-      GenerateConvertArgument(
-          masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
-      builtin_id = Builtins::STRING_ADD_LEFT;
-    }
+  } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
+    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
+    GenerateConvertArgument(
+        masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
+    builtin_id = Builtins::STRING_ADD_RIGHT;
+  } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
+    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
+    GenerateConvertArgument(
+        masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
+    builtin_id = Builtins::STRING_ADD_LEFT;
   }
 
   // Both arguments are strings.
@@ -6187,7 +5941,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ Branch(&longer_than_two, ne, t2, Operand(2));
 
   // Check that both strings are non-external ASCII strings.
-  if (flags_ != NO_STRING_ADD_FLAGS) {
+  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -6231,7 +5985,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
 
   // If result is not supposed to be flat, allocate a cons string object.
   // If both strings are ASCII the result is an ASCII cons string.
-  if (flags_ != NO_STRING_ADD_FLAGS) {
+  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -6314,7 +6068,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   // t2: sum of lengths.
   Label first_prepared, second_prepared;
   __ bind(&string_add_flat_result);
-  if (flags_ != NO_STRING_ADD_FLAGS) {
+  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -6400,7 +6154,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
 
   // Just jump to runtime to add the two strings.
   __ bind(&call_runtime);
-  if ((flags_ & ERECT_FRAME) != 0) {
+  if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
     GenerateRegisterArgsPop(masm);
     // Build a frame.
     {
@@ -6415,7 +6169,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
 
   if (call_builtin.is_linked()) {
     __ bind(&call_builtin);
-    if ((flags_ & ERECT_FRAME) != 0) {
+    if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
      GenerateRegisterArgsPop(masm);
      // Build a frame.
     {
@@ -6467,7 +6221,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
                                   scratch2,
                                   scratch3,
                                   scratch4,
-                                  false,
                                   &not_cached);
  __ mov(arg, scratch1);
  __ sw(arg, MemOperand(sp, stack_offset));
@@ -6623,13 +6376,10 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kInternalizedTag != 0);
-
-  __ And(tmp1, tmp1, Operand(kIsNotStringMask | kIsInternalizedMask));
-  __ Branch(&miss, ne, tmp1, Operand(kInternalizedTag | kStringTag));
-
-  __ And(tmp2, tmp2, Operand(kIsNotStringMask | kIsInternalizedMask));
-  __ Branch(&miss, ne, tmp2, Operand(kInternalizedTag | kStringTag));
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  __ Or(tmp1, tmp1, Operand(tmp2));
+  __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+  __ Branch(&miss, ne, at, Operand(zero_reg));
 
  // Make sure a0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
@@ -6664,7 +6414,6 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
 
  // Check that both operands are unique names. This leaves the instance
  // types loaded in tmp1 and tmp2.
-  STATIC_ASSERT(kInternalizedTag != 0);
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
@@ -6738,11 +6487,11 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
  // strings.
  if (equality) {
    ASSERT(GetCondition() == eq);
-    STATIC_ASSERT(kInternalizedTag != 0);
-    __ And(tmp3, tmp1, Operand(tmp2));
-    __ And(tmp5, tmp3, Operand(kIsInternalizedMask));
+    STATIC_ASSERT(kInternalizedTag == 0);
+    __ Or(tmp3, tmp1, Operand(tmp2));
+    __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
    Label is_symbol;
-    __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
+    __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
    // Make sure a0 is non-zero. At this point input operands are
    // guaranteed to be non-zero.
    ASSERT(right.is(a0));
@@ -6815,6 +6564,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
  GenerateMiss(masm);
 }
 
+
 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
@@ -7145,6 +6895,7 @@ struct AheadOfTimeWriteBarrierStubList {
  RememberedSetAction action;
 };
 
+
 #define REG(Name) { kRegister_ ## Name ## _Code }
 
 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
@@ -7207,6 +6958,9 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
  stub1.GetCode(isolate)->set_is_pregenerated(true);
+  // Hydrogen code stubs need stub2 at snapshot time.
+  StoreBufferOverflowStub stub2(kSaveFPRegs);
+  stub2.GetCode(isolate)->set_is_pregenerated(true);
 }
 
 
@@ -7612,10 +7366,6 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
  ASSERT(FAST_DOUBLE_ELEMENTS == 4);
  ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
 
-  Handle<Object> undefined_sentinel(
-      masm->isolate()->heap()->undefined_value(),
-      masm->isolate());
-
  // is the low bit set? If so, we are holey and that is good.
  Label normal_sequence;
  __ And(at, a3, Operand(1));
  __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
 
  // look at the first argument
  __ lw(t1, MemOperand(sp, 0));
  __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
 
  // We are going to create a holey array, but our kind is non-holey.
-  // Fix kind and retry
+  // Fix kind and retry (only if we have an allocation site in the cell).
  __ Addu(a3, a3, Operand(1));
-  __ Branch(&normal_sequence, eq, a2, Operand(undefined_sentinel));
-
-  // The type cell may have gone megamorphic, don't overwrite if so.
-  __ lw(t1, FieldMemOperand(a2, kPointerSize));
-  __ JumpIfNotSmi(t1, &normal_sequence);
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&normal_sequence, eq, a2, Operand(at));
+  __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+  __ lw(t1, FieldMemOperand(t1, 0));
+  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+  __ Branch(&normal_sequence, ne, t1, Operand(at));
 
  // Save the resulting elements kind in type info
  __ SmiTag(a3);
-  __ sw(a3, FieldMemOperand(a2, kPointerSize));
+  __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
+  __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(a3);
 
  __ bind(&normal_sequence);
@@ -7664,7 +7416,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(kind);
    stub.GetCode(isolate)->set_is_pregenerated(true);
-    if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
      stub1.GetCode(isolate)->set_is_pregenerated(true);
    }
@@ -7705,10 +7457,6 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------
-  Handle<Object> undefined_sentinel(
-      masm->isolate()->heap()->undefined_value(),
-      masm->isolate());
-
  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.
@@ -7723,10 +7471,11 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
    __ Assert(eq, "Unexpected initial map for Array function",
              t0, Operand(MAP_TYPE));
 
-    // We should either have undefined in a2 or a valid cell
+    // We should either have undefined in a2 or a valid cell.
    Label okay_here;
    Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
-    __ Branch(&okay_here, eq, a2, Operand(undefined_sentinel));
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(&okay_here, eq, a2, Operand(at));
    __ lw(a3, FieldMemOperand(a2, 0));
    __ Assert(eq, "Expected property cell in register a2",
              a3, Operand(cell_map));
@@ -7735,9 +7484,20 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  Label no_info, switch_ready;
 
  // Get the elements kind and case on that.
-  __ Branch(&no_info, eq, a2, Operand(undefined_sentinel));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&no_info, eq, a2, Operand(at));
  __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
-  __ JumpIfNotSmi(a3, &no_info);
+
+  // The type cell may have undefined in its value.
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&no_info, eq, a3, Operand(at));
+
+  // The type cell has either an AllocationSite or a JSFunction.
+  __ lw(t0, FieldMemOperand(a3, 0));
+  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+  __ Branch(&no_info, ne, t0, Operand(at));
+
+  __ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(a3);
  __ jmp(&switch_ready);
  __ bind(&no_info);
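
Note on the internalized-string hunks above (EmitStrictTwoHeapObjectCompare, EmitCheckForInternalizedStringsOrObjects and the ICCompareStub generators): they all rely on the new instance-type encoding in which both the string tag and the internalized tag are zero bits, so a single Or/And/branch replaces the old per-operand compares. A minimal C++ sketch of the predicate those MIPS sequences compute follows; the mask values are illustrative assumptions, not V8's actual bit layout.

    // Sketch only: illustrative bit positions, not V8's real instance-type layout.
    #include <cstdint>

    const uint32_t kIsNotStringMask       = 1u << 7;  // 0 bit means "is a string"
    const uint32_t kIsNotInternalizedMask = 1u << 6;  // 0 bit means "is internalized"

    // True iff both instance types describe internalized strings.  Because the
    // "hit" value of each property is 0, Or-ing the two types and masking once
    // replaces the old compares against (kInternalizedTag | kStringTag).
    bool BothInternalizedStrings(uint32_t type1, uint32_t type2) {
      return ((type1 | type2) & (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
    }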
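The GenerateLookupNumberStringCache hunk drops the object_is_smi parameter but keeps the probing scheme its comments describe: the cache is a FixedArray of (number, string) pairs, and a heap number is hashed by XOR-ing the two 32-bit halves of its double value and masking with the cache size. A rough, self-contained C++ sketch of that lookup; the Entry layout and names are assumptions for illustration only.

    #include <cstdint>
    #include <cstring>

    struct Entry { double key; const char* value; };  // one (number, string) pair

    // 'mask' is the number of pairs minus one, mirroring the stub's mask register.
    const char* LookupNumberStringCache(const Entry* cache, uint32_t mask,
                                        double number) {
      uint64_t bits;
      std::memcpy(&bits, &number, sizeof bits);              // lw scratch1 / scratch2
      uint32_t hash = (static_cast<uint32_t>(bits) ^
                       static_cast<uint32_t>(bits >> 32)) & mask;  // Xor + And
      const Entry& probe = cache[hash];                       // sll + Addu + lw
      return probe.key == number ? probe.value : nullptr;     // BranchF eq, else miss
    }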
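The StringAddStub hunks replace the old NO_STRING_* flags with STRING_ADD_CHECK_* values in which the "both" flag is simply the union of the left and right bits, so the stub can test "(flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH". A sketch of that relationship; the enumerator values here are assumed for illustration, and the authoritative definition lives in code-stubs.h.

    enum StringAddFlags {
      STRING_ADD_CHECK_NONE  = 0,
      STRING_ADD_CHECK_LEFT  = 1 << 0,
      STRING_ADD_CHECK_RIGHT = 1 << 1,
      STRING_ADD_CHECK_BOTH  = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
      STRING_ADD_ERECT_FRAME = 1 << 2
    };

    // Both operands still need a string check only when both check bits are set.
    bool NeedsBothChecks(int flags) {
      return (flags & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH;
    }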
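Finally, GenerateRecordCallTarget and ArrayConstructorStub::Generate now keep an AllocationSite in the type-feedback cell instead of a smi sentinel: the cell value is checked against the allocation-site map, and the recorded ElementsKind is read from the site's transition_info field. A simplified C++ sketch of that decision; the struct layout and the -1 "no feedback" value are assumptions for illustration, not V8's object model.

    struct Map { int id; };
    static const Map kAllocationSiteMap = { 1 };   // stand-in for the root-list map

    struct HeapObject { const Map* map; };
    struct AllocationSite { const Map* map; int transition_info; };

    // Returns the recorded ElementsKind, or -1 when the cell gives no usable
    // feedback (undefined in the cell, or an object other than an AllocationSite).
    int RecordedElementsKind(const HeapObject* cell_value) {
      if (cell_value == nullptr) return -1;                   // undefined in the cell
      if (cell_value->map != &kAllocationSiteMap) return -1;  // e.g. a JSFunction
      return reinterpret_cast<const AllocationSite*>(cell_value)->transition_info;
    }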