Diffstat (limited to 'deps/v8/src/arm/code-stubs-arm.cc')
-rw-r--r-- | deps/v8/src/arm/code-stubs-arm.cc | 894
1 file changed, 457 insertions(+), 437 deletions(-)
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 9484f85f9..ceb108ffa 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -41,7 +41,8 @@ namespace internal { static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, - Condition cond); + Condition cond, + bool never_nan_nan); static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs, Register rhs, @@ -626,6 +627,24 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, } +void FloatingPointHelper::LoadOperands( + MacroAssembler* masm, + FloatingPointHelper::Destination destination, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* slow) { + + // Load right operand (r0) to d6 or r2/r3. + LoadNumber(masm, destination, + r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow); + + // Load left operand (r1) to d7 or r0/r1. + LoadNumber(masm, destination, + r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow); +} + + void FloatingPointHelper::LoadNumber(MacroAssembler* masm, Destination destination, Register object, @@ -729,13 +748,13 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, Register int_scratch, Destination destination, DwVfpRegister double_dst, - Register dst_mantissa, - Register dst_exponent, + Register dst1, + Register dst2, Register scratch2, SwVfpRegister single_scratch) { ASSERT(!int_scratch.is(scratch2)); - ASSERT(!int_scratch.is(dst_mantissa)); - ASSERT(!int_scratch.is(dst_exponent)); + ASSERT(!int_scratch.is(dst1)); + ASSERT(!int_scratch.is(dst2)); Label done; @@ -744,57 +763,56 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, __ vmov(single_scratch, int_scratch); __ vcvt_f64_s32(double_dst, single_scratch); if (destination == kCoreRegisters) { - __ vmov(dst_mantissa, dst_exponent, double_dst); + __ vmov(dst1, dst2, double_dst); } } else { Label fewer_than_20_useful_bits; // Expected output: - // | dst_exponent | dst_mantissa | + // | dst2 | dst1 | // | s | exp | mantissa | // Check for zero. __ cmp(int_scratch, Operand::Zero()); - __ mov(dst_exponent, int_scratch); - __ mov(dst_mantissa, int_scratch); + __ mov(dst2, int_scratch); + __ mov(dst1, int_scratch); __ b(eq, &done); // Preload the sign of the value. - __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC); + __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC); // Get the absolute value of the object (as an unsigned integer). __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi); // Get mantissa[51:20]. // Get the position of the first set bit. - __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2); - __ rsb(dst_mantissa, dst_mantissa, Operand(31)); + __ CountLeadingZeros(dst1, int_scratch, scratch2); + __ rsb(dst1, dst1, Operand(31)); // Set the exponent. - __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias)); - __ Bfi(dst_exponent, scratch2, scratch2, + __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias)); + __ Bfi(dst2, scratch2, scratch2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); // Clear the first non null bit. __ mov(scratch2, Operand(1)); - __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa)); + __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1)); - __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); // Get the number of bits to set in the lower part of the mantissa. 
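A reader's note on the ConvertIntToDouble hunk above: when VFP is unavailable, the stub assembles the IEEE-754 bit pattern of the double by hand, deriving the sign from kSignMask, the biased exponent from the position of the leading set bit (CountLeadingZeros plus rsb 31), and the mantissa from the remaining bits. A minimal standalone C++ sketch of the same construction, not taken from V8 (__builtin_clz assumes GCC or Clang):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Build the bit pattern of (double)v the way the stub does: find the
    // leading 1, derive the biased exponent from its position, drop the
    // implicit 1, and shift the remainder into the 52-bit mantissa.
    uint64_t Int32ToDoubleBits(int32_t v) {
      if (v == 0) return 0;  // +0.0; the stub branches straight to &done here.
      uint64_t sign = v < 0 ? (1ULL << 63) : 0;
      uint32_t a = v < 0 ? 0u - static_cast<uint32_t>(v) : v;  // |v|, like rsb
      int top = 31 - __builtin_clz(a);  // CountLeadingZeros, then rsb 31
      uint64_t exponent = static_cast<uint64_t>(1023 + top) << 52;
      uint64_t mantissa =
          (static_cast<uint64_t>(a) ^ (1ULL << top)) << (52 - top);
      return sign | exponent | mantissa;
    }

    int main() {
      for (int32_t v : {1, -1, 42, -2147483647 - 1, 123456789}) {
        double d = v;
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        assert(Int32ToDoubleBits(v) == bits);
      }
    }

In the stub the same 64-bit value is simply spread across the two 32-bit destination registers.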
- __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord), - SetCC); + __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); __ b(mi, &fewer_than_20_useful_bits); // Set the higher 20 bits of the mantissa. - __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2)); + __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2)); __ rsb(scratch2, scratch2, Operand(32)); - __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2)); + __ mov(dst1, Operand(int_scratch, LSL, scratch2)); __ b(&done); __ bind(&fewer_than_20_useful_bits); - __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); __ mov(scratch2, Operand(int_scratch, LSL, scratch2)); - __ orr(dst_exponent, dst_exponent, scratch2); + __ orr(dst2, dst2, scratch2); // Set dst1 to 0. - __ mov(dst_mantissa, Operand::Zero()); + __ mov(dst1, Operand::Zero()); } __ bind(&done); } @@ -805,8 +823,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, Destination destination, DwVfpRegister double_dst, DwVfpRegister double_scratch, - Register dst_mantissa, - Register dst_exponent, + Register dst1, + Register dst2, Register heap_number_map, Register scratch1, Register scratch2, @@ -822,8 +840,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, __ JumpIfNotSmi(object, &obj_is_not_smi); __ SmiUntag(scratch1, object); - ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa, - dst_exponent, scratch2, single_scratch); + ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2, + scratch2, single_scratch); __ b(&done); __ bind(&obj_is_not_smi); @@ -850,52 +868,26 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, __ b(ne, not_int32); if (destination == kCoreRegisters) { - __ vmov(dst_mantissa, dst_exponent, double_dst); + __ vmov(dst1, dst2, double_dst); } } else { ASSERT(!scratch1.is(object) && !scratch2.is(object)); - // Load the double value in the destination registers. - bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent); - if (save_registers) { - // Save both output registers, because the other one probably holds - // an important value too. - __ Push(dst_exponent, dst_mantissa); - } - __ Ldrd(dst_mantissa, dst_exponent, - FieldMemOperand(object, HeapNumber::kValueOffset)); + // Load the double value in the destination registers.. + __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); // Check for 0 and -0. - Label zero; - __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask)); - __ orr(scratch1, scratch1, Operand(dst_mantissa)); + __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask)); + __ orr(scratch1, scratch1, Operand(dst2)); __ cmp(scratch1, Operand::Zero()); - __ b(eq, &zero); + __ b(eq, &done); // Check that the value can be exactly represented by a 32-bit integer. // Jump to not_int32 if that's not the case. - Label restore_input_and_miss; - DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2, - &restore_input_and_miss); - - // dst_* were trashed. Reload the double value. 
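On the zero check in this hunk: bic clears the sign bit of the exponent word and orr folds in the mantissa word, so a single compare against zero catches both +0.0 and -0.0. The same test in C++, assuming the usual high/low word split of a double:

    #include <cstdint>

    // True for the bit patterns of +0.0 and -0.0: everything except the
    // sign bit must be zero. Mirrors the bic/orr/cmp sequence above.
    bool DoubleBitsAreZero(uint32_t hi, uint32_t lo) {
      const uint32_t kSignMask = 0x80000000u;  // plays HeapNumber::kSignMask
      return ((hi & ~kSignMask) | lo) == 0;
    }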
- if (save_registers) { - __ Pop(dst_exponent, dst_mantissa); - } - __ Ldrd(dst_mantissa, dst_exponent, - FieldMemOperand(object, HeapNumber::kValueOffset)); - __ b(&done); - - __ bind(&restore_input_and_miss); - if (save_registers) { - __ Pop(dst_exponent, dst_mantissa); - } - __ b(not_int32); + DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); - __ bind(&zero); - if (save_registers) { - __ Drop(2); - } + // dst1 and dst2 were trashed. Reload the double value. + __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); } __ bind(&done); @@ -918,15 +910,14 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, !scratch1.is(scratch3) && !scratch2.is(scratch3)); - Label done, maybe_undefined; + Label done; __ UntagAndJumpIfSmi(dst, object, &done); __ AssertRootValue(heap_number_map, Heap::kHeapNumberMapRootIndex, "HeapNumberMap register clobbered."); - - __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); // Object is a heap number. // Convert the floating point value to a 32-bit integer. @@ -973,28 +964,20 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, __ tst(scratch1, Operand(HeapNumber::kSignMask)); __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi); } - __ b(&done); - - __ bind(&maybe_undefined); - __ CompareRoot(object, Heap::kUndefinedValueRootIndex); - __ b(ne, not_int32); - // |undefined| is truncated to 0. - __ mov(dst, Operand(Smi::FromInt(0))); - // Fall through. __ bind(&done); } void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, - Register src_exponent, - Register src_mantissa, + Register src1, + Register src2, Register dst, Register scratch, Label* not_int32) { // Get exponent alone in scratch. __ Ubfx(scratch, - src_exponent, + src1, HeapNumber::kExponentShift, HeapNumber::kExponentBits); @@ -1014,11 +997,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // Another way to put it is that if (exponent - signbit) > 30 then the // number cannot be represented as an int32. Register tmp = dst; - __ sub(tmp, scratch, Operand(src_exponent, LSR, 31)); + __ sub(tmp, scratch, Operand(src1, LSR, 31)); __ cmp(tmp, Operand(30)); __ b(gt, not_int32); // - Bits [21:0] in the mantissa are not null. - __ tst(src_mantissa, Operand(0x3fffff)); + __ tst(src2, Operand(0x3fffff)); __ b(ne, not_int32); // Otherwise the exponent needs to be big enough to shift left all the @@ -1029,19 +1012,19 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // Get the 32 higher bits of the mantissa in dst. __ Ubfx(dst, - src_mantissa, + src2, HeapNumber::kMantissaBitsInTopWord, 32 - HeapNumber::kMantissaBitsInTopWord); __ orr(dst, dst, - Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord)); // Create the mask and test the lower bits (of the higher bits). __ rsb(scratch, scratch, Operand(32)); - __ mov(src_mantissa, Operand(1)); - __ mov(src_exponent, Operand(src_mantissa, LSL, scratch)); - __ sub(src_exponent, src_exponent, Operand(1)); - __ tst(dst, src_exponent); + __ mov(src2, Operand(1)); + __ mov(src1, Operand(src2, LSL, scratch)); + __ sub(src1, src1, Operand(1)); + __ tst(dst, src1); __ b(ne, not_int32); } @@ -1165,43 +1148,48 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { // for "identity and not NaN". 
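DoubleIs32BitInteger, whose registers are renamed in the hunk above, decides from the raw bits alone whether a double holds an exact int32. A standalone C++ sketch following the same idea (an exponent window plus a fractional-bits test); it is not a bit-for-bit transcription of the stub, and zero/-0.0 are assumed to be filtered out by the caller, as they are in the stub:

    #include <cstdint>
    #include <cstring>

    bool DoubleIsExactInt32(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      int sign = static_cast<int>(bits >> 63);
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // unbiased
      uint64_t mantissa = bits & ((1ULL << 52) - 1);
      if (exponent < 0 || exponent > 31) return false;  // |d| < 1 or too large
      // Exponent 31 is only in range for exactly -2^31; this is why the stub
      // tests "(exponent - signbit) > 30".
      if (exponent == 31) return sign == 1 && mantissa == 0;
      // No fractional part: all mantissa bits below (52 - exponent) are zero.
      return (mantissa & ((1ULL << (52 - exponent)) - 1)) == 0;
    }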
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, - Condition cond) { + Condition cond, + bool never_nan_nan) { Label not_identical; Label heap_number, return_equal; __ cmp(r0, r1); __ b(ne, ¬_identical); - // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), - // so we do the second best thing - test it ourselves. - // They are both equal and they are not both Smis so both of them are not - // Smis. If it's not a heap number, then return equal. - if (cond == lt || cond == gt) { - __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, slow); - } else { - __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); - __ b(eq, &heap_number); - // Comparing JS objects with <=, >= is complicated. - if (cond != eq) { - __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); + // The two objects are identical. If we know that one of them isn't NaN then + // we now know they test equal. + if (cond != eq || !never_nan_nan) { + // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), + // so we do the second best thing - test it ourselves. + // They are both equal and they are not both Smis so both of them are not + // Smis. If it's not a heap number, then return equal. + if (cond == lt || cond == gt) { + __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); __ b(ge, slow); - // Normally here we fall through to return_equal, but undefined is - // special: (undefined == undefined) == true, but - // (undefined <= undefined) == false! See ECMAScript 11.8.5. - if (cond == le || cond == ge) { - __ cmp(r4, Operand(ODDBALL_TYPE)); - __ b(ne, &return_equal); - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ cmp(r0, r2); - __ b(ne, &return_equal); - if (cond == le) { - // undefined <= undefined should fail. - __ mov(r0, Operand(GREATER)); - } else { - // undefined >= undefined should fail. - __ mov(r0, Operand(LESS)); + } else { + __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); + __ b(eq, &heap_number); + // Comparing JS objects with <=, >= is complicated. + if (cond != eq) { + __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ b(ge, slow); + // Normally here we fall through to return_equal, but undefined is + // special: (undefined == undefined) == true, but + // (undefined <= undefined) == false! See ECMAScript 11.8.5. + if (cond == le || cond == ge) { + __ cmp(r4, Operand(ODDBALL_TYPE)); + __ b(ne, &return_equal); + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + __ cmp(r0, r2); + __ b(ne, &return_equal); + if (cond == le) { + // undefined <= undefined should fail. + __ mov(r0, Operand(GREATER)); + } else { + // undefined >= undefined should fail. + __ mov(r0, Operand(LESS)); + } + __ Ret(); } - __ Ret(); } } } @@ -1216,45 +1204,47 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, } __ Ret(); - // For less and greater we don't have to check for NaN since the result of - // x < x is false regardless. For the others here is some code to check - // for NaN. - if (cond != lt && cond != gt) { - __ bind(&heap_number); - // It is a heap number, so return non-equal if it's NaN and equal if it's - // not NaN. - - // The representation of NaN values has all exponent bits (52..62) set, - // and not all mantissa bits (0..51) clear. - // Read top bits of double representation (second word of value). - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - // Test that exponent bits are all set. 
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r3, Operand(-1)); - __ b(ne, &return_equal); - - // Shift out flag and all exponent bits, retaining only mantissa. - __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); - // Or with all low-bits of mantissa. - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ orr(r0, r3, Operand(r2), SetCC); - // For equal we already have the right value in r0: Return zero (equal) - // if all bits in mantissa are zero (it's an Infinity) and non-zero if - // not (it's a NaN). For <= and >= we need to load r0 with the failing - // value if it's a NaN. - if (cond != eq) { - // All-zero means Infinity means equal. - __ Ret(eq); - if (cond == le) { - __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. - } else { - __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. + if (cond != eq || !never_nan_nan) { + // For less and greater we don't have to check for NaN since the result of + // x < x is false regardless. For the others here is some code to check + // for NaN. + if (cond != lt && cond != gt) { + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if it's + // not NaN. + + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // Read top bits of double representation (second word of value). + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + // Test that exponent bits are all set. + __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); + // NaNs have all-one exponents so they sign extend to -1. + __ cmp(r3, Operand(-1)); + __ b(ne, &return_equal); + + // Shift out flag and all exponent bits, retaining only mantissa. + __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + // Or with all low-bits of mantissa. + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ orr(r0, r3, Operand(r2), SetCC); + // For equal we already have the right value in r0: Return zero (equal) + // if all bits in mantissa are zero (it's an Infinity) and non-zero if + // not (it's a NaN). For <= and >= we need to load r0 with the failing + // value if it's a NaN. + if (cond != eq) { + // All-zero means Infinity means equal. + __ Ret(eq); + if (cond == le) { + __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. + } else { + __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. + } } + __ Ret(); } - __ Ret(); + // No fall through here. } - // No fall through here. __ bind(¬_identical); } @@ -1688,60 +1678,42 @@ void NumberToStringStub::Generate(MacroAssembler* masm) { } -static void ICCompareStub_CheckInputType(MacroAssembler* masm, - Register input, - Register scratch, - CompareIC::State expected, - Label* fail) { - Label ok; - if (expected == CompareIC::SMI) { - __ JumpIfNotSmi(input, fail); - } else if (expected == CompareIC::HEAP_NUMBER) { - __ JumpIfSmi(input, &ok); - __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, - DONT_DO_SMI_CHECK); - } - // We could be strict about symbol/string here, but as long as - // hydrogen doesn't care, the stub doesn't have to care either. - __ bind(&ok); -} - - -// On entry r1 and r2 are the values to be compared. +// On entry lhs_ and rhs_ are the values to be compared. // On exit r0 is 0, positive or negative to indicate the result of // the comparison. 
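The heap-number path above leans on the IEEE-754 encoding of NaN: all eleven exponent bits set and a nonzero mantissa (an all-zero mantissa would be an infinity, which compares equal to itself). The same predicate in plain C++, for reference:

    #include <cstdint>
    #include <cstring>

    // Mirrors the Sbfx/cmp against -1 plus the mantissa orr above: NaN iff
    // the exponent field is all ones and the mantissa is nonzero.
    bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      bool exponent_all_ones = ((bits >> 52) & 0x7FF) == 0x7FF;
      bool mantissa_nonzero = (bits & ((1ULL << 52) - 1)) != 0;
      return exponent_all_ones && mantissa_nonzero;
    }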
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { - Register lhs = r1; - Register rhs = r0; - Condition cc = GetCondition(); - - Label miss; - ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss); - ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss); +void CompareStub::Generate(MacroAssembler* masm) { + ASSERT((lhs_.is(r0) && rhs_.is(r1)) || + (lhs_.is(r1) && rhs_.is(r0))); Label slow; // Call builtin. Label not_smis, both_loaded_as_doubles, lhs_not_nan; - Label not_two_smis, smi_done; - __ orr(r2, r1, r0); - __ JumpIfNotSmi(r2, ¬_two_smis); - __ mov(r1, Operand(r1, ASR, 1)); - __ sub(r0, r1, Operand(r0, ASR, 1)); - __ Ret(); - __ bind(¬_two_smis); + if (include_smi_compare_) { + Label not_two_smis, smi_done; + __ orr(r2, r1, r0); + __ JumpIfNotSmi(r2, ¬_two_smis); + __ mov(r1, Operand(r1, ASR, 1)); + __ sub(r0, r1, Operand(r0, ASR, 1)); + __ Ret(); + __ bind(¬_two_smis); + } else if (FLAG_debug_code) { + __ orr(r2, r1, r0); + __ tst(r2, Operand(kSmiTagMask)); + __ Assert(ne, "CompareStub: unexpected smi operands."); + } // NOTICE! This code is only reached after a smi-fast-case check, so // it is certain that at least one operand isn't a smi. // Handle the case where the objects are identical. Either returns the answer // or goes to slow. Only falls through if the objects were not identical. - EmitIdenticalObjectComparison(masm, &slow, cc); + EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); // If either is a Smi (we know that not both are), then they can only // be strictly equal if the other is a HeapNumber. STATIC_ASSERT(kSmiTag == 0); ASSERT_EQ(0, Smi::FromInt(0)); - __ and_(r2, lhs, Operand(rhs)); + __ and_(r2, lhs_, Operand(rhs_)); __ JumpIfNotSmi(r2, ¬_smis); // One operand is a smi. EmitSmiNonsmiComparison generates code that can: // 1) Return the answer. @@ -1752,7 +1724,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // comparison. If VFP3 is supported the double values of the numbers have // been loaded into d7 and d6. Otherwise, the double values have been loaded // into r0, r1, r2, and r3. - EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); + EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); __ bind(&both_loaded_as_doubles); // The arguments have been converted to doubles and stored in d6 and d7, if @@ -1775,7 +1747,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // If one of the sides was a NaN then the v flag is set. Load r0 with // whatever it takes to make the comparison fail, since comparisons with NaN // always fail. - if (cc == lt || cc == le) { + if (cc_ == lt || cc_ == le) { __ mov(r0, Operand(GREATER)); } else { __ mov(r0, Operand(LESS)); @@ -1784,19 +1756,19 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { } else { // Checks for NaN in the doubles we have loaded. Can return the answer or // fall through if neither is a NaN. Also binds lhs_not_nan. - EmitNanCheck(masm, &lhs_not_nan, cc); + EmitNanCheck(masm, &lhs_not_nan, cc_); // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the // answer. Never falls through. - EmitTwoNonNanDoubleComparison(masm, cc); + EmitTwoNonNanDoubleComparison(masm, cc_); } __ bind(¬_smis); // At this point we know we are dealing with two different objects, // and neither of them is a Smi. The objects are in rhs_ and lhs_. - if (strict()) { + if (strict_) { // This returns non-equal for some object types, or falls through if it // was not lucky. 
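On the two-smi fast path at the top of this function: smis are small integers stored shifted left by one with a zero tag bit, so an arithmetic shift right by one untags them, and the subtraction of the untagged values directly produces the negative/zero/positive result the comparison protocol expects in r0. A sketch of the 32-bit tagging scheme assumed here:

    #include <cstdint>

    // 32-bit smi tagging: value << 1, tag bit 0. Written as *2 to stay
    // well-defined for negative values in older C++ standards.
    int32_t SmiTag(int32_t v) { return v * 2; }
    int32_t SmiUntag(int32_t s) { return s >> 1; }  // the ASR #1 above
    bool IsSmi(int32_t s) { return (s & 1) == 0; }

    // Fast-path compare result: <0, 0 or >0, as the stub leaves in r0.
    // Untagged smis fit in 31 bits, so the difference cannot overflow.
    int32_t CompareSmis(int32_t lhs, int32_t rhs) {
      return SmiUntag(lhs) - SmiUntag(rhs);
    }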
- EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); + EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); } Label check_for_symbols; @@ -1806,8 +1778,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // that case. If the inputs are not doubles then jumps to check_for_symbols. // In this case r2 will contain the type of rhs_. Never falls through. EmitCheckForTwoHeapNumbers(masm, - lhs, - rhs, + lhs_, + rhs_, &both_loaded_as_doubles, &check_for_symbols, &flat_string_check); @@ -1815,31 +1787,31 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { __ bind(&check_for_symbols); // In the strict case the EmitStrictTwoHeapObjectCompare already took care of // symbols. - if (cc == eq && !strict()) { + if (cc_ == eq && !strict_) { // Returns an answer for two symbols or two detectable objects. // Otherwise jumps to string case or not both strings case. // Assumes that r2 is the type of rhs_ on entry. - EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow); + EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); } // Check for both being sequential ASCII strings, and inline if that is the // case. __ bind(&flat_string_check); - __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow); + __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3); - if (cc == eq) { + if (cc_ == eq) { StringCompareStub::GenerateFlatAsciiStringEquals(masm, - lhs, - rhs, + lhs_, + rhs_, r2, r3, r4); } else { StringCompareStub::GenerateCompareFlatAsciiStrings(masm, - lhs, - rhs, + lhs_, + rhs_, r2, r3, r4, @@ -1849,18 +1821,18 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { __ bind(&slow); - __ Push(lhs, rhs); + __ Push(lhs_, rhs_); // Figure out which native to call and setup the arguments. Builtins::JavaScript native; - if (cc == eq) { - native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + if (cc_ == eq) { + native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; } else { native = Builtins::COMPARE; int ncr; // NaN compare result - if (cc == lt || cc == le) { + if (cc_ == lt || cc_ == le) { ncr = GREATER; } else { - ASSERT(cc == gt || cc == ge); // remaining cases + ASSERT(cc_ == gt || cc_ == ge); // remaining cases ncr = LESS; } __ mov(r0, Operand(Smi::FromInt(ncr))); @@ -1870,9 +1842,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. __ InvokeBuiltin(native, JUMP_FUNCTION); - - __ bind(&miss); - GenerateMiss(masm); } @@ -2356,23 +2325,20 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { } -void BinaryOpStub::Initialize() { - platform_specific_bit_ = CpuFeatures::IsSupported(VFP2); -} - - void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { Label get_result; __ Push(r1, r0); __ mov(r2, Operand(Smi::FromInt(MinorKey()))); - __ push(r2); + __ mov(r1, Operand(Smi::FromInt(op_))); + __ mov(r0, Operand(Smi::FromInt(operands_type_))); + __ Push(r2, r1, r0); __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()), - 3, + 5, 1); } @@ -2383,8 +2349,59 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( } -void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, - Token::Value op) { +void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. 
It is safe here because + // generation code does not use any raw pointers. + AllowStubCallsScope allow_stub_calls(masm, true); + + switch (operands_type_) { + case BinaryOpIC::UNINITIALIZED: + GenerateTypeTransition(masm); + break; + case BinaryOpIC::SMI: + GenerateSmiStub(masm); + break; + case BinaryOpIC::INT32: + GenerateInt32Stub(masm); + break; + case BinaryOpIC::HEAP_NUMBER: + GenerateHeapNumberStub(masm); + break; + case BinaryOpIC::ODDBALL: + GenerateOddballStub(masm); + break; + case BinaryOpIC::BOTH_STRING: + GenerateBothStringStub(masm); + break; + case BinaryOpIC::STRING: + GenerateStringStub(masm); + break; + case BinaryOpIC::GENERIC: + GenerateGeneric(masm); + break; + default: + UNREACHABLE(); + } +} + + +void BinaryOpStub::PrintName(StringStream* stream) { + const char* op_name = Token::Name(op_); + const char* overwrite_name; + switch (mode_) { + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; + } + stream->Add("BinaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + BinaryOpIC::GetName(operands_type_)); +} + + +void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { Register left = r1; Register right = r0; Register scratch1 = r7; @@ -2394,7 +2411,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, STATIC_ASSERT(kSmiTag == 0); Label not_smi_result; - switch (op) { + switch (op_) { case Token::ADD: __ add(right, left, Operand(right), SetCC); // Add optimistically. __ Ret(vc); @@ -2509,24 +2526,10 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, } -void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required, - OverwriteMode mode); - - -void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, - BinaryOpIC::TypeInfo left_type, - BinaryOpIC::TypeInfo right_type, - bool smi_operands, - Label* not_numbers, - Label* gc_required, - Label* miss, - Token::Value op, - OverwriteMode mode) { +void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, + bool smi_operands, + Label* not_numbers, + Label* gc_required) { Register left = r1; Register right = r0; Register scratch1 = r7; @@ -2538,17 +2541,11 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, __ AssertSmi(left); __ AssertSmi(right); } - if (left_type == BinaryOpIC::SMI) { - __ JumpIfNotSmi(left, miss); - } - if (right_type == BinaryOpIC::SMI) { - __ JumpIfNotSmi(right, miss); - } Register heap_number_map = r6; __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - switch (op) { + switch (op_) { case Token::ADD: case Token::SUB: case Token::MUL: @@ -2558,44 +2555,25 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // depending on whether VFP3 is available or not. FloatingPointHelper::Destination destination = CpuFeatures::IsSupported(VFP2) && - op != Token::MOD ? + op_ != Token::MOD ? FloatingPointHelper::kVFPRegisters : FloatingPointHelper::kCoreRegisters; // Allocate new heap number for result. Register result = r5; - BinaryOpStub_GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); // Load the operands. 
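GenerateSmiSmiOperation's ADD case above adds the still-tagged operands and returns immediately when the V flag is clear (Ret(vc)): because both tag bits are zero, the tagged sum is already the correctly tagged result. A C++ rendering of the same trick, with an explicit overflow check standing in for the ARM V flag (__builtin_add_overflow assumes GCC or Clang):

    #include <cstdint>

    // Add two tagged smis optimistically; on overflow the stub falls through
    // to the non-smi path instead of returning.
    bool AddSmisOptimistically(int32_t lhs, int32_t rhs, int32_t* result) {
      return !__builtin_add_overflow(lhs, rhs, result);
    }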
if (smi_operands) { FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); } else { - // Load right operand to d7 or r2/r3. - if (right_type == BinaryOpIC::INT32) { - FloatingPointHelper::LoadNumberAsInt32Double( - masm, right, destination, d7, d8, r2, r3, heap_number_map, - scratch1, scratch2, s0, miss); - } else { - Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss - : not_numbers; - FloatingPointHelper::LoadNumber( - masm, destination, right, d7, r2, r3, heap_number_map, - scratch1, scratch2, fail); - } - // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it - // jumps to |miss|. - if (left_type == BinaryOpIC::INT32) { - FloatingPointHelper::LoadNumberAsInt32Double( - masm, left, destination, d6, d8, r0, r1, heap_number_map, - scratch1, scratch2, s0, miss); - } else { - Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss - : not_numbers; - FloatingPointHelper::LoadNumber( - masm, destination, left, d6, r0, r1, heap_number_map, - scratch1, scratch2, fail); - } + FloatingPointHelper::LoadOperands(masm, + destination, + heap_number_map, + scratch1, + scratch2, + not_numbers); } // Calculate the result. @@ -2604,7 +2582,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // d6: Left value // d7: Right value CpuFeatures::Scope scope(VFP2); - switch (op) { + switch (op_) { case Token::ADD: __ vadd(d5, d6, d7); break; @@ -2628,7 +2606,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, } else { // Call the C function to handle the double operation. FloatingPointHelper::CallCCodeForDoubleOperation(masm, - op, + op_, result, scratch1); if (FLAG_debug_code) { @@ -2669,7 +2647,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, } Label result_not_a_smi; - switch (op) { + switch (op_) { case Token::BIT_OR: __ orr(r2, r3, Operand(r2)); break; @@ -2720,9 +2698,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, __ AllocateHeapNumber( result, scratch1, scratch2, heap_number_map, gc_required); } else { - BinaryOpStub_GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required, - mode); + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); } // r2: Answer as signed int32. @@ -2737,7 +2714,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // mentioned above SHR needs to always produce a positive result. CpuFeatures::Scope scope(VFP2); __ vmov(s0, r2); - if (op == Token::SHR) { + if (op_ == Token::SHR) { __ vcvt_f64_u32(d0, s0); } else { __ vcvt_f64_s32(d0, s0); @@ -2762,14 +2739,12 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // Generate the smi code. If the operation on smis are successful this return is // generated. If the result is not a smi and heap number allocation is not // requested the code falls through. If number allocation is requested but a -// heap number cannot be allocated the code jumps to the label gc_required. -void BinaryOpStub_GenerateSmiCode( +// heap number cannot be allocated the code jumps to the lable gc_required. 
+void BinaryOpStub::GenerateSmiCode( MacroAssembler* masm, Label* use_runtime, Label* gc_required, - Token::Value op, - BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, - OverwriteMode mode) { + SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { Label not_smis; Register left = r1; @@ -2782,14 +2757,12 @@ void BinaryOpStub_GenerateSmiCode( __ JumpIfNotSmi(scratch1, ¬_smis); // If the smi-smi operation results in a smi return is generated. - BinaryOpStub_GenerateSmiSmiOperation(masm, op); + GenerateSmiSmiOperation(masm); // If heap number results are possible generate the result in an allocated // heap number. - if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { - BinaryOpStub_GenerateFPOperation( - masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, - use_runtime, gc_required, ¬_smis, op, mode); + if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { + GenerateFPOperation(masm, true, use_runtime, gc_required); } __ bind(¬_smis); } @@ -2801,14 +2774,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { if (result_type_ == BinaryOpIC::UNINITIALIZED || result_type_ == BinaryOpIC::SMI) { // Only allow smi results. - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); + GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS); } else { // Allow heap number result and don't make a transition if a heap number // cannot be allocated. - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, - mode_); + GenerateSmiCode(masm, + &call_runtime, + &call_runtime, + ALLOW_HEAPNUMBER_RESULTS); } // Code falls through if the result is not returned as either a smi or heap @@ -2816,14 +2789,23 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { GenerateTypeTransition(masm); __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } +void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { + ASSERT(operands_type_ == BinaryOpIC::STRING); + ASSERT(op_ == Token::ADD); + // Try to add arguments as strings, otherwise, transition to the generic + // BinaryOpIC type. + GenerateAddStrings(masm); + GenerateTypeTransition(masm); +} + + void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { Label call_runtime; - ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); + ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); ASSERT(op_ == Token::ADD); // If both arguments are strings, call the string add stub. // Otherwise, do a transition. @@ -2852,7 +2834,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { - ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); + ASSERT(operands_type_ == BinaryOpIC::INT32); Register left = r1; Register right = r0; @@ -2874,7 +2856,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { Label skip; __ orr(scratch1, left, right); __ JumpIfNotSmi(scratch1, &skip); - BinaryOpStub_GenerateSmiSmiOperation(masm, op_); + GenerateSmiSmiOperation(masm); // Fall through if the result is not a smi. __ bind(&skip); @@ -2884,15 +2866,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { case Token::MUL: case Token::DIV: case Token::MOD: { - // It could be that only SMIs have been seen at either the left - // or the right operand. For precise type feedback, patch the IC - // again if this changes. 
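A small note on the joint smi check used in GenerateSmiCode above (and again at the top of GenerateInt32Stub): OR-ing the two operands and testing the tag bit once is equivalent to testing each operand separately, because a smi tag is a zero low bit. In C++:

    // Both values are smis iff the OR of the two still has a clear tag bit.
    bool BothSmi(int32_t a, int32_t b) { return ((a | b) & 1) == 0; }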
- if (left_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(left, &transition); - } - if (right_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(right, &transition); - } // Load both operands and check that they are 32-bit integer. // Jump to type transition if they are not. The registers r0 and r1 (right // and left) are preserved for the runtime call. @@ -2991,13 +2964,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { : BinaryOpIC::INT32)) { // We are using vfp registers so r5 is available. heap_number_result = r5; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime, - mode_); + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); __ vstr(d5, r0, HeapNumber::kValueOffset); __ mov(r0, heap_number_result); @@ -3016,13 +2988,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Allocate a heap number to store the result. heap_number_result = r5; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &pop_and_call_runtime, - mode_); + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &pop_and_call_runtime); // Load the left value from the value saved on the stack. __ Pop(r1, r0); @@ -3127,13 +3098,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ bind(&return_heap_number); heap_number_result = r5; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime, - mode_); + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); if (CpuFeatures::IsSupported(VFP2)) { CpuFeatures::Scope scope(VFP2); @@ -3177,7 +3147,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } @@ -3216,32 +3185,20 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { - Label call_runtime, transition; - BinaryOpStub_GenerateFPOperation( - masm, left_type_, right_type_, false, - &transition, &call_runtime, &transition, op_, mode_); - - __ bind(&transition); - GenerateTypeTransition(masm); + Label call_runtime; + GenerateFPOperation(masm, false, &call_runtime, &call_runtime); __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime, call_string_add_or_runtime, transition; + Label call_runtime, call_string_add_or_runtime; - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); + GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); - BinaryOpStub_GenerateFPOperation( - masm, left_type_, right_type_, false, - &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); - - __ bind(&transition); - GenerateTypeTransition(masm); + GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); __ bind(&call_string_add_or_runtime); if (op_ == Token::ADD) { @@ -3249,7 +3206,6 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { } __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } @@ -3285,20 +3241,61 @@ void 
BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { } -void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required, - OverwriteMode mode) { +void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { + GenerateRegisterArgsPush(masm); + switch (op_) { + case Token::ADD: + __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); + break; + case Token::SUB: + __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); + break; + case Token::MUL: + __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); + break; + case Token::DIV: + __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); + break; + case Token::MOD: + __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); + break; + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); + break; + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); + break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); + break; + default: + UNREACHABLE(); + } +} + + +void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required) { // Code below will scratch result if allocation fails. To keep both arguments // intact for the runtime call result cannot be one of these. ASSERT(!result.is(r0) && !result.is(r1)); - if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { + if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) { Label skip_allocation, allocated; - Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0; + Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0; // If the overwritable operand is already an object, we skip the // allocation of a heap number. __ JumpIfNotSmi(overwritable_operand, &skip_allocation); @@ -3311,7 +3308,7 @@ void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, __ mov(result, Operand(overwritable_operand)); __ bind(&allocated); } else { - ASSERT(mode == NO_OVERWRITE); + ASSERT(mode_ == NO_OVERWRITE); __ AllocateHeapNumber( result, scratch1, scratch2, heap_number_map, gc_required); } @@ -4926,7 +4923,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // subject: Subject string // regexp_data: RegExp data (FixedArray) // r0: Instance type of subject string - STATIC_ASSERT(4 == kOneByteStringTag); + STATIC_ASSERT(4 == kAsciiStringTag); STATIC_ASSERT(kTwoByteStringTag == 0); // Find the code object based on the assumptions above. __ and_(r0, r0, Operand(kStringEncodingMask)); @@ -5150,7 +5147,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ldr(subject, FieldMemOperand(subject, ExternalString::kResourceDataOffset)); // Move the pointer so that offset-wise, it looks like a sequential string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); __ sub(subject, subject, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); @@ -5428,6 +5425,48 @@ void CallConstructStub::Generate(MacroAssembler* masm) { } +// Unfortunately you have to run without snapshots to see most of these +// names in the profile since most compare stubs end up in the snapshot. 
+void CompareStub::PrintName(StringStream* stream) { + ASSERT((lhs_.is(r0) && rhs_.is(r1)) || + (lhs_.is(r1) && rhs_.is(r0))); + const char* cc_name; + switch (cc_) { + case lt: cc_name = "LT"; break; + case gt: cc_name = "GT"; break; + case le: cc_name = "LE"; break; + case ge: cc_name = "GE"; break; + case eq: cc_name = "EQ"; break; + case ne: cc_name = "NE"; break; + default: cc_name = "UnknownCondition"; break; + } + bool is_equality = cc_ == eq || cc_ == ne; + stream->Add("CompareStub_%s", cc_name); + stream->Add(lhs_.is(r0) ? "_r0" : "_r1"); + stream->Add(rhs_.is(r0) ? "_r0" : "_r1"); + if (strict_ && is_equality) stream->Add("_STRICT"); + if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); + if (!include_number_compare_) stream->Add("_NO_NUMBER"); + if (!include_smi_compare_) stream->Add("_NO_SMI"); +} + + +int CompareStub::MinorKey() { + // Encode the three parameters in a unique 16 bit value. To avoid duplicate + // stubs the never NaN NaN condition is only taken into account if the + // condition is equals. + ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12)); + ASSERT((lhs_.is(r0) && rhs_.is(r1)) || + (lhs_.is(r1) && rhs_.is(r0))); + return ConditionField::encode(static_cast<unsigned>(cc_) >> 28) + | RegisterField::encode(lhs_.is(r0)) + | StrictField::encode(strict_) + | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) + | IncludeNumberCompareField::encode(include_number_compare_) + | IncludeSmiCompareField::encode(include_smi_compare_); +} + + // StringCharCodeAtGenerator void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Label flat_string; @@ -5877,7 +5916,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Check if the two characters match. // Assumes that word load is little endian. - __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize)); + __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); __ cmp(chars, scratch); __ b(eq, &found_in_symbol_table); __ bind(&next_probe[i]); @@ -5960,28 +5999,23 @@ void SubStringStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - // Arithmetic shift right by one un-smi-tags. In this case we rotate right - // instead because we bail out on non-smi values: ROR and ASR are equivalent - // for smis but they set the flags in a way that's easier to optimize. - __ mov(r2, Operand(r2, ROR, 1), SetCC); - __ mov(r3, Operand(r3, ROR, 1), SetCC, cc); - // If either to or from had the smi tag bit set, then C is set now, and N - // has the same value: we rotated by 1, so the bottom bit is now the top bit. + // I.e., arithmetic shift right by one un-smi-tags. + __ mov(r2, Operand(r2, ASR, 1), SetCC); + __ mov(r3, Operand(r3, ASR, 1), SetCC, cc); + // If either to or from had the smi tag bit set, then carry is set now. + __ b(cs, &runtime); // Either "from" or "to" is not a smi. // We want to bailout to runtime here if From is negative. In that case, the // next instruction is not executed and we fall through to bailing out to - // runtime. - // Executed if both r2 and r3 are untagged integers. - __ sub(r2, r2, Operand(r3), SetCC, cc); - // One of the above un-smis or the above SUB could have set N==1. - __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to. + // runtime. pl is the opposite of mi. + // Both r2 and r3 are untagged integers. + __ sub(r2, r2, Operand(r3), SetCC, pl); + __ b(mi, &runtime); // Fail if from > to. 
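CompareStub::MinorKey() above packs the stub's parameters into one small integer so that each distinct configuration maps to its own cached code object. An illustrative encoding in the same spirit; the field widths and order here are hypothetical, not the ones the real bit fields assign:

    #include <cstdint>

    // One cached stub per distinct key: pack every parameter that changes
    // the generated code into disjoint bit ranges.
    uint32_t EncodeMinorKey(uint32_t condition_bits, bool lhs_is_r0,
                            bool strict, bool never_nan_nan,
                            bool include_number_compare,
                            bool include_smi_compare) {
      return (condition_bits & 0xF)
             | (static_cast<uint32_t>(lhs_is_r0) << 4)
             | (static_cast<uint32_t>(strict) << 5)
             | (static_cast<uint32_t>(never_nan_nan) << 6)
             | (static_cast<uint32_t>(include_number_compare) << 7)
             | (static_cast<uint32_t>(include_smi_compare) << 8);
    }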
// Make sure first argument is a string. __ ldr(r0, MemOperand(sp, kStringOffset)); STATIC_ASSERT(kSmiTag == 0); - // Do a JumpIfSmi, but fold its jump into the subsequent string test. - __ tst(r0, Operand(kSmiTagMask)); - Condition is_string = masm->IsObjectStringType(r0, r1, ne); - ASSERT(is_string == eq); + __ JumpIfSmi(r0, &runtime); + Condition is_string = masm->IsObjectStringType(r0, r1); __ b(NegateCondition(is_string), &runtime); // Short-cut for the case of trivial substring. @@ -6052,7 +6086,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // string's encoding is wrong because we always have to recheck encoding of // the newly created string's parent anyways due to externalized strings. Label two_byte_slice, set_slice_header; - STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ tst(r1, Operand(kStringEncodingMask)); __ b(eq, &two_byte_slice); @@ -6090,12 +6124,12 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ bind(&sequential_string); // Locate first character of underlying subject string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); - __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ bind(&allocate_result); // Sequential acii string. Allocate the result. - STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); + STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0); __ tst(r1, Operand(kStringEncodingMask)); __ b(eq, &two_byte_sequential); @@ -6105,13 +6139,13 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Locate first character of substring to copy. __ add(r5, r5, r3); // Locate first character of result. - __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // r0: result string // r1: first character of result string // r2: result string length // r5: first character of substring to copy - STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); + STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, COPY_ASCII | DEST_ALWAYS_ALIGNED); __ jmp(&return_r0); @@ -6236,7 +6270,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( // doesn't need an additional compare. __ SmiUntag(length); __ add(scratch1, length, - Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ add(left, left, Operand(scratch1)); __ add(right, right, Operand(scratch1)); __ rsb(length, length, Operand::Zero()); @@ -6389,8 +6423,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { &call_runtime); // Get the two characters forming the sub string. - __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); - __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); + __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); // Try to lookup two character string in symbol table. If it is not found // just allocate a new one. 
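On the string check above: IsObjectStringType works because V8's instance-type encoding reserves the low range for strings, so one mask bit separates strings from every other heap object. Sketch with the conventional constants (their exact values are taken as an assumption here):

    #include <cstdint>

    constexpr uint32_t kIsNotStringMask = 0x80;  // assumed per V8 convention
    constexpr uint32_t kStringTag = 0x00;

    bool IsStringInstanceType(uint32_t instance_type) {
      return (instance_type & kIsNotStringMask) == kStringTag;
    }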
@@ -6409,7 +6443,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // in a little endian mode) __ mov(r6, Operand(2)); __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); - __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); + __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); @@ -6459,6 +6493,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ tst(r4, Operand(kAsciiDataHintMask)); __ tst(r5, Operand(kAsciiDataHintMask), ne); __ b(ne, &ascii_data); + __ eor(r4, r4, Operand(r5)); + STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); + __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ b(eq, &ascii_data); // Allocate a two byte cons string. __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); @@ -6491,10 +6530,10 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSeqStringTag == 0); __ tst(r4, Operand(kStringRepresentationMask)); - STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); __ add(r7, r0, - Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), LeaveCC, eq); __ b(eq, &first_prepared); @@ -6507,10 +6546,10 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSeqStringTag == 0); __ tst(r5, Operand(kStringRepresentationMask)); - STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); __ add(r1, r1, - Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), LeaveCC, eq); __ b(eq, &second_prepared); @@ -6533,7 +6572,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ b(eq, &non_ascii_string_add_flat_result); __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); - __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // r0: result string. // r7: first character of first string. // r1: first character of second string. @@ -6624,7 +6663,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMI); + ASSERT(state_ == CompareIC::SMIS); Label miss; __ orr(r2, r1, r0); __ JumpIfNotSmi(r2, &miss); @@ -6645,53 +6684,31 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::HEAP_NUMBER); + ASSERT(state_ == CompareIC::HEAP_NUMBERS); Label generic_stub; Label unordered, maybe_undefined1, maybe_undefined2; Label miss; + __ and_(r2, r1, Operand(r0)); + __ JumpIfSmi(r2, &generic_stub); - if (left_ == CompareIC::SMI) { - __ JumpIfNotSmi(r1, &miss); - } - if (right_ == CompareIC::SMI) { - __ JumpIfNotSmi(r0, &miss); - } + __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); + __ b(ne, &maybe_undefined1); + __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); + __ b(ne, &maybe_undefined2); // Inlining the double comparison and falling back to the general compare - // stub if NaN is involved or VFP2 is unsupported. + // stub if NaN is involved or VFP3 is unsupported. 
if (CpuFeatures::IsSupported(VFP2)) { CpuFeatures::Scope scope(VFP2); - // Load left and right operand. - Label done, left, left_smi, right_smi; - __ JumpIfSmi(r0, &right_smi); - __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, - DONT_DO_SMI_CHECK); - __ sub(r2, r0, Operand(kHeapObjectTag)); - __ vldr(d1, r2, HeapNumber::kValueOffset); - __ b(&left); - __ bind(&right_smi); - __ SmiUntag(r2, r0); // Can't clobber r0 yet. - SwVfpRegister single_scratch = d2.low(); - __ vmov(single_scratch, r2); - __ vcvt_f64_s32(d1, single_scratch); - - __ bind(&left); - __ JumpIfSmi(r1, &left_smi); - __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, - DONT_DO_SMI_CHECK); + // Load left and right operand __ sub(r2, r1, Operand(kHeapObjectTag)); __ vldr(d0, r2, HeapNumber::kValueOffset); - __ b(&done); - __ bind(&left_smi); - __ SmiUntag(r2, r1); // Can't clobber r1 yet. - single_scratch = d3.low(); - __ vmov(single_scratch, r2); - __ vcvt_f64_s32(d0, single_scratch); + __ sub(r2, r0, Operand(kHeapObjectTag)); + __ vldr(d1, r2, HeapNumber::kValueOffset); - __ bind(&done); - // Compare operands. + // Compare operands __ VFPCompareAndSetFlags(d0, d1); // Don't base result on status bits when a NaN is involved. @@ -6705,16 +6722,14 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { } __ bind(&unordered); + CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); __ bind(&generic_stub); - ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, - CompareIC::GENERIC); __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); __ b(ne, &miss); - __ JumpIfSmi(r1, &unordered); __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); __ b(ne, &maybe_undefined2); __ jmp(&unordered); @@ -6732,7 +6747,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SYMBOL); + ASSERT(state_ == CompareIC::SYMBOLS); Label miss; // Registers containing left and right operands respectively. @@ -6770,7 +6785,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRING); + ASSERT(state_ == CompareIC::STRINGS); Label miss; bool equality = Token::IsEqualityOp(op_); @@ -6848,7 +6863,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECT); + ASSERT(state_ == CompareIC::OBJECTS); Label miss; __ and_(r2, r1, Operand(r0)); __ JumpIfSmi(r2, &miss); @@ -7381,7 +7396,12 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { ASSERT(!address.is(r0)); __ Move(address, regs_.address()); __ Move(r0, regs_.object()); - __ Move(r1, address); + if (mode == INCREMENTAL_COMPACTION) { + __ Move(r1, address); + } else { + ASSERT(mode == INCREMENTAL); + __ ldr(r1, MemOperand(address, 0)); + } __ mov(r2, Operand(ExternalReference::isolate_address())); AllowExternalCallThatCantCauseGC scope(masm); @@ -7539,7 +7559,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. 
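Throughout these hunks, loading a heap number's payload first does sub(..., kHeapObjectTag) before vldr: V8 marks heap pointers by setting the low bit, so the raw address is the tagged word minus one (FieldMemOperand folds the same correction into its offset for the ldr/strd forms). Illustrative C++, with the field offset a stand-in value rather than the real HeapNumber::kValueOffset:

    #include <cstdint>
    #include <cstring>

    constexpr intptr_t kHeapObjectTag = 1;
    constexpr int kValueOffset = 8;  // assumed stand-in offset

    double LoadHeapNumberValue(intptr_t tagged) {
      const char* raw = reinterpret_cast<const char*>(tagged - kHeapObjectTag);
      double value;
      std::memcpy(&value, raw + kValueOffset, sizeof value);  // like vldr above
      return value;
    }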
__ bind(&double_elements); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ StoreNumberToDoubleElements(r0, r3, + __ StoreNumberToDoubleElements(r0, r3, r1, // Overwrites all regs after this. r5, r6, r7, r9, r2, &slow_elements); @@ -7549,7 +7569,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (entry_hook_ != NULL) { - PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize); + PredictableCodeSizeScope predictable(masm); ProfileEntryHookStub stub; __ push(lr); __ CallStub(&stub); |