path: root/deps/v8/src/arm
author     Ryan Dahl <ry@tinyclouds.org>  2010-06-23 20:31:48 -0700
committer  Ryan Dahl <ry@tinyclouds.org>  2010-06-23 20:32:06 -0700
commit     2c0d91be6c8972f325b64a15c798f45e68bf183a (patch)
tree       f77c0d7fcb6a80f8483d1fa4d0dd0df0b88d68d6 /deps/v8/src/arm
parent     ba792ea2020f38ebf84925e740b42823b739d0d3 (diff)
download   node-new-2c0d91be6c8972f325b64a15c798f45e68bf183a.tar.gz
Upgrade V8 to 2.2.19
Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h    |   5
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc       |  66
-rw-r--r--  deps/v8/src/arm/assembler-arm.h        |  20
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc         | 140
-rw-r--r--  deps/v8/src/arm/codegen-arm.h          |   4
-rw-r--r--  deps/v8/src/arm/constants-arm.h        |   3
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc          |  22
-rw-r--r--  deps/v8/src/arm/fast-codegen-arm.cc    |   3
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc    |  25
-rw-r--r--  deps/v8/src/arm/ic-arm.cc              |  17
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc |  71
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h  |  15
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc       |   9
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc      |   8
14 files changed, 296 insertions, 112 deletions
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 8ca91265ba..114ec234d2 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -45,11 +45,6 @@
namespace v8 {
namespace internal {
-Condition NegateCondition(Condition cc) {
- ASSERT(cc != al);
- return static_cast<Condition>(cc ^ ne);
-}
-
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 16dc5cdfcf..f8d98db926 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -282,6 +282,11 @@ const Instr kBlxRegPattern =
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
+const Instr kMovLeaveCCMask = 0xdff * B16;
+const Instr kMovLeaveCCPattern = 0x1a0 * B16;
+const Instr kMovwMask = 0xff * B20;
+const Instr kMovwPattern = 0x30 * B20;
+const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
@@ -389,6 +394,12 @@ void Assembler::Align(int m) {
}
+void Assembler::CodeTargetAlign() {
+ // Preferred alignment of jump targets on some ARM chips.
+ Align(8);
+}
+
+
bool Assembler::IsNop(Instr instr, int type) {
// Check for mov rx, rx.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
@@ -640,6 +651,12 @@ void Assembler::next(Label* L) {
}
+static Instr EncodeMovwImmediate(uint32_t immediate) {
+ ASSERT(immediate < 0x10000);
+ return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
+
+
// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
@@ -664,6 +681,15 @@ static bool fits_shifter(uint32_t imm32,
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kMovMvnFlip;
return true;
+ } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ if (imm32 < 0x10000) {
+ *instr ^= kMovwLeaveCCFlip;
+ *instr |= EncodeMovwImmediate(imm32);
+ *rotate_imm = *immed_8 = 0; // Not used for movw.
+ return true;
+ }
+ }
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
@@ -695,7 +721,7 @@ static bool fits_shifter(uint32_t imm32,
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-static bool MustUseIp(RelocInfo::Mode rmode) {
+static bool MustUseConstantPool(RelocInfo::Mode rmode) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
@@ -712,7 +738,7 @@ static bool MustUseIp(RelocInfo::Mode rmode) {
bool Operand::is_single_instruction() const {
if (rm_.is_valid()) return true;
- if (MustUseIp(rmode_)) return false;
+ if (MustUseConstantPool(rmode_)) return false;
uint32_t dummy1, dummy2;
return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
}
@@ -728,19 +754,34 @@ void Assembler::addrmod1(Instr instr,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (MustUseIp(x.rmode_) ||
+ if (MustUseConstantPool(x.rmode_) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
// However, if the original instruction is a 'mov rd, x' (not setting the
// condition code), then replace it with a 'ldr rd, [pc]'.
- RecordRelocInfo(x.rmode_, x.imm32_);
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = static_cast<Condition>(instr & CondMask);
if ((instr & ~CondMask) == 13*B21) { // mov, S not set
- ldr(rd, MemOperand(pc, 0), cond);
+ if (MustUseConstantPool(x.rmode_) ||
+ !CpuFeatures::IsSupported(ARMv7)) {
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ ldr(rd, MemOperand(pc, 0), cond);
+ } else {
+ // Will probably use movw, will certainly not use constant pool.
+ mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
+ movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+ }
} else {
- ldr(ip, MemOperand(pc, 0), cond);
+ // If this is not a mov or mvn instruction we may still be able to avoid
+ // a constant pool entry by using mvn or movw.
+ if (!MustUseConstantPool(x.rmode_) &&
+ (instr & kMovMvnMask) != kMovMvnPattern) {
+ mov(ip, x, LeaveCC, cond);
+ } else {
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ ldr(ip, MemOperand(pc, 0), cond);
+ }
addrmod1(instr, rn, rd, Operand(ip));
}
return;
@@ -1051,6 +1092,17 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
}
+void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
+ ASSERT(immediate < 0x10000);
+ mov(reg, Operand(immediate), LeaveCC, cond);
+}
+
+
+void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
+ emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
+}
+
+
void Assembler::bic(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 14*B21 | s, src1, dst, src2);
@@ -1231,7 +1283,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (MustUseIp(src.rmode_) ||
+ if (MustUseConstantPool(src.rmode_) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
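
The movw support added above packs a 16-bit immediate into two fields of the instruction word: imm4 in bits 19..16 and imm12 in bits 11..0, which is what EncodeMovwImmediate computes. fits_shifter then uses that packing when it rewrites a condition-code-preserving mov into movw by XOR-ing the opcode with kMovwLeaveCCFlip. A minimal standalone sketch of the packing (plain C++ for illustration, not V8 code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // ARMv7 movw carries a 16-bit immediate split as imm4:imm12, where imm4
    // occupies bits 19..16 and imm12 occupies bits 11..0 of the instruction.
    static uint32_t EncodeMovwImmediate(uint32_t immediate) {
      assert(immediate < 0x10000);
      return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
    }

    int main() {
      uint32_t fields = EncodeMovwImmediate(0xABCD);
      // imm4 = 0xA lands in bits 19..16, imm12 = 0xBCD in bits 11..0.
      printf("imm4 = %x, imm12 = %x\n",
             (unsigned)((fields >> 16) & 0xf), (unsigned)(fields & 0xfff));
      return 0;
    }

ImmedMovwMovtField in constants-arm.h (further down in this commit) is simply the inverse of this packing: imm4 << 12 | imm12 recovers the original 16-bit value.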
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index f4b43006f2..869227a7a8 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -279,7 +279,10 @@ enum Condition {
// Returns the equivalent of !cc.
-INLINE(Condition NegateCondition(Condition cc));
+inline Condition NegateCondition(Condition cc) {
+ ASSERT(cc != al);
+ return static_cast<Condition>(cc ^ ne);
+}
// Corresponds to transposing the operands of a comparison.
@@ -545,6 +548,12 @@ extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
extern const Instr kMovMvnFlip;
+extern const Instr kMovLeaveCCMask;
+extern const Instr kMovLeaveCCPattern;
+extern const Instr kMovwMask;
+extern const Instr kMovwPattern;
+extern const Instr kMovwLeaveCCFlip;
+
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
@@ -694,6 +703,8 @@ class Assembler : public Malloced {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
// Branch instructions
void b(int branch_offset, Condition cond = al);
@@ -772,6 +783,13 @@ class Assembler : public Malloced {
mov(dst, Operand(src), s, cond);
}
+ // ARMv7 instructions for loading a 32 bit immediate in two instructions.
+ // This may actually emit a different mov instruction, but on an ARMv7 it
+ // is guaranteed to only emit one instruction.
+ void movw(Register reg, uint32_t immediate, Condition cond = al);
+ // The constant for movt should be in the range 0-0xffff.
+ void movt(Register reg, uint32_t immediate, Condition cond = al);
+
void bic(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
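
The NegateCondition definition moved into this header relies on complementary ARM condition codes differing only in their least-significant bit (eq/ne, cs/cc, ge/lt, ...); in V8's encoding the codes sit in bits 31..28, so ne is 1 << 28 and XOR-ing with it flips a condition to its opposite. A small sketch using the unshifted 4-bit architectural values (illustration only, not the V8 enum):

    #include <cassert>

    // Architectural 4-bit ARM condition codes; complementary conditions
    // differ only in the least-significant bit, which makes the XOR work.
    enum Condition {
      eq = 0, ne = 1, cs = 2, cc = 3, mi = 4, pl = 5, vs = 6, vc = 7,
      hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14
    };

    inline Condition NegateCondition(Condition cond) {
      assert(cond != al);                        // al (always) has no complement.
      return static_cast<Condition>(cond ^ ne);  // flip the low bit.
    }

    int main() {
      assert(NegateCondition(eq) == ne);
      assert(NegateCondition(lt) == ge);
      assert(NegateCondition(hi) == ls);
      return 0;
    }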
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 27eec4b424..8e87614c96 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -268,8 +268,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Load the offset into r3.
int slot_offset =
FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(slot_offset));
- __ RecordWrite(r2, r3, r1);
+ __ RecordWrite(r2, Operand(slot_offset), r3, r1);
}
}
}
@@ -3109,9 +3108,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
exit.Branch(eq);
// scratch is loaded with context when calling SlotOperand above.
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(offset));
// r1 could be identical with tos, but that doesn't matter.
- __ RecordWrite(scratch, r3, r1);
+ __ RecordWrite(scratch, Operand(offset), r3, r1);
}
// If we definitely did not jump over the assignment, we do not need
// to bind the exit label. Doing so can defeat peephole
@@ -3464,8 +3462,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
__ str(r0, FieldMemOperand(r1, offset));
// Update the write barrier for the array address.
- __ mov(r3, Operand(offset));
- __ RecordWrite(r1, r3, r2);
+ __ RecordWrite(r1, Operand(offset), r3, r2);
}
ASSERT_EQ(original_height + 1, frame_->height());
}
@@ -4279,8 +4276,7 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
// Store the value.
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier.
- __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
// Leave.
leave.Bind();
frame_->EmitPush(r0);
@@ -4710,7 +4706,8 @@ void CodeGenerator::GenerateRandomHeapNumber(
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
- __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
@@ -7207,7 +7204,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
void RecordWriteStub::Generate(MacroAssembler* masm) {
- __ RecordWriteHelper(object_, offset_, scratch_);
+ __ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_);
__ Ret();
}
@@ -7367,12 +7364,16 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
+ Register heap_number_map = r6;
if (ShouldGenerateSmiCode()) {
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
// Smi-smi case (overflow).
// Since both are Smis there is no heap number to overwrite, so allocate.
- // The new heap number is in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow);
+ // The new heap number is in r5. r3 and r7 are scratch.
+ __ AllocateHeapNumber(
+ r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values.
@@ -7385,14 +7386,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ vmov(s13, r7);
__ vcvt_f64_s32(d6, s13);
} else {
- // Write Smi from rhs to r3 and r2 in double format. r6 is scratch.
+ // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
__ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ ConvertToDoubleStub stub1(r3, r2, r7, r9);
__ push(lr);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from lhs to r1 and r0 in double format. r6 is scratch.
+ // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
__ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ ConvertToDoubleStub stub2(r1, r0, r7, r9);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
@@ -7401,6 +7402,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
// We branch here if at least one of r0 and r1 is not a Smi.
__ bind(not_smi);
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// After this point we have the left hand side in r1 and the right hand side
// in r0.
@@ -7423,18 +7425,22 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
default:
break;
}
+ // Restore heap number map register.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
if (mode_ == NO_OVERWRITE) {
// In the case where there is no chance of an overwritable float we may as
// well do the allocation immediately while r0 and r1 are untouched.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
}
// Move r0 to a double in r2-r3.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_RIGHT) {
__ mov(r5, Operand(r0)); // Overwrite this heap number.
@@ -7452,7 +7458,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ bind(&r0_is_smi);
if (mode_ == OVERWRITE_RIGHT) {
// We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (use_fp_registers) {
@@ -7464,7 +7470,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r6);
+ ConvertToDoubleStub stub3(r3, r2, r7, r4);
__ push(lr);
__ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
@@ -7477,6 +7483,8 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &r1_is_not_smi);
GenerateTypeTransition(masm);
+ // Restore heap number map register.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&r1_is_smi);
}
@@ -7486,7 +7494,9 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ bind(&r1_is_not_smi);
- __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_LEFT) {
__ mov(r5, Operand(r1)); // Overwrite this heap number.
@@ -7504,7 +7514,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ bind(&r1_is_smi);
if (mode_ == OVERWRITE_LEFT) {
// We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (use_fp_registers) {
@@ -7516,7 +7526,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r6);
+ ConvertToDoubleStub stub4(r1, r0, r7, r9);
__ push(lr);
__ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
@@ -7577,13 +7587,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
}
}
-
if (lhs.is(r0)) {
__ b(&slow);
__ bind(&slow_reverse);
__ Swap(r0, r1, ip);
}
+ heap_number_map = no_reg; // Don't use this any more from here on.
+
// We jump to here if something goes wrong (one param is not a number of any
// sort or new-space allocation fails).
__ bind(&slow);
@@ -7749,9 +7760,13 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
Label rhs_is_smi, lhs_is_smi;
Label done_checking_rhs, done_checking_lhs;
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
__ tst(lhs, Operand(kSmiTagMask));
__ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, lhs, r3, r5, r4, &slow);
__ jmp(&done_checking_lhs);
@@ -7761,7 +7776,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
__ tst(rhs, Operand(kSmiTagMask));
__ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, rhs, r2, r5, r4, &slow);
__ jmp(&done_checking_rhs);
@@ -7821,8 +7837,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
break;
}
case NO_OVERWRITE: {
- // Get a new heap number in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
default: break;
}
@@ -7841,8 +7857,8 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
if (mode_ != NO_OVERWRITE) {
__ bind(&have_to_allocate);
- // Get a new heap number in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
__ jmp(&got_a_heap_number);
}
@@ -7968,10 +7984,11 @@ const char* GenericBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s",
+ "GenericBinaryOpStub_%s_%s%s_%s",
op_name,
overwrite_name,
- specialized_on_rhs_ ? "_ConstantRhs" : 0);
+ specialized_on_rhs_ ? "_ConstantRhs" : "",
+ BinaryOpIC::GetName(runtime_operands_type_));
return name_;
}
@@ -8164,6 +8181,28 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
__ Ret();
__ bind(&smi_is_unsuitable);
+ } else if (op_ == Token::MOD &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS) {
+ // Do generate a bit of smi code for modulus even though the default for
+ // modulus is not to do it, but as the ARM processor has no coprocessor
+ // support for modulus, checking for smis makes sense.
+ Label slow;
+ ASSERT(!ShouldGenerateSmiCode());
+ ASSERT(kSmiTag == 0); // Adjust code below.
+ // Check for two positive smis.
+ __ orr(smi_test_reg, lhs, Operand(rhs));
+ __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
+ __ b(ne, &slow);
+ // Check that rhs is a power of two and not zero.
+ __ sub(scratch, rhs, Operand(1), SetCC);
+ __ b(mi, &slow);
+ __ tst(rhs, scratch);
+ __ b(ne, &slow);
+ // Calculate power of two modulus.
+ __ and_(result, lhs, Operand(scratch));
+ __ Ret();
+ __ bind(&slow);
}
HandleBinaryOpSlowCases(
masm,
@@ -8391,6 +8430,9 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label slow, done;
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
if (op_ == Token::SUB) {
// Check whether the value is a smi.
Label try_float;
@@ -8411,7 +8453,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ b(&done);
__ bind(&try_float);
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
__ b(ne, &slow);
// r0 is a heap number. Get a new heap number in r1.
if (overwrite_) {
@@ -8419,7 +8463,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
} else {
- __ AllocateHeapNumber(r1, r2, r3, &slow);
+ __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
@@ -8429,7 +8473,9 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
}
} else if (op_ == Token::BIT_NOT) {
// Check if the operand is a heap number.
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
__ b(ne, &slow);
// Convert the heap number in r0 to an untagged integer in r1.
@@ -8449,7 +8495,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
// Allocate a fresh heap number, but don't overwrite r0 until
// we're sure we can do it without going through the slow case
// that needs the value in r0.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ mov(r0, Operand(r2));
}
@@ -9431,17 +9477,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
__ mov(r3, last_match_info_elements); // Moved up to reduce latency.
- __ mov(r2, Operand(RegExpImpl::kLastSubjectOffset)); // Ditto.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(r3, r2, r7);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
__ mov(r3, last_match_info_elements);
- __ mov(r2, Operand(RegExpImpl::kLastInputOffset));
- __ RecordWrite(r3, r2, r7);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -10543,13 +10587,14 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
- Label non_ascii, allocated;
+ Label non_ascii, allocated, ascii_data;
ASSERT_EQ(0, kTwoByteStringTag);
__ tst(r4, Operand(kStringEncodingMask));
__ tst(r5, Operand(kStringEncodingMask), ne);
__ b(eq, &non_ascii);
// Allocate an ASCII cons string.
+ __ bind(&ascii_data);
__ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
@@ -10561,6 +10606,19 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // r4: first instance type.
+ // r5: second instance type.
+ __ tst(r4, Operand(kAsciiDataHintMask));
+ __ tst(r5, Operand(kAsciiDataHintMask), ne);
+ __ b(ne, &ascii_data);
+ __ eor(r4, r4, Operand(r5));
+ ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ b(eq, &ascii_data);
+
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
__ jmp(&allocated);
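
The new Token::MOD fast path above rests on the identity lhs % rhs == lhs & (rhs - 1) for a non-negative lhs and a non-zero power-of-two rhs, with rhs being a power of two checked via rhs & (rhs - 1) == 0. A hedged sketch of that arithmetic in plain C++ (the smi tag-bit handling of the real stub is omitted):

    #include <cassert>
    #include <cstdint>

    // Returns true and writes lhs % rhs when rhs is a non-zero power of two,
    // mirroring the checks in the generated code; otherwise falls through.
    static bool PowerOfTwoModulus(uint32_t lhs, uint32_t rhs, uint32_t* result) {
      if (rhs == 0) return false;            // sub rhs, 1 would go negative -> slow
      uint32_t mask = rhs - 1;
      if ((rhs & mask) != 0) return false;   // more than one bit set -> slow
      *result = lhs & mask;                  // modulus reduces to a single AND
      return true;
    }

    int main() {
      uint32_t r;
      assert(PowerOfTwoModulus(45, 8, &r) && r == 45 % 8);
      assert(!PowerOfTwoModulus(45, 12, &r));  // 12 is not a power of two
      assert(!PowerOfTwoModulus(45, 0, &r));
      return 0;
    }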
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index eeb89e0712..be4d556196 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -669,7 +669,9 @@ class GenericBinaryOpStub : public CodeStub {
}
void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs);
+ void HandleNonSmiBitwiseOp(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
Register lhs,
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index e36f595c3d..fa9adbd704 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -284,6 +284,9 @@ class Instr {
// with immediate
inline int RotateField() const { return Bits(11, 8); }
inline int Immed8Field() const { return Bits(7, 0); }
+ inline int Immed4Field() const { return Bits(19, 16); }
+ inline int ImmedMovwMovtField() const {
+ return Immed4Field() << 12 | Offset12Field(); }
// Fields used in Load/Store instructions
inline int PUField() const { return Bits(24, 23); }
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 1c05bc3a4a..400536993a 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -101,6 +101,7 @@ class Decoder {
void PrintSRegister(int reg);
void PrintDRegister(int reg);
int FormatVFPRegister(Instr* instr, const char* format);
+ void PrintMovwMovt(Instr* instr);
int FormatVFPinstruction(Instr* instr, const char* format);
void PrintCondition(Instr* instr);
void PrintShiftRm(Instr* instr);
@@ -375,6 +376,16 @@ int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
}
+// Print the movw or movt instruction.
+void Decoder::PrintMovwMovt(Instr* instr) {
+ int imm = instr->ImmedMovwMovtField();
+ int rd = instr->RdField();
+ PrintRegister(rd);
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", #%d", imm);
+}
+
+
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
@@ -430,7 +441,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
return 1;
}
case 'm': {
- if (format[1] == 'e') { // 'memop: load/store instructions
+ if (format[1] == 'w') {
+ // 'mw: movt/movw instructions.
+ PrintMovwMovt(instr);
+ return 2;
+ }
+ if (format[1] == 'e') { // 'memop: load/store instructions.
ASSERT(STRING_STARTS_WITH(format, "memop"));
if (instr->HasL()) {
Print("ldr");
@@ -776,7 +792,7 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "tst'cond 'rn, 'shift_op");
} else {
- Unknown(instr); // not used by V8
+ Format(instr, "movw'cond 'mw");
}
break;
}
@@ -794,7 +810,7 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "cmp'cond 'rn, 'shift_op");
} else {
- Unknown(instr); // not used by V8
+ Format(instr, "movt'cond 'mw");
}
break;
}
diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc
index 48eaf46aaf..36ac2aa3d3 100644
--- a/deps/v8/src/arm/fast-codegen-arm.cc
+++ b/deps/v8/src/arm/fast-codegen-arm.cc
@@ -102,8 +102,7 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
}
if (needs_write_barrier) {
- __ mov(scratch1(), Operand(offset));
- __ RecordWrite(scratch0(), scratch1(), scratch2());
+ __ RecordWrite(scratch0(), Operand(offset), scratch1(), scratch2());
}
if (destination().is(accumulator1())) {
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 58d737834b..673287388a 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -110,10 +110,10 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
__ mov(r1, Operand(Context::SlotOffset(slot->index())));
__ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
+ // registers, so we have to use two more registers to avoid
// clobbering cp.
__ mov(r2, Operand(cp));
- __ RecordWrite(r2, r1, r0);
+ __ RecordWrite(r2, Operand(r1), r3, r0);
}
}
}
@@ -666,8 +666,10 @@ void FullCodeGenerator::Move(Slot* dst,
__ str(src, location);
// Emit the write barrier code if the location is in the heap.
if (dst->type() == Slot::CONTEXT) {
- __ mov(scratch2, Operand(Context::SlotOffset(dst->index())));
- __ RecordWrite(scratch1, scratch2, src);
+ __ RecordWrite(scratch1,
+ Operand(Context::SlotOffset(dst->index())),
+ scratch2,
+ src);
}
}
@@ -715,10 +717,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
__ str(result_register(),
CodeGenerator::ContextOperand(cp, slot->index()));
int offset = Context::SlotOffset(slot->index());
- __ mov(r2, Operand(offset));
// We know that we have written a function, which is not a smi.
__ mov(r1, Operand(cp));
- __ RecordWrite(r1, r2, result_register());
+ __ RecordWrite(r1, Operand(offset), r2, result_register());
}
break;
@@ -1252,8 +1253,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Update the write barrier for the array store with r0 as the scratch
// register.
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, result_register());
+ __ RecordWrite(r1, Operand(offset), r2, result_register());
}
if (result_saved) {
@@ -1493,8 +1493,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(offset), r2, r3);
break;
}
@@ -2157,7 +2156,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
- __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
@@ -2276,8 +2276,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
__ bind(&done);
Apply(context_, r0);
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 5eb98b12ba..c6de4d8ef4 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1339,7 +1339,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ bind(&box_int);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Use r0 for result as key is not needed any more.
- __ AllocateHeapNumber(r0, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r0, r3, r4, r6, &slow);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
@@ -1370,7 +1371,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
@@ -1407,7 +1409,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
- __ AllocateHeapNumber(r4, r5, r6, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
@@ -1423,7 +1426,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
@@ -1434,7 +1438,8 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
- __ AllocateHeapNumber(r3, r4, r5, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
@@ -1692,7 +1697,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ Ret(eq);
// Update write barrier for the elements array address.
__ sub(r4, r5, Operand(elements));
- __ RecordWrite(elements, r4, r5);
+ __ RecordWrite(elements, Operand(r4), r5, r6);
__ Ret();
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 9bbc31f773..29e168c51e 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -270,6 +270,17 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
}
+void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
+ ASSERT(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ bic(dst, dst, Operand(mask));
+ } else {
+ bfc(dst, lsb, width, cond);
+ }
+}
+
+
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);
@@ -299,31 +310,32 @@ void MacroAssembler::StoreRoot(Register source,
void MacroAssembler::RecordWriteHelper(Register object,
- Register offset,
- Register scratch) {
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
if (FLAG_debug_code) {
// Check that the object is not in new space.
Label not_in_new_space;
- InNewSpace(object, scratch, ne, &not_in_new_space);
+ InNewSpace(object, scratch1, ne, &not_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(&not_in_new_space);
}
- mov(ip, Operand(Page::kPageAlignmentMask)); // Load mask only once.
-
- // Calculate region number.
- add(offset, object, Operand(offset)); // Add offset into the object.
- and_(offset, offset, Operand(ip)); // Offset into page of the object.
- mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2));
+ // Add offset into the object.
+ add(scratch0, object, offset);
// Calculate page address.
- bic(object, object, Operand(ip));
+ Bfc(object, 0, kPageSizeBits);
+
+ // Calculate region number.
+ Ubfx(scratch0, scratch0, Page::kRegionSizeLog2,
+ kPageSizeBits - Page::kRegionSizeLog2);
// Mark region dirty.
- ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ ldr(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
- orr(scratch, scratch, Operand(ip, LSL, offset));
- str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ orr(scratch1, scratch1, Operand(ip, LSL, scratch0));
+ str(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
}
@@ -341,21 +353,23 @@ void MacroAssembler::InNewSpace(Register object,
// Will clobber 4 registers: object, offset, scratch, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, Register offset,
- Register scratch) {
+void MacroAssembler::RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
- ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+ ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
Label done;
// First, test that the object is not in the new space. We cannot set
// region marks for new space pages.
- InNewSpace(object, scratch, eq, &done);
+ InNewSpace(object, scratch0, eq, &done);
// Record the actual write.
- RecordWriteHelper(object, offset, scratch);
+ RecordWriteHelper(object, offset, scratch0, scratch1);
bind(&done);
@@ -363,8 +377,8 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(offset, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
}
}
@@ -1514,6 +1528,16 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
}
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index) {
+ if (FLAG_debug_code) {
+ LoadRoot(ip, index);
+ cmp(reg, ip);
+ Check(eq, "Register did not match expected root");
+ }
+}
+
+
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
b(cc, &L);
@@ -1632,6 +1656,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
+ Register heap_number_map,
Label* gc_required) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
@@ -1642,9 +1667,9 @@ void MacroAssembler::AllocateHeapNumber(Register result,
gc_required,
TAG_OBJECT);
- // Get heap number map and store it in the allocated object.
- LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
- str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ // Store heap number map in the allocated object.
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
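
The rewritten RecordWriteHelper above computes the written address, masks the object down to its page base (Bfc, or a bic with an equivalent mask when ARMv7 is absent), extracts the region number within the page with Ubfx, and ORs the corresponding bit into the page's dirty-flag word. A sketch of that arithmetic; kPageSizeBits and kRegionSizeLog2 below are assumptions chosen to match the 8K-page comment in macro-assembler-arm.h, the real values live in V8's spaces.h:

    #include <cassert>
    #include <cstdint>

    const int kPageSizeBits = 13;                        // assumed: 8K pages
    const int kRegionSizeLog2 = 8;                       // assumed region size
    const uint32_t kPageAlignmentMask = (1u << kPageSizeBits) - 1;

    // Given an object address and a byte offset into it, compute the page base
    // and the bit that marks the written region as dirty.
    static void MarkRegionDirty(uint32_t object, uint32_t offset,
                                uint32_t* page, uint32_t* dirty_bit) {
      uint32_t address = object + offset;                // add(scratch0, object, offset)
      *page = object & ~kPageAlignmentMask;              // Bfc(object, 0, kPageSizeBits)
      uint32_t region =                                  // Ubfx(scratch0, ...)
          (address & kPageAlignmentMask) >> kRegionSizeLog2;
      *dirty_bit = 1u << region;                         // orr into kDirtyFlagOffset word
    }

    int main() {
      uint32_t page, bit;
      MarkRegionDirty(0x20001001u, 0x204, &page, &bit);
      assert(page == 0x20000000u);
      assert(bit == 1u << (0x1205u >> kRegionSizeLog2));
      return 0;
    }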
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index f1eb0912af..e02a6c8a3e 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -100,6 +100,7 @@ class MacroAssembler: public Assembler {
Condition cond = al);
void Sbfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
+ void Bfc(Register dst, int lsb, int width, Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
@@ -127,13 +128,19 @@ class MacroAssembler: public Assembler {
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
- void RecordWriteHelper(Register object, Register offset, Register scratch);
+ void RecordWriteHelper(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
- // The 'scratch' register is used in the implementation and all 3 registers
+ // The 'scratch' registers are used in the implementation and all 3 registers
// are clobbered by the operation, as well as the ip register.
- void RecordWrite(Register object, Register offset, Register scratch);
+ void RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
@@ -372,6 +379,7 @@ class MacroAssembler: public Assembler {
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
+ Register heap_number_map,
Label* gc_required);
// ---------------------------------------------------------------------------
@@ -551,6 +559,7 @@ class MacroAssembler: public Assembler {
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
+ void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 3bdca38eba..77776c2b6d 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1859,7 +1859,9 @@ void Simulator::DecodeType01(Instr* instr) {
SetNZFlags(alu_out);
SetCFlag(shifter_carry_out);
} else {
- UNIMPLEMENTED();
+ // Format(instr, "movw'cond 'rd, 'imm").
+ alu_out = instr->ImmedMovwMovtField();
+ set_register(rd, alu_out);
}
break;
}
@@ -1888,7 +1890,10 @@ void Simulator::DecodeType01(Instr* instr) {
SetCFlag(!BorrowFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
} else {
- UNIMPLEMENTED();
+ // Format(instr, "movt'cond 'rd, 'imm").
+ alu_out = (get_register(rd) & 0xffff) |
+ (instr->ImmedMovwMovtField() << 16);
+ set_register(rd, alu_out);
}
break;
}
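
The simulator changes above give movw/movt the semantics the assembler now relies on in addrmod1: movw loads the low 16 bits and clears the top half, movt replaces only the top 16 bits, so the pair loads an arbitrary 32-bit constant without a constant-pool entry. A plain C++ illustration of those semantics (not simulator code):

    #include <cassert>
    #include <cstdint>

    static uint32_t Movw(uint32_t /*rd*/, uint32_t imm16) {
      return imm16;                              // top half cleared
    }

    static uint32_t Movt(uint32_t rd, uint32_t imm16) {
      return (rd & 0xffffu) | (imm16 << 16);     // keep low half, set high half
    }

    int main() {
      uint32_t value = 0xdeadbeef;
      uint32_t rd = Movw(0, value & 0xffff);     // movw rd, #0xbeef
      rd = Movt(rd, value >> 16);                // movt rd, #0xdead
      assert(rd == value);
      return 0;
    }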
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index dc0f4a7c4c..3e5ba1126f 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -336,9 +336,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
__ b(eq, &exit);
// Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, Operand(offset));
- __ RecordWrite(receiver_reg, name_reg, scratch);
+ // Pass the now unused name_reg as a scratch register.
+ __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -352,8 +351,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, Operand(offset));
- __ RecordWrite(scratch, name_reg, receiver_reg);
+ __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
}
// Return the value (register r0).